[llvm] MachineScheduler: Reset next cluster candidate for each node (PR #139513)

via llvm-commits llvm-commits at lists.llvm.org
Tue May 27 05:53:21 PDT 2025


https://github.com/ruiling updated https://github.com/llvm/llvm-project/pull/139513

>From 237333558093012ec321ac1a441fb14e04cc8500 Mon Sep 17 00:00:00 2001
From: Ruiling Song <ruiling.song at amd.com>
Date: Wed, 7 May 2025 11:51:10 +0800
Subject: [PATCH 1/2] MachineScheduler: Reset next cluster candidate for each
 node

When a node is picked, we should reset its next cluster candidate to
null before releasing its successors/predecessors.
---
 llvm/lib/CodeGen/MachineScheduler.cpp         |    7 +
 llvm/test/CodeGen/AArch64/expand-select.ll    |   20 +-
 llvm/test/CodeGen/AArch64/extbinopload.ll     |   85 +-
 llvm/test/CodeGen/AArch64/fptoi.ll            |  140 +-
 .../test/CodeGen/AArch64/fptoui-sat-vector.ll |   32 +-
 llvm/test/CodeGen/AArch64/itofp.ll            |  180 +-
 llvm/test/CodeGen/AArch64/nontemporal-load.ll |   17 +-
 .../AArch64/sve-fixed-vector-llrint.ll        |   86 +-
 .../CodeGen/AArch64/sve-fixed-vector-lrint.ll |   86 +-
 ...e-streaming-mode-fixed-length-bitselect.ll |   94 +-
 ...e-streaming-mode-fixed-length-fp-reduce.ll |   24 +-
 ...streaming-mode-fixed-length-int-extends.ll |  162 +-
 ...-streaming-mode-fixed-length-int-reduce.ll |   32 +-
 ...e-streaming-mode-fixed-length-int-to-fp.ll |  146 +-
 llvm/test/CodeGen/AArch64/vec_uaddo.ll        |    2 +-
 llvm/test/CodeGen/AArch64/vec_umulo.ll        |    8 +-
 llvm/test/CodeGen/AArch64/vselect-ext.ll      |   30 +-
 .../AArch64/wide-scalar-shift-legalization.ll |   59 +-
 llvm/test/CodeGen/AArch64/zext-to-tbl.ll      |  107 +-
 llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll   |   23 +-
 .../AMDGPU/GlobalISel/extractelement.ll       |   60 +-
 llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll   |  635 +-
 llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll   |  581 +-
 .../AMDGPU/GlobalISel/insertelement.i16.ll    |   23 +-
 .../AMDGPU/GlobalISel/insertelement.i8.ll     |   19 +-
 .../AMDGPU/GlobalISel/insertelement.ll        |   10 +-
 .../GlobalISel/llvm.amdgcn.intersect_ray.ll   |    8 +-
 llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll   |    4 +-
 llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll    |  249 +-
 .../test/CodeGen/AMDGPU/GlobalISel/saddsat.ll |   79 +-
 .../test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll |   38 +-
 .../test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll |   98 +-
 .../CodeGen/AMDGPU/GlobalISel/udiv.i64.ll     |   48 +-
 .../test/CodeGen/AMDGPU/GlobalISel/udivrem.ll |  138 +-
 .../CodeGen/AMDGPU/GlobalISel/urem.i64.ll     |   48 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll  | 6124 ++++++++---------
 .../CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll   |   26 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll   |    8 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll   |  480 +-
 .../test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll |    4 +-
 .../AMDGPU/amdgpu-cs-chain-preserve-cc.ll     |    4 +-
 .../atomic_optimizations_local_pointer.ll     |   12 +-
 llvm/test/CodeGen/AMDGPU/bf16.ll              | 3012 ++++----
 .../AMDGPU/buffer-fat-pointers-memcpy.ll      |    8 +-
 .../CodeGen/AMDGPU/call-argument-types.ll     |   30 +-
 .../test/CodeGen/AMDGPU/carryout-selection.ll |    3 +-
 llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll   |   46 +-
 llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll   |   48 +-
 llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll     |   10 +-
 llvm/test/CodeGen/AMDGPU/fdiv.ll              |    2 +-
 llvm/test/CodeGen/AMDGPU/fmed3.ll             |    4 +-
 .../CodeGen/AMDGPU/fneg-modifier-casting.ll   |    2 +-
 llvm/test/CodeGen/AMDGPU/freeze.ll            |   52 +-
 .../CodeGen/AMDGPU/function-args-inreg.ll     |    6 +-
 .../AMDGPU/gfx-callable-argument-types.ll     |   45 +-
 .../AMDGPU/gfx-callable-return-types.ll       |   20 +-
 llvm/test/CodeGen/AMDGPU/half.ll              |    2 +-
 llvm/test/CodeGen/AMDGPU/i1-to-bf16.ll        |   32 +-
 llvm/test/CodeGen/AMDGPU/idiv-licm.ll         |    7 +-
 llvm/test/CodeGen/AMDGPU/idot4u.ll            |    6 +-
 .../CodeGen/AMDGPU/indirect-addressing-si.ll  |   64 +-
 llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll |   28 +-
 .../CodeGen/AMDGPU/integer-mad-patterns.ll    |   12 +-
 .../test/CodeGen/AMDGPU/lds-misaligned-bug.ll |   12 +-
 ...vm.amdgcn.global.atomic.ordered.add.b64.ll |    2 +-
 .../AMDGPU/llvm.amdgcn.intersect_ray.ll       |   20 +-
 .../AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll  |    4 +-
 .../CodeGen/AMDGPU/llvm.amdgcn.writelane.ll   |    4 +-
 llvm/test/CodeGen/AMDGPU/llvm.log.ll          |    7 +-
 llvm/test/CodeGen/AMDGPU/llvm.log10.ll        |    7 +-
 llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll  |  212 +-
 llvm/test/CodeGen/AMDGPU/llvm.maximum.f32.ll  |  230 +-
 llvm/test/CodeGen/AMDGPU/llvm.maximum.f64.ll  |  563 +-
 llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll  |   22 +-
 llvm/test/CodeGen/AMDGPU/llvm.minimum.f32.ll  |  230 +-
 llvm/test/CodeGen/AMDGPU/llvm.minimum.f64.ll  |  563 +-
 llvm/test/CodeGen/AMDGPU/llvm.round.ll        |    6 +-
 llvm/test/CodeGen/AMDGPU/load-constant-i1.ll  |   41 +-
 llvm/test/CodeGen/AMDGPU/load-constant-i16.ll |   98 +-
 llvm/test/CodeGen/AMDGPU/load-constant-i32.ll |   12 +-
 llvm/test/CodeGen/AMDGPU/load-constant-i8.ll  |   36 +-
 llvm/test/CodeGen/AMDGPU/load-global-i16.ll   |    2 +-
 llvm/test/CodeGen/AMDGPU/load-global-i32.ll   |  341 +-
 ...er-buffer-fat-pointers-lastuse-metadata.ll |   16 +-
 ...uffer-fat-pointers-nontemporal-metadata.ll |   32 +-
 llvm/test/CodeGen/AMDGPU/maximumnum.bf16.ll   | 1742 +++--
 .../CodeGen/AMDGPU/memintrinsic-unroll.ll     |    6 +-
 llvm/test/CodeGen/AMDGPU/minimumnum.bf16.ll   | 1742 +++--
 .../CodeGen/AMDGPU/narrow_math_for_and.ll     |    4 +-
 llvm/test/CodeGen/AMDGPU/permute_i8.ll        |  102 +-
 llvm/test/CodeGen/AMDGPU/pr51516.mir          |    2 +-
 .../AMDGPU/promote-constOffset-to-imm.ll      |   28 +-
 llvm/test/CodeGen/AMDGPU/repeated-divisor.ll  |    4 +-
 llvm/test/CodeGen/AMDGPU/sdiv.ll              |  192 +-
 llvm/test/CodeGen/AMDGPU/select.f16.ll        |  334 +-
 .../AMDGPU/splitkit-getsubrangeformask.ll     |   14 +-
 llvm/test/CodeGen/AMDGPU/srem.ll              |   12 +-
 llvm/test/CodeGen/AMDGPU/store-local.128.ll   |    4 +-
 llvm/test/CodeGen/AMDGPU/vopd-combine.mir     |   12 +-
 llvm/test/CodeGen/PowerPC/p10-fi-elim.ll      |    4 +-
 ...lar-shift-by-byte-multiple-legalization.ll |   70 +-
 llvm/test/CodeGen/RISCV/memcmp-optsize.ll     |  132 +-
 llvm/test/CodeGen/RISCV/memcmp.ll             |  132 +-
 .../RISCV/rvv/fixed-vectors-int-buildvec.ll   |  298 +-
 .../RISCV/rvv/fixed-vectors-masked-gather.ll  |   40 +-
 llvm/test/CodeGen/RISCV/rvv/pr125306.ll       |   16 +-
 .../CodeGen/RISCV/unaligned-load-store.ll     |   54 +-
 llvm/test/CodeGen/RISCV/vararg.ll             |   18 +-
 ...lar-shift-by-byte-multiple-legalization.ll |  388 +-
 .../RISCV/wide-scalar-shift-legalization.ll   |  272 +-
 110 files changed, 10761 insertions(+), 10796 deletions(-)

diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 236c55cb04142..e283cf0f392f1 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -967,6 +967,12 @@ void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
 
 /// releaseSuccessors - Call releaseSucc on each of SU's successors.
 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
+  // Reset the next cluster successor. For example, suppose we want to cluster
+  // A, B, and C. After A is picked, we set B as the next cluster successor, but
+  // if we then pick D instead of B, we need to reset the next cluster successor
+  // because we have decided not to pick the cluster candidate B during
+  // pickNode(). Leaving B as the NextClusterSucc would just make things messy.
+  NextClusterSucc = nullptr;
   for (SDep &Succ : SU->Succs)
     releaseSucc(SU, &Succ);
 }
@@ -1004,6 +1010,7 @@ void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
 
 /// releasePredecessors - Call releasePred on each of SU's predecessors.
 void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
+  NextClusterPred = nullptr;
   for (SDep &Pred : SU->Preds)
     releasePred(SU, &Pred);
 }
diff --git a/llvm/test/CodeGen/AArch64/expand-select.ll b/llvm/test/CodeGen/AArch64/expand-select.ll
index 1ed2e09c6b4d4..7ca6adb1338d3 100644
--- a/llvm/test/CodeGen/AArch64/expand-select.ll
+++ b/llvm/test/CodeGen/AArch64/expand-select.ll
@@ -8,11 +8,11 @@ define void @foo(i32 %In1, <2 x i128> %In2, <2 x i128> %In3, ptr %Out) {
 ; CHECK-NEXT:    fmov s0, wzr
 ; CHECK-NEXT:    ldr x11, [sp]
 ; CHECK-NEXT:    fmov s1, w8
-; CHECK-NEXT:    ldp x9, x10, [sp, #8]
+; CHECK-NEXT:    ldp x8, x10, [sp, #8]
 ; CHECK-NEXT:    cmeq v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    tst w8, #0x1
-; CHECK-NEXT:    csel x8, x5, x9, ne
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    tst w9, #0x1
+; CHECK-NEXT:    csel x8, x5, x8, ne
 ; CHECK-NEXT:    csel x9, x4, x11, ne
 ; CHECK-NEXT:    stp x9, x8, [x10, #16]
 ; CHECK-NEXT:    csel x8, x3, x7, ne
@@ -36,14 +36,14 @@ define void @bar(i32 %In1, <2 x i96> %In2, <2 x i96> %In3, ptr %Out) {
 ; CHECK-NEXT:    ldr x10, [sp, #16]
 ; CHECK-NEXT:    fmov s1, w8
 ; CHECK-NEXT:    cmeq v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    tst w8, #0x1
-; CHECK-NEXT:    ldp x9, x8, [sp]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    tst w9, #0x1
+; CHECK-NEXT:    ldp x8, x9, [sp]
 ; CHECK-NEXT:    csel x11, x2, x6, ne
 ; CHECK-NEXT:    str x11, [x10]
-; CHECK-NEXT:    csel x9, x4, x9, ne
-; CHECK-NEXT:    csel x8, x5, x8, ne
-; CHECK-NEXT:    stur x9, [x10, #12]
+; CHECK-NEXT:    csel x8, x4, x8, ne
+; CHECK-NEXT:    stur x8, [x10, #12]
+; CHECK-NEXT:    csel x8, x5, x9, ne
 ; CHECK-NEXT:    csel x9, x3, x7, ne
 ; CHECK-NEXT:    str w8, [x10, #20]
 ; CHECK-NEXT:    str w9, [x10, #8]
diff --git a/llvm/test/CodeGen/AArch64/extbinopload.ll b/llvm/test/CodeGen/AArch64/extbinopload.ll
index 82114d60c4a93..75f3ffc9515e5 100644
--- a/llvm/test/CodeGen/AArch64/extbinopload.ll
+++ b/llvm/test/CodeGen/AArch64/extbinopload.ll
@@ -667,30 +667,30 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
 ; CHECK-NEXT:    add x10, x3, #12
 ; CHECK-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-NEXT:    ld1 { v0.s }[3], [x3], #4
-; CHECK-NEXT:    ldr s3, [x0, #12]
-; CHECK-NEXT:    ldp s2, s7, [x0, #4]
+; CHECK-NEXT:    ldr s4, [x0, #12]
+; CHECK-NEXT:    ldp s5, s2, [x2, #4]
 ; CHECK-NEXT:    ldr s6, [x2, #12]
-; CHECK-NEXT:    ldp s5, s4, [x2, #4]
-; CHECK-NEXT:    ld1 { v3.s }[1], [x11]
+; CHECK-NEXT:    ldp s3, s7, [x0, #4]
+; CHECK-NEXT:    ld1 { v4.s }[1], [x11]
 ; CHECK-NEXT:    ld1 { v6.s }[1], [x10]
-; CHECK-NEXT:    ld1 { v2.s }[1], [x9]
-; CHECK-NEXT:    ld1 { v4.s }[1], [x8]
+; CHECK-NEXT:    ld1 { v2.s }[1], [x8]
 ; CHECK-NEXT:    ld1 { v5.s }[1], [x3]
 ; CHECK-NEXT:    add x8, x1, #8
+; CHECK-NEXT:    ld1 { v3.s }[1], [x9]
 ; CHECK-NEXT:    ld1 { v7.s }[1], [x8]
-; CHECK-NEXT:    uaddl v2.8h, v2.8b, v3.8b
-; CHECK-NEXT:    ushll v4.8h, v4.8b, #0
-; CHECK-NEXT:    uaddl v3.8h, v5.8b, v6.8b
+; CHECK-NEXT:    ushll v2.8h, v2.8b, #0
+; CHECK-NEXT:    uaddl v3.8h, v3.8b, v4.8b
+; CHECK-NEXT:    uaddl v4.8h, v5.8b, v6.8b
 ; CHECK-NEXT:    uaddw v1.8h, v1.8h, v7.8b
-; CHECK-NEXT:    uaddw2 v4.8h, v4.8h, v0.16b
-; CHECK-NEXT:    ushll v0.4s, v2.4h, #3
-; CHECK-NEXT:    ushll v5.4s, v3.4h, #3
+; CHECK-NEXT:    uaddw2 v2.8h, v2.8h, v0.16b
+; CHECK-NEXT:    ushll v0.4s, v3.4h, #3
+; CHECK-NEXT:    ushll v5.4s, v4.4h, #3
+; CHECK-NEXT:    ushll2 v4.4s, v4.8h, #3
 ; CHECK-NEXT:    ushll2 v3.4s, v3.8h, #3
-; CHECK-NEXT:    ushll2 v2.4s, v2.8h, #3
 ; CHECK-NEXT:    uaddw v0.4s, v0.4s, v1.4h
-; CHECK-NEXT:    uaddw2 v1.4s, v2.4s, v1.8h
-; CHECK-NEXT:    uaddw2 v3.4s, v3.4s, v4.8h
-; CHECK-NEXT:    uaddw v2.4s, v5.4s, v4.4h
+; CHECK-NEXT:    uaddw2 v1.4s, v3.4s, v1.8h
+; CHECK-NEXT:    uaddw2 v3.4s, v4.4s, v2.8h
+; CHECK-NEXT:    uaddw v2.4s, v5.4s, v2.4h
 ; CHECK-NEXT:    ret
   %lp1 = load <4 x i8>, ptr %p
   store <4 x i8> %lp1, ptr %z
@@ -1073,24 +1073,24 @@ define <16 x i32> @extrause_ext2(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
 ; CHECK-NEXT:    ld1 { v6.s }[1], [x10]
 ; CHECK-NEXT:    ld1 { v5.s }[1], [x9]
 ; CHECK-NEXT:    ld1 { v7.s }[1], [x8]
-; CHECK-NEXT:    uaddl v16.8h, v2.8b, v3.8b
-; CHECK-NEXT:    uaddl v3.8h, v1.8b, v6.8b
-; CHECK-NEXT:    uaddl v2.8h, v4.8b, v5.8b
+; CHECK-NEXT:    uaddl v2.8h, v2.8b, v3.8b
+; CHECK-NEXT:    uaddl v1.8h, v1.8b, v6.8b
+; CHECK-NEXT:    uaddl v3.8h, v4.8b, v5.8b
 ; CHECK-NEXT:    uaddl v4.8h, v0.8b, v7.8b
-; CHECK-NEXT:    ushll v0.4s, v16.4h, #3
-; CHECK-NEXT:    ushll2 v1.4s, v16.8h, #3
-; CHECK-NEXT:    ushll2 v18.4s, v16.8h, #0
-; CHECK-NEXT:    ushll v6.4s, v2.4h, #3
-; CHECK-NEXT:    ushll2 v7.4s, v2.8h, #3
-; CHECK-NEXT:    ushll2 v5.4s, v2.8h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v2.8h, #0
+; CHECK-NEXT:    ushll v5.4s, v2.4h, #3
+; CHECK-NEXT:    ushll2 v16.4s, v2.8h, #3
+; CHECK-NEXT:    ushll v6.4s, v3.4h, #3
+; CHECK-NEXT:    ushll2 v7.4s, v3.8h, #3
 ; CHECK-NEXT:    ushll v17.4s, v2.4h, #0
-; CHECK-NEXT:    uaddw2 v1.4s, v1.4s, v3.8h
-; CHECK-NEXT:    uaddw v0.4s, v0.4s, v3.4h
+; CHECK-NEXT:    ushll2 v18.4s, v3.8h, #0
+; CHECK-NEXT:    ushll v19.4s, v3.4h, #0
+; CHECK-NEXT:    stp q17, q0, [x4]
+; CHECK-NEXT:    uaddw v0.4s, v5.4s, v1.4h
+; CHECK-NEXT:    uaddw2 v1.4s, v16.4s, v1.8h
 ; CHECK-NEXT:    uaddw2 v3.4s, v7.4s, v4.8h
 ; CHECK-NEXT:    uaddw v2.4s, v6.4s, v4.4h
-; CHECK-NEXT:    ushll v4.4s, v16.4h, #0
-; CHECK-NEXT:    stp q17, q5, [x4, #32]
-; CHECK-NEXT:    stp q4, q18, [x4]
+; CHECK-NEXT:    stp q19, q18, [x4, #32]
 ; CHECK-NEXT:    ret
   %lp1 = load <4 x i8>, ptr %p
   %p2 = getelementptr i8, ptr %p, i32 4
@@ -1176,19 +1176,20 @@ define <16 x i32> @extrause_shl(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
 ; CHECK-NEXT:    ld1 { v5.s }[1], [x9]
 ; CHECK-NEXT:    ld1 { v7.s }[1], [x8]
 ; CHECK-NEXT:    uaddl v2.8h, v2.8b, v3.8b
+; CHECK-NEXT:    uaddl v1.8h, v1.8b, v6.8b
 ; CHECK-NEXT:    uaddl v3.8h, v4.8b, v5.8b
-; CHECK-NEXT:    uaddl v4.8h, v1.8b, v6.8b
-; CHECK-NEXT:    ushll v5.4s, v2.4h, #3
-; CHECK-NEXT:    ushll2 v6.4s, v2.8h, #3
-; CHECK-NEXT:    uaddl v2.8h, v0.8b, v7.8b
-; CHECK-NEXT:    ushll v7.4s, v3.4h, #3
-; CHECK-NEXT:    ushll2 v16.4s, v3.8h, #3
-; CHECK-NEXT:    uaddw2 v1.4s, v6.4s, v4.8h
-; CHECK-NEXT:    uaddw v0.4s, v5.4s, v4.4h
-; CHECK-NEXT:    stp q5, q6, [x4]
-; CHECK-NEXT:    uaddw2 v3.4s, v16.4s, v2.8h
-; CHECK-NEXT:    uaddw v2.4s, v7.4s, v2.4h
-; CHECK-NEXT:    stp q7, q16, [x4, #32]
+; CHECK-NEXT:    uaddl v5.8h, v0.8b, v7.8b
+; CHECK-NEXT:    ushll v4.4s, v2.4h, #3
+; CHECK-NEXT:    ushll2 v2.4s, v2.8h, #3
+; CHECK-NEXT:    ushll v6.4s, v3.4h, #3
+; CHECK-NEXT:    ushll2 v7.4s, v3.8h, #3
+; CHECK-NEXT:    uaddw v0.4s, v4.4s, v1.4h
+; CHECK-NEXT:    uaddw2 v1.4s, v2.4s, v1.8h
+; CHECK-NEXT:    str q4, [x4]
+; CHECK-NEXT:    stp q2, q6, [x4, #16]
+; CHECK-NEXT:    uaddw2 v3.4s, v7.4s, v5.8h
+; CHECK-NEXT:    uaddw v2.4s, v6.4s, v5.4h
+; CHECK-NEXT:    str q7, [x4, #48]
 ; CHECK-NEXT:    ret
   %lp1 = load <4 x i8>, ptr %p
   %p2 = getelementptr i8, ptr %p, i32 4
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 9c4f0207b84ce..ae3b6a54a1f7f 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -2825,42 +2825,42 @@ define <32 x i64> @fptos_v32f32_v32i64(<32 x float> %a) {
 ; CHECK-SD-NEXT:    fcvtl v7.2d, v7.2s
 ; CHECK-SD-NEXT:    fcvtl2 v17.2d, v6.4s
 ; CHECK-SD-NEXT:    fcvtl v6.2d, v6.2s
-; CHECK-SD-NEXT:    fcvtl2 v18.2d, v5.4s
-; CHECK-SD-NEXT:    fcvtl v5.2d, v5.2s
+; CHECK-SD-NEXT:    fcvtl2 v21.2d, v2.4s
+; CHECK-SD-NEXT:    fcvtl v2.2d, v2.2s
 ; CHECK-SD-NEXT:    fcvtl2 v19.2d, v4.4s
 ; CHECK-SD-NEXT:    fcvtl v4.2d, v4.2s
+; CHECK-SD-NEXT:    fcvtl2 v18.2d, v5.4s
 ; CHECK-SD-NEXT:    fcvtl2 v20.2d, v3.4s
+; CHECK-SD-NEXT:    fcvtl v5.2d, v5.2s
 ; CHECK-SD-NEXT:    fcvtl v3.2d, v3.2s
 ; CHECK-SD-NEXT:    fcvtzs v16.2d, v16.2d
 ; CHECK-SD-NEXT:    fcvtzs v7.2d, v7.2d
 ; CHECK-SD-NEXT:    fcvtzs v17.2d, v17.2d
 ; CHECK-SD-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-SD-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-SD-NEXT:    fcvtzs v19.2d, v19.2d
+; CHECK-SD-NEXT:    fcvtzs v4.2d, v4.2d
 ; CHECK-SD-NEXT:    fcvtzs v18.2d, v18.2d
+; CHECK-SD-NEXT:    fcvtzs v20.2d, v20.2d
 ; CHECK-SD-NEXT:    fcvtzs v5.2d, v5.2d
-; CHECK-SD-NEXT:    fcvtzs v4.2d, v4.2d
 ; CHECK-SD-NEXT:    fcvtzs v3.2d, v3.2d
 ; CHECK-SD-NEXT:    stp q7, q16, [x8, #224]
-; CHECK-SD-NEXT:    fcvtl2 v7.2d, v2.4s
-; CHECK-SD-NEXT:    fcvtzs v16.2d, v19.2d
-; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
-; CHECK-SD-NEXT:    fcvtl v2.2d, v2.2s
-; CHECK-SD-NEXT:    fcvtl2 v5.2d, v0.4s
+; CHECK-SD-NEXT:    fcvtzs v16.2d, v21.2d
 ; CHECK-SD-NEXT:    stp q6, q17, [x8, #192]
-; CHECK-SD-NEXT:    fcvtl2 v6.2d, v1.4s
-; CHECK-SD-NEXT:    fcvtzs v17.2d, v20.2d
+; CHECK-SD-NEXT:    fcvtl2 v17.2d, v1.4s
 ; CHECK-SD-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-SD-NEXT:    stp q4, q19, [x8, #128]
+; CHECK-SD-NEXT:    stp q3, q20, [x8, #96]
+; CHECK-SD-NEXT:    stp q2, q16, [x8, #64]
+; CHECK-SD-NEXT:    fcvtl2 v16.2d, v0.4s
 ; CHECK-SD-NEXT:    fcvtl v0.2d, v0.2s
-; CHECK-SD-NEXT:    stp q4, q16, [x8, #128]
-; CHECK-SD-NEXT:    fcvtzs v7.2d, v7.2d
-; CHECK-SD-NEXT:    fcvtzs v2.2d, v2.2d
-; CHECK-SD-NEXT:    fcvtzs v4.2d, v6.2d
-; CHECK-SD-NEXT:    stp q3, q17, [x8, #96]
-; CHECK-SD-NEXT:    fcvtzs v3.2d, v5.2d
+; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
+; CHECK-SD-NEXT:    fcvtzs v6.2d, v17.2d
 ; CHECK-SD-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-SD-NEXT:    fcvtzs v4.2d, v16.2d
 ; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
-; CHECK-SD-NEXT:    stp q2, q7, [x8, #64]
-; CHECK-SD-NEXT:    stp q0, q3, [x8]
-; CHECK-SD-NEXT:    stp q1, q4, [x8, #32]
+; CHECK-SD-NEXT:    stp q1, q6, [x8, #32]
+; CHECK-SD-NEXT:    stp q0, q4, [x8]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: fptos_v32f32_v32i64:
@@ -2918,42 +2918,42 @@ define <32 x i64> @fptou_v32f32_v32i64(<32 x float> %a) {
 ; CHECK-SD-NEXT:    fcvtl v7.2d, v7.2s
 ; CHECK-SD-NEXT:    fcvtl2 v17.2d, v6.4s
 ; CHECK-SD-NEXT:    fcvtl v6.2d, v6.2s
-; CHECK-SD-NEXT:    fcvtl2 v18.2d, v5.4s
-; CHECK-SD-NEXT:    fcvtl v5.2d, v5.2s
+; CHECK-SD-NEXT:    fcvtl2 v21.2d, v2.4s
+; CHECK-SD-NEXT:    fcvtl v2.2d, v2.2s
 ; CHECK-SD-NEXT:    fcvtl2 v19.2d, v4.4s
 ; CHECK-SD-NEXT:    fcvtl v4.2d, v4.2s
+; CHECK-SD-NEXT:    fcvtl2 v18.2d, v5.4s
 ; CHECK-SD-NEXT:    fcvtl2 v20.2d, v3.4s
+; CHECK-SD-NEXT:    fcvtl v5.2d, v5.2s
 ; CHECK-SD-NEXT:    fcvtl v3.2d, v3.2s
 ; CHECK-SD-NEXT:    fcvtzu v16.2d, v16.2d
 ; CHECK-SD-NEXT:    fcvtzu v7.2d, v7.2d
 ; CHECK-SD-NEXT:    fcvtzu v17.2d, v17.2d
 ; CHECK-SD-NEXT:    fcvtzu v6.2d, v6.2d
+; CHECK-SD-NEXT:    fcvtzu v2.2d, v2.2d
+; CHECK-SD-NEXT:    fcvtzu v19.2d, v19.2d
+; CHECK-SD-NEXT:    fcvtzu v4.2d, v4.2d
 ; CHECK-SD-NEXT:    fcvtzu v18.2d, v18.2d
+; CHECK-SD-NEXT:    fcvtzu v20.2d, v20.2d
 ; CHECK-SD-NEXT:    fcvtzu v5.2d, v5.2d
-; CHECK-SD-NEXT:    fcvtzu v4.2d, v4.2d
 ; CHECK-SD-NEXT:    fcvtzu v3.2d, v3.2d
 ; CHECK-SD-NEXT:    stp q7, q16, [x8, #224]
-; CHECK-SD-NEXT:    fcvtl2 v7.2d, v2.4s
-; CHECK-SD-NEXT:    fcvtzu v16.2d, v19.2d
-; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
-; CHECK-SD-NEXT:    fcvtl v2.2d, v2.2s
-; CHECK-SD-NEXT:    fcvtl2 v5.2d, v0.4s
+; CHECK-SD-NEXT:    fcvtzu v16.2d, v21.2d
 ; CHECK-SD-NEXT:    stp q6, q17, [x8, #192]
-; CHECK-SD-NEXT:    fcvtl2 v6.2d, v1.4s
-; CHECK-SD-NEXT:    fcvtzu v17.2d, v20.2d
+; CHECK-SD-NEXT:    fcvtl2 v17.2d, v1.4s
 ; CHECK-SD-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-SD-NEXT:    stp q4, q19, [x8, #128]
+; CHECK-SD-NEXT:    stp q3, q20, [x8, #96]
+; CHECK-SD-NEXT:    stp q2, q16, [x8, #64]
+; CHECK-SD-NEXT:    fcvtl2 v16.2d, v0.4s
 ; CHECK-SD-NEXT:    fcvtl v0.2d, v0.2s
-; CHECK-SD-NEXT:    stp q4, q16, [x8, #128]
-; CHECK-SD-NEXT:    fcvtzu v7.2d, v7.2d
-; CHECK-SD-NEXT:    fcvtzu v2.2d, v2.2d
-; CHECK-SD-NEXT:    fcvtzu v4.2d, v6.2d
-; CHECK-SD-NEXT:    stp q3, q17, [x8, #96]
-; CHECK-SD-NEXT:    fcvtzu v3.2d, v5.2d
+; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
+; CHECK-SD-NEXT:    fcvtzu v6.2d, v17.2d
 ; CHECK-SD-NEXT:    fcvtzu v1.2d, v1.2d
+; CHECK-SD-NEXT:    fcvtzu v4.2d, v16.2d
 ; CHECK-SD-NEXT:    fcvtzu v0.2d, v0.2d
-; CHECK-SD-NEXT:    stp q2, q7, [x8, #64]
-; CHECK-SD-NEXT:    stp q0, q3, [x8]
-; CHECK-SD-NEXT:    stp q1, q4, [x8, #32]
+; CHECK-SD-NEXT:    stp q1, q6, [x8, #32]
+; CHECK-SD-NEXT:    stp q0, q4, [x8]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: fptou_v32f32_v32i64:
@@ -5244,45 +5244,45 @@ define <32 x i64> @fptos_v32f16_v32i64(<32 x half> %a) {
 ; CHECK-GI-FP16-NEXT:    mov v17.d[1], v23.d[0]
 ; CHECK-GI-FP16-NEXT:    mov v1.d[1], v29.d[0]
 ; CHECK-GI-FP16-NEXT:    mov v19.d[1], v30.d[0]
-; CHECK-GI-FP16-NEXT:    mov h21, v3.h[1]
+; CHECK-GI-FP16-NEXT:    mov h16, v3.h[1]
 ; CHECK-GI-FP16-NEXT:    stp q6, q5, [x8, #32]
 ; CHECK-GI-FP16-NEXT:    mov v20.d[1], v22.d[0]
-; CHECK-GI-FP16-NEXT:    mov h16, v3.h[2]
+; CHECK-GI-FP16-NEXT:    mov h21, v3.h[2]
 ; CHECK-GI-FP16-NEXT:    mov h7, v3.h[3]
 ; CHECK-GI-FP16-NEXT:    mov h22, v3.h[4]
-; CHECK-GI-FP16-NEXT:    mov h23, v3.h[5]
-; CHECK-GI-FP16-NEXT:    mov h6, v3.h[6]
+; CHECK-GI-FP16-NEXT:    mov h6, v3.h[5]
+; CHECK-GI-FP16-NEXT:    mov h23, v3.h[6]
 ; CHECK-GI-FP16-NEXT:    mov h5, v3.h[7]
 ; CHECK-GI-FP16-NEXT:    mov v18.d[1], v24.d[0]
 ; CHECK-GI-FP16-NEXT:    mov v2.d[1], v25.d[0]
 ; CHECK-GI-FP16-NEXT:    fcvt d3, h3
-; CHECK-GI-FP16-NEXT:    fcvt d21, h21
-; CHECK-GI-FP16-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-GI-FP16-NEXT:    fcvt d16, h16
+; CHECK-GI-FP16-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-GI-FP16-NEXT:    fcvt d21, h21
 ; CHECK-GI-FP16-NEXT:    fcvtzs v4.2d, v4.2d
 ; CHECK-GI-FP16-NEXT:    fcvt d7, h7
 ; CHECK-GI-FP16-NEXT:    fcvt d22, h22
-; CHECK-GI-FP16-NEXT:    fcvt d23, h23
-; CHECK-GI-FP16-NEXT:    fcvtzs v1.2d, v1.2d
 ; CHECK-GI-FP16-NEXT:    fcvt d6, h6
+; CHECK-GI-FP16-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-GI-FP16-NEXT:    fcvt d23, h23
 ; CHECK-GI-FP16-NEXT:    fcvt d5, h5
 ; CHECK-GI-FP16-NEXT:    fcvtzs v19.2d, v19.2d
-; CHECK-GI-FP16-NEXT:    mov v3.d[1], v21.d[0]
-; CHECK-GI-FP16-NEXT:    fcvtzs v20.2d, v20.2d
+; CHECK-GI-FP16-NEXT:    mov v3.d[1], v16.d[0]
+; CHECK-GI-FP16-NEXT:    fcvtzs v16.2d, v20.2d
 ; CHECK-GI-FP16-NEXT:    stp q0, q4, [x8, #64]
 ; CHECK-GI-FP16-NEXT:    fcvtzs v0.2d, v17.2d
 ; CHECK-GI-FP16-NEXT:    fcvtzs v4.2d, v18.2d
-; CHECK-GI-FP16-NEXT:    mov v16.d[1], v7.d[0]
-; CHECK-GI-FP16-NEXT:    mov v22.d[1], v23.d[0]
-; CHECK-GI-FP16-NEXT:    mov v6.d[1], v5.d[0]
+; CHECK-GI-FP16-NEXT:    mov v21.d[1], v7.d[0]
+; CHECK-GI-FP16-NEXT:    mov v22.d[1], v6.d[0]
+; CHECK-GI-FP16-NEXT:    mov v23.d[1], v5.d[0]
 ; CHECK-GI-FP16-NEXT:    stp q1, q19, [x8, #96]
 ; CHECK-GI-FP16-NEXT:    fcvtzs v1.2d, v2.2d
 ; CHECK-GI-FP16-NEXT:    fcvtzs v2.2d, v3.2d
-; CHECK-GI-FP16-NEXT:    stp q20, q0, [x8, #128]
-; CHECK-GI-FP16-NEXT:    fcvtzs v0.2d, v16.2d
+; CHECK-GI-FP16-NEXT:    stp q16, q0, [x8, #128]
+; CHECK-GI-FP16-NEXT:    fcvtzs v0.2d, v21.2d
 ; CHECK-GI-FP16-NEXT:    fcvtzs v3.2d, v22.2d
 ; CHECK-GI-FP16-NEXT:    stp q4, q1, [x8, #160]
-; CHECK-GI-FP16-NEXT:    fcvtzs v1.2d, v6.2d
+; CHECK-GI-FP16-NEXT:    fcvtzs v1.2d, v23.2d
 ; CHECK-GI-FP16-NEXT:    stp q2, q0, [x8, #192]
 ; CHECK-GI-FP16-NEXT:    stp q3, q1, [x8, #224]
 ; CHECK-GI-FP16-NEXT:    ret
@@ -5645,45 +5645,45 @@ define <32 x i64> @fptou_v32f16_v32i64(<32 x half> %a) {
 ; CHECK-GI-FP16-NEXT:    mov v17.d[1], v23.d[0]
 ; CHECK-GI-FP16-NEXT:    mov v1.d[1], v29.d[0]
 ; CHECK-GI-FP16-NEXT:    mov v19.d[1], v30.d[0]
-; CHECK-GI-FP16-NEXT:    mov h21, v3.h[1]
+; CHECK-GI-FP16-NEXT:    mov h16, v3.h[1]
 ; CHECK-GI-FP16-NEXT:    stp q6, q5, [x8, #32]
 ; CHECK-GI-FP16-NEXT:    mov v20.d[1], v22.d[0]
-; CHECK-GI-FP16-NEXT:    mov h16, v3.h[2]
+; CHECK-GI-FP16-NEXT:    mov h21, v3.h[2]
 ; CHECK-GI-FP16-NEXT:    mov h7, v3.h[3]
 ; CHECK-GI-FP16-NEXT:    mov h22, v3.h[4]
-; CHECK-GI-FP16-NEXT:    mov h23, v3.h[5]
-; CHECK-GI-FP16-NEXT:    mov h6, v3.h[6]
+; CHECK-GI-FP16-NEXT:    mov h6, v3.h[5]
+; CHECK-GI-FP16-NEXT:    mov h23, v3.h[6]
 ; CHECK-GI-FP16-NEXT:    mov h5, v3.h[7]
 ; CHECK-GI-FP16-NEXT:    mov v18.d[1], v24.d[0]
 ; CHECK-GI-FP16-NEXT:    mov v2.d[1], v25.d[0]
 ; CHECK-GI-FP16-NEXT:    fcvt d3, h3
-; CHECK-GI-FP16-NEXT:    fcvt d21, h21
-; CHECK-GI-FP16-NEXT:    fcvtzu v0.2d, v0.2d
 ; CHECK-GI-FP16-NEXT:    fcvt d16, h16
+; CHECK-GI-FP16-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-GI-FP16-NEXT:    fcvt d21, h21
 ; CHECK-GI-FP16-NEXT:    fcvtzu v4.2d, v4.2d
 ; CHECK-GI-FP16-NEXT:    fcvt d7, h7
 ; CHECK-GI-FP16-NEXT:    fcvt d22, h22
-; CHECK-GI-FP16-NEXT:    fcvt d23, h23
-; CHECK-GI-FP16-NEXT:    fcvtzu v1.2d, v1.2d
 ; CHECK-GI-FP16-NEXT:    fcvt d6, h6
+; CHECK-GI-FP16-NEXT:    fcvtzu v1.2d, v1.2d
+; CHECK-GI-FP16-NEXT:    fcvt d23, h23
 ; CHECK-GI-FP16-NEXT:    fcvt d5, h5
 ; CHECK-GI-FP16-NEXT:    fcvtzu v19.2d, v19.2d
-; CHECK-GI-FP16-NEXT:    mov v3.d[1], v21.d[0]
-; CHECK-GI-FP16-NEXT:    fcvtzu v20.2d, v20.2d
+; CHECK-GI-FP16-NEXT:    mov v3.d[1], v16.d[0]
+; CHECK-GI-FP16-NEXT:    fcvtzu v16.2d, v20.2d
 ; CHECK-GI-FP16-NEXT:    stp q0, q4, [x8, #64]
 ; CHECK-GI-FP16-NEXT:    fcvtzu v0.2d, v17.2d
 ; CHECK-GI-FP16-NEXT:    fcvtzu v4.2d, v18.2d
-; CHECK-GI-FP16-NEXT:    mov v16.d[1], v7.d[0]
-; CHECK-GI-FP16-NEXT:    mov v22.d[1], v23.d[0]
-; CHECK-GI-FP16-NEXT:    mov v6.d[1], v5.d[0]
+; CHECK-GI-FP16-NEXT:    mov v21.d[1], v7.d[0]
+; CHECK-GI-FP16-NEXT:    mov v22.d[1], v6.d[0]
+; CHECK-GI-FP16-NEXT:    mov v23.d[1], v5.d[0]
 ; CHECK-GI-FP16-NEXT:    stp q1, q19, [x8, #96]
 ; CHECK-GI-FP16-NEXT:    fcvtzu v1.2d, v2.2d
 ; CHECK-GI-FP16-NEXT:    fcvtzu v2.2d, v3.2d
-; CHECK-GI-FP16-NEXT:    stp q20, q0, [x8, #128]
-; CHECK-GI-FP16-NEXT:    fcvtzu v0.2d, v16.2d
+; CHECK-GI-FP16-NEXT:    stp q16, q0, [x8, #128]
+; CHECK-GI-FP16-NEXT:    fcvtzu v0.2d, v21.2d
 ; CHECK-GI-FP16-NEXT:    fcvtzu v3.2d, v22.2d
 ; CHECK-GI-FP16-NEXT:    stp q4, q1, [x8, #160]
-; CHECK-GI-FP16-NEXT:    fcvtzu v1.2d, v6.2d
+; CHECK-GI-FP16-NEXT:    fcvtzu v1.2d, v23.2d
 ; CHECK-GI-FP16-NEXT:    stp q2, q0, [x8, #192]
 ; CHECK-GI-FP16-NEXT:    stp q3, q1, [x8, #224]
 ; CHECK-GI-FP16-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index f2c4e976b8c16..b1b5154a57b4d 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -3521,31 +3521,31 @@ define <8 x i100> @test_unsigned_v8f16_v8i100(<8 x half> %f) {
 ; CHECK-NEXT:    fmov s0, s8
 ; CHECK-NEXT:    bl __fixunssfti
 ; CHECK-NEXT:    extr x8, x21, x27, #28
-; CHECK-NEXT:    extr x9, x29, x20, #28
+; CHECK-NEXT:    str x24, [x19]
+; CHECK-NEXT:    bfi x22, x20, #36, #28
 ; CHECK-NEXT:    stur x28, [x19, #75]
+; CHECK-NEXT:    extr x9, x29, x20, #28
 ; CHECK-NEXT:    fcmp s8, #0.0
-; CHECK-NEXT:    bfi x22, x20, #36, #28
-; CHECK-NEXT:    lsr x11, x29, #28
 ; CHECK-NEXT:    stur x8, [x19, #41]
-; CHECK-NEXT:    str x9, [x19, #16]
-; CHECK-NEXT:    ldr x10, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    ldr x11, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    stp x22, x9, [x19, #8]
+; CHECK-NEXT:    lsr x9, x29, #28
 ; CHECK-NEXT:    csel x8, xzr, x0, lt
-; CHECK-NEXT:    csel x9, xzr, x1, lt
+; CHECK-NEXT:    csel x10, xzr, x1, lt
 ; CHECK-NEXT:    fcmp s8, s9
-; CHECK-NEXT:    stp x24, x22, [x19]
-; CHECK-NEXT:    stur x10, [x19, #50]
-; CHECK-NEXT:    lsr x10, x21, #28
-; CHECK-NEXT:    strb w11, [x19, #24]
-; CHECK-NEXT:    strb w10, [x19, #49]
-; CHECK-NEXT:    csel x9, x23, x9, gt
+; CHECK-NEXT:    stur x11, [x19, #50]
+; CHECK-NEXT:    lsr x11, x21, #28
+; CHECK-NEXT:    strb w9, [x19, #24]
+; CHECK-NEXT:    strb w11, [x19, #49]
+; CHECK-NEXT:    csel x10, x23, x10, gt
 ; CHECK-NEXT:    csinv x8, x8, xzr, le
 ; CHECK-NEXT:    ldp x12, x11, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    bfi x9, x27, #36, #28
+; CHECK-NEXT:    bfi x10, x27, #36, #28
 ; CHECK-NEXT:    stur x8, [x19, #25]
-; CHECK-NEXT:    stur x9, [x19, #33]
-; CHECK-NEXT:    extr x10, x11, x12, #28
+; CHECK-NEXT:    stur x10, [x19, #33]
+; CHECK-NEXT:    extr x9, x11, x12, #28
 ; CHECK-NEXT:    bfi x26, x12, #36, #28
-; CHECK-NEXT:    stur x10, [x19, #91]
+; CHECK-NEXT:    stur x9, [x19, #91]
 ; CHECK-NEXT:    ldp x10, x9, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    stur x26, [x19, #83]
 ; CHECK-NEXT:    extr x8, x9, x10, #28
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index fb2bdb4d63f47..2c8642be80922 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -2243,46 +2243,46 @@ entry:
 define <32 x double> @stofp_v32i32_v32f64(<32 x i32> %a) {
 ; CHECK-SD-LABEL: stofp_v32i32_v32f64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    sshll2 v16.2d, v7.4s, #0
-; CHECK-SD-NEXT:    sshll v7.2d, v7.2s, #0
 ; CHECK-SD-NEXT:    sshll2 v17.2d, v6.4s, #0
 ; CHECK-SD-NEXT:    sshll v6.2d, v6.2s, #0
-; CHECK-SD-NEXT:    sshll2 v19.2d, v4.4s, #0
+; CHECK-SD-NEXT:    sshll2 v16.2d, v7.4s, #0
+; CHECK-SD-NEXT:    sshll v19.2d, v3.2s, #0
+; CHECK-SD-NEXT:    sshll v7.2d, v7.2s, #0
+; CHECK-SD-NEXT:    sshll2 v3.2d, v3.4s, #0
+; CHECK-SD-NEXT:    sshll2 v20.2d, v4.4s, #0
 ; CHECK-SD-NEXT:    sshll v4.2d, v4.2s, #0
 ; CHECK-SD-NEXT:    sshll2 v18.2d, v5.4s, #0
-; CHECK-SD-NEXT:    sshll v5.2d, v5.2s, #0
-; CHECK-SD-NEXT:    scvtf v16.2d, v16.2d
-; CHECK-SD-NEXT:    scvtf v7.2d, v7.2d
 ; CHECK-SD-NEXT:    scvtf v17.2d, v17.2d
 ; CHECK-SD-NEXT:    scvtf v6.2d, v6.2d
+; CHECK-SD-NEXT:    scvtf v16.2d, v16.2d
+; CHECK-SD-NEXT:    scvtf v7.2d, v7.2d
+; CHECK-SD-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-SD-NEXT:    sshll2 v21.2d, v2.4s, #0
+; CHECK-SD-NEXT:    scvtf v20.2d, v20.2d
 ; CHECK-SD-NEXT:    scvtf v4.2d, v4.2d
+; CHECK-SD-NEXT:    sshll v5.2d, v5.2s, #0
+; CHECK-SD-NEXT:    sshll v2.2d, v2.2s, #0
 ; CHECK-SD-NEXT:    scvtf v18.2d, v18.2d
-; CHECK-SD-NEXT:    scvtf v5.2d, v5.2d
-; CHECK-SD-NEXT:    stp q7, q16, [x8, #224]
-; CHECK-SD-NEXT:    sshll2 v16.2d, v3.4s, #0
-; CHECK-SD-NEXT:    sshll v3.2d, v3.2s, #0
-; CHECK-SD-NEXT:    scvtf v7.2d, v19.2d
 ; CHECK-SD-NEXT:    stp q6, q17, [x8, #192]
-; CHECK-SD-NEXT:    sshll2 v17.2d, v2.4s, #0
-; CHECK-SD-NEXT:    sshll v2.2d, v2.2s, #0
-; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
-; CHECK-SD-NEXT:    scvtf v6.2d, v16.2d
-; CHECK-SD-NEXT:    scvtf v3.2d, v3.2d
-; CHECK-SD-NEXT:    sshll2 v16.2d, v1.4s, #0
-; CHECK-SD-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-SD-NEXT:    scvtf v5.2d, v17.2d
-; CHECK-SD-NEXT:    stp q4, q7, [x8, #128]
+; CHECK-SD-NEXT:    scvtf v17.2d, v19.2d
+; CHECK-SD-NEXT:    stp q7, q16, [x8, #224]
 ; CHECK-SD-NEXT:    sshll2 v7.2d, v0.4s, #0
 ; CHECK-SD-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT:    stp q4, q20, [x8, #128]
+; CHECK-SD-NEXT:    scvtf v16.2d, v21.2d
+; CHECK-SD-NEXT:    scvtf v5.2d, v5.2d
 ; CHECK-SD-NEXT:    scvtf v2.2d, v2.2d
-; CHECK-SD-NEXT:    scvtf v4.2d, v16.2d
-; CHECK-SD-NEXT:    stp q3, q6, [x8, #96]
-; CHECK-SD-NEXT:    scvtf v1.2d, v1.2d
-; CHECK-SD-NEXT:    scvtf v3.2d, v7.2d
+; CHECK-SD-NEXT:    stp q17, q3, [x8, #96]
+; CHECK-SD-NEXT:    sshll2 v3.2d, v1.4s, #0
+; CHECK-SD-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT:    scvtf v4.2d, v7.2d
 ; CHECK-SD-NEXT:    scvtf v0.2d, v0.2d
-; CHECK-SD-NEXT:    stp q2, q5, [x8, #64]
-; CHECK-SD-NEXT:    stp q1, q4, [x8, #32]
-; CHECK-SD-NEXT:    stp q0, q3, [x8]
+; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
+; CHECK-SD-NEXT:    stp q2, q16, [x8, #64]
+; CHECK-SD-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-SD-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-SD-NEXT:    stp q0, q4, [x8]
+; CHECK-SD-NEXT:    stp q1, q3, [x8, #32]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: stofp_v32i32_v32f64:
@@ -2336,46 +2336,46 @@ entry:
 define <32 x double> @utofp_v32i32_v32f64(<32 x i32> %a) {
 ; CHECK-SD-LABEL: utofp_v32i32_v32f64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    ushll2 v16.2d, v7.4s, #0
-; CHECK-SD-NEXT:    ushll v7.2d, v7.2s, #0
 ; CHECK-SD-NEXT:    ushll2 v17.2d, v6.4s, #0
 ; CHECK-SD-NEXT:    ushll v6.2d, v6.2s, #0
-; CHECK-SD-NEXT:    ushll2 v19.2d, v4.4s, #0
+; CHECK-SD-NEXT:    ushll2 v16.2d, v7.4s, #0
+; CHECK-SD-NEXT:    ushll v19.2d, v3.2s, #0
+; CHECK-SD-NEXT:    ushll v7.2d, v7.2s, #0
+; CHECK-SD-NEXT:    ushll2 v3.2d, v3.4s, #0
+; CHECK-SD-NEXT:    ushll2 v20.2d, v4.4s, #0
 ; CHECK-SD-NEXT:    ushll v4.2d, v4.2s, #0
 ; CHECK-SD-NEXT:    ushll2 v18.2d, v5.4s, #0
-; CHECK-SD-NEXT:    ushll v5.2d, v5.2s, #0
-; CHECK-SD-NEXT:    ucvtf v16.2d, v16.2d
-; CHECK-SD-NEXT:    ucvtf v7.2d, v7.2d
 ; CHECK-SD-NEXT:    ucvtf v17.2d, v17.2d
 ; CHECK-SD-NEXT:    ucvtf v6.2d, v6.2d
+; CHECK-SD-NEXT:    ucvtf v16.2d, v16.2d
+; CHECK-SD-NEXT:    ucvtf v7.2d, v7.2d
+; CHECK-SD-NEXT:    ucvtf v3.2d, v3.2d
+; CHECK-SD-NEXT:    ushll2 v21.2d, v2.4s, #0
+; CHECK-SD-NEXT:    ucvtf v20.2d, v20.2d
 ; CHECK-SD-NEXT:    ucvtf v4.2d, v4.2d
+; CHECK-SD-NEXT:    ushll v5.2d, v5.2s, #0
+; CHECK-SD-NEXT:    ushll v2.2d, v2.2s, #0
 ; CHECK-SD-NEXT:    ucvtf v18.2d, v18.2d
-; CHECK-SD-NEXT:    ucvtf v5.2d, v5.2d
-; CHECK-SD-NEXT:    stp q7, q16, [x8, #224]
-; CHECK-SD-NEXT:    ushll2 v16.2d, v3.4s, #0
-; CHECK-SD-NEXT:    ushll v3.2d, v3.2s, #0
-; CHECK-SD-NEXT:    ucvtf v7.2d, v19.2d
 ; CHECK-SD-NEXT:    stp q6, q17, [x8, #192]
-; CHECK-SD-NEXT:    ushll2 v17.2d, v2.4s, #0
-; CHECK-SD-NEXT:    ushll v2.2d, v2.2s, #0
-; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
-; CHECK-SD-NEXT:    ucvtf v6.2d, v16.2d
-; CHECK-SD-NEXT:    ucvtf v3.2d, v3.2d
-; CHECK-SD-NEXT:    ushll2 v16.2d, v1.4s, #0
-; CHECK-SD-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-SD-NEXT:    ucvtf v5.2d, v17.2d
-; CHECK-SD-NEXT:    stp q4, q7, [x8, #128]
+; CHECK-SD-NEXT:    ucvtf v17.2d, v19.2d
+; CHECK-SD-NEXT:    stp q7, q16, [x8, #224]
 ; CHECK-SD-NEXT:    ushll2 v7.2d, v0.4s, #0
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT:    stp q4, q20, [x8, #128]
+; CHECK-SD-NEXT:    ucvtf v16.2d, v21.2d
+; CHECK-SD-NEXT:    ucvtf v5.2d, v5.2d
 ; CHECK-SD-NEXT:    ucvtf v2.2d, v2.2d
-; CHECK-SD-NEXT:    ucvtf v4.2d, v16.2d
-; CHECK-SD-NEXT:    stp q3, q6, [x8, #96]
-; CHECK-SD-NEXT:    ucvtf v1.2d, v1.2d
-; CHECK-SD-NEXT:    ucvtf v3.2d, v7.2d
+; CHECK-SD-NEXT:    stp q17, q3, [x8, #96]
+; CHECK-SD-NEXT:    ushll2 v3.2d, v1.4s, #0
+; CHECK-SD-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT:    ucvtf v4.2d, v7.2d
 ; CHECK-SD-NEXT:    ucvtf v0.2d, v0.2d
-; CHECK-SD-NEXT:    stp q2, q5, [x8, #64]
-; CHECK-SD-NEXT:    stp q1, q4, [x8, #32]
-; CHECK-SD-NEXT:    stp q0, q3, [x8]
+; CHECK-SD-NEXT:    stp q5, q18, [x8, #160]
+; CHECK-SD-NEXT:    stp q2, q16, [x8, #64]
+; CHECK-SD-NEXT:    ucvtf v3.2d, v3.2d
+; CHECK-SD-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-SD-NEXT:    stp q0, q4, [x8]
+; CHECK-SD-NEXT:    stp q1, q3, [x8, #32]
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: utofp_v32i32_v32f64:
@@ -2863,7 +2863,7 @@ define <32 x double> @stofp_v32i16_v32f64(<32 x i16> %a) {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    sshll2 v4.4s, v3.8h, #0
 ; CHECK-SD-NEXT:    sshll2 v5.4s, v2.8h, #0
-; CHECK-SD-NEXT:    sshll2 v7.4s, v1.8h, #0
+; CHECK-SD-NEXT:    sshll2 v16.4s, v1.8h, #0
 ; CHECK-SD-NEXT:    sshll2 v17.4s, v0.8h, #0
 ; CHECK-SD-NEXT:    sshll v3.4s, v3.4h, #0
 ; CHECK-SD-NEXT:    sshll v1.4s, v1.4h, #0
@@ -2871,43 +2871,43 @@ define <32 x double> @stofp_v32i16_v32f64(<32 x i16> %a) {
 ; CHECK-SD-NEXT:    sshll v0.4s, v0.4h, #0
 ; CHECK-SD-NEXT:    sshll2 v6.2d, v4.4s, #0
 ; CHECK-SD-NEXT:    sshll v4.2d, v4.2s, #0
-; CHECK-SD-NEXT:    sshll2 v16.2d, v5.4s, #0
+; CHECK-SD-NEXT:    sshll2 v7.2d, v5.4s, #0
 ; CHECK-SD-NEXT:    sshll v5.2d, v5.2s, #0
-; CHECK-SD-NEXT:    sshll2 v18.2d, v7.4s, #0
-; CHECK-SD-NEXT:    sshll v7.2d, v7.2s, #0
+; CHECK-SD-NEXT:    sshll2 v18.2d, v16.4s, #0
+; CHECK-SD-NEXT:    sshll v16.2d, v16.2s, #0
 ; CHECK-SD-NEXT:    sshll2 v19.2d, v17.4s, #0
 ; CHECK-SD-NEXT:    scvtf v6.2d, v6.2d
 ; CHECK-SD-NEXT:    scvtf v4.2d, v4.2d
-; CHECK-SD-NEXT:    scvtf v16.2d, v16.2d
-; CHECK-SD-NEXT:    scvtf v5.2d, v5.2d
 ; CHECK-SD-NEXT:    scvtf v7.2d, v7.2d
+; CHECK-SD-NEXT:    scvtf v5.2d, v5.2d
+; CHECK-SD-NEXT:    scvtf v16.2d, v16.2d
 ; CHECK-SD-NEXT:    stp q4, q6, [x8, #224]
 ; CHECK-SD-NEXT:    sshll v6.2d, v17.2s, #0
 ; CHECK-SD-NEXT:    scvtf v17.2d, v18.2d
-; CHECK-SD-NEXT:    sshll2 v4.2d, v3.4s, #0
-; CHECK-SD-NEXT:    stp q5, q16, [x8, #160]
+; CHECK-SD-NEXT:    stp q5, q7, [x8, #160]
+; CHECK-SD-NEXT:    sshll2 v7.2d, v3.4s, #0
 ; CHECK-SD-NEXT:    sshll v3.2d, v3.2s, #0
-; CHECK-SD-NEXT:    scvtf v16.2d, v19.2d
+; CHECK-SD-NEXT:    scvtf v4.2d, v19.2d
 ; CHECK-SD-NEXT:    scvtf v5.2d, v6.2d
 ; CHECK-SD-NEXT:    sshll2 v6.2d, v2.4s, #0
 ; CHECK-SD-NEXT:    sshll v2.2d, v2.2s, #0
-; CHECK-SD-NEXT:    scvtf v4.2d, v4.2d
+; CHECK-SD-NEXT:    scvtf v7.2d, v7.2d
 ; CHECK-SD-NEXT:    scvtf v3.2d, v3.2d
-; CHECK-SD-NEXT:    stp q7, q17, [x8, #96]
-; CHECK-SD-NEXT:    sshll2 v7.2d, v1.4s, #0
+; CHECK-SD-NEXT:    stp q16, q17, [x8, #96]
+; CHECK-SD-NEXT:    sshll2 v16.2d, v1.4s, #0
 ; CHECK-SD-NEXT:    sshll v1.2d, v1.2s, #0
 ; CHECK-SD-NEXT:    scvtf v6.2d, v6.2d
 ; CHECK-SD-NEXT:    scvtf v2.2d, v2.2d
-; CHECK-SD-NEXT:    stp q5, q16, [x8, #32]
-; CHECK-SD-NEXT:    sshll2 v5.2d, v0.4s, #0
+; CHECK-SD-NEXT:    stp q5, q4, [x8, #32]
+; CHECK-SD-NEXT:    sshll2 v4.2d, v0.4s, #0
 ; CHECK-SD-NEXT:    sshll v0.2d, v0.2s, #0
-; CHECK-SD-NEXT:    scvtf v7.2d, v7.2d
-; CHECK-SD-NEXT:    stp q3, q4, [x8, #192]
+; CHECK-SD-NEXT:    scvtf v5.2d, v16.2d
+; CHECK-SD-NEXT:    stp q3, q7, [x8, #192]
 ; CHECK-SD-NEXT:    scvtf v1.2d, v1.2d
-; CHECK-SD-NEXT:    scvtf v3.2d, v5.2d
+; CHECK-SD-NEXT:    scvtf v3.2d, v4.2d
 ; CHECK-SD-NEXT:    scvtf v0.2d, v0.2d
 ; CHECK-SD-NEXT:    stp q2, q6, [x8, #128]
-; CHECK-SD-NEXT:    stp q1, q7, [x8, #64]
+; CHECK-SD-NEXT:    stp q1, q5, [x8, #64]
 ; CHECK-SD-NEXT:    stp q0, q3, [x8]
 ; CHECK-SD-NEXT:    ret
 ;
@@ -2972,7 +2972,7 @@ define <32 x double> @utofp_v32i16_v32f64(<32 x i16> %a) {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    ushll2 v4.4s, v3.8h, #0
 ; CHECK-SD-NEXT:    ushll2 v5.4s, v2.8h, #0
-; CHECK-SD-NEXT:    ushll2 v7.4s, v1.8h, #0
+; CHECK-SD-NEXT:    ushll2 v16.4s, v1.8h, #0
 ; CHECK-SD-NEXT:    ushll2 v17.4s, v0.8h, #0
 ; CHECK-SD-NEXT:    ushll v3.4s, v3.4h, #0
 ; CHECK-SD-NEXT:    ushll v1.4s, v1.4h, #0
@@ -2980,43 +2980,43 @@ define <32 x double> @utofp_v32i16_v32f64(<32 x i16> %a) {
 ; CHECK-SD-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-SD-NEXT:    ushll2 v6.2d, v4.4s, #0
 ; CHECK-SD-NEXT:    ushll v4.2d, v4.2s, #0
-; CHECK-SD-NEXT:    ushll2 v16.2d, v5.4s, #0
+; CHECK-SD-NEXT:    ushll2 v7.2d, v5.4s, #0
 ; CHECK-SD-NEXT:    ushll v5.2d, v5.2s, #0
-; CHECK-SD-NEXT:    ushll2 v18.2d, v7.4s, #0
-; CHECK-SD-NEXT:    ushll v7.2d, v7.2s, #0
+; CHECK-SD-NEXT:    ushll2 v18.2d, v16.4s, #0
+; CHECK-SD-NEXT:    ushll v16.2d, v16.2s, #0
 ; CHECK-SD-NEXT:    ushll2 v19.2d, v17.4s, #0
 ; CHECK-SD-NEXT:    ucvtf v6.2d, v6.2d
 ; CHECK-SD-NEXT:    ucvtf v4.2d, v4.2d
-; CHECK-SD-NEXT:    ucvtf v16.2d, v16.2d
-; CHECK-SD-NEXT:    ucvtf v5.2d, v5.2d
 ; CHECK-SD-NEXT:    ucvtf v7.2d, v7.2d
+; CHECK-SD-NEXT:    ucvtf v5.2d, v5.2d
+; CHECK-SD-NEXT:    ucvtf v16.2d, v16.2d
 ; CHECK-SD-NEXT:    stp q4, q6, [x8, #224]
 ; CHECK-SD-NEXT:    ushll v6.2d, v17.2s, #0
 ; CHECK-SD-NEXT:    ucvtf v17.2d, v18.2d
-; CHECK-SD-NEXT:    ushll2 v4.2d, v3.4s, #0
-; CHECK-SD-NEXT:    stp q5, q16, [x8, #160]
+; CHECK-SD-NEXT:    stp q5, q7, [x8, #160]
+; CHECK-SD-NEXT:    ushll2 v7.2d, v3.4s, #0
 ; CHECK-SD-NEXT:    ushll v3.2d, v3.2s, #0
-; CHECK-SD-NEXT:    ucvtf v16.2d, v19.2d
+; CHECK-SD-NEXT:    ucvtf v4.2d, v19.2d
 ; CHECK-SD-NEXT:    ucvtf v5.2d, v6.2d
 ; CHECK-SD-NEXT:    ushll2 v6.2d, v2.4s, #0
 ; CHECK-SD-NEXT:    ushll v2.2d, v2.2s, #0
-; CHECK-SD-NEXT:    ucvtf v4.2d, v4.2d
+; CHECK-SD-NEXT:    ucvtf v7.2d, v7.2d
 ; CHECK-SD-NEXT:    ucvtf v3.2d, v3.2d
-; CHECK-SD-NEXT:    stp q7, q17, [x8, #96]
-; CHECK-SD-NEXT:    ushll2 v7.2d, v1.4s, #0
+; CHECK-SD-NEXT:    stp q16, q17, [x8, #96]
+; CHECK-SD-NEXT:    ushll2 v16.2d, v1.4s, #0
 ; CHECK-SD-NEXT:    ushll v1.2d, v1.2s, #0
 ; CHECK-SD-NEXT:    ucvtf v6.2d, v6.2d
 ; CHECK-SD-NEXT:    ucvtf v2.2d, v2.2d
-; CHECK-SD-NEXT:    stp q5, q16, [x8, #32]
-; CHECK-SD-NEXT:    ushll2 v5.2d, v0.4s, #0
+; CHECK-SD-NEXT:    stp q5, q4, [x8, #32]
+; CHECK-SD-NEXT:    ushll2 v4.2d, v0.4s, #0
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
-; CHECK-SD-NEXT:    ucvtf v7.2d, v7.2d
-; CHECK-SD-NEXT:    stp q3, q4, [x8, #192]
+; CHECK-SD-NEXT:    ucvtf v5.2d, v16.2d
+; CHECK-SD-NEXT:    stp q3, q7, [x8, #192]
 ; CHECK-SD-NEXT:    ucvtf v1.2d, v1.2d
-; CHECK-SD-NEXT:    ucvtf v3.2d, v5.2d
+; CHECK-SD-NEXT:    ucvtf v3.2d, v4.2d
 ; CHECK-SD-NEXT:    ucvtf v0.2d, v0.2d
 ; CHECK-SD-NEXT:    stp q2, q6, [x8, #128]
-; CHECK-SD-NEXT:    stp q1, q7, [x8, #64]
+; CHECK-SD-NEXT:    stp q1, q5, [x8, #64]
 ; CHECK-SD-NEXT:    stp q0, q3, [x8]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/nontemporal-load.ll b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
index adb209c0c6348..ffafe69b29266 100644
--- a/llvm/test/CodeGen/AArch64/nontemporal-load.ll
+++ b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
@@ -472,16 +472,17 @@ define <33 x i8> @test_ldnp_v33i8(ptr %A) {
 define <4 x i65> @test_ldnp_v4i65(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4i65:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    ldp x8, x9, [x0, #16]
+; CHECK-NEXT:    ldp x8, x9, [x0, #8]
+; CHECK-NEXT:    ldr x10, [x0, #24]
 ; CHECK-NEXT:    ldrb w11, [x0, #32]
-; CHECK-NEXT:    ldp x0, x10, [x0]
+; CHECK-NEXT:    ldr x0, [x0]
+; CHECK-NEXT:    ubfx x5, x10, #2, #1
+; CHECK-NEXT:    extr x2, x9, x8, #1
+; CHECK-NEXT:    extr x4, x10, x9, #2
+; CHECK-NEXT:    extr x6, x11, x10, #3
+; CHECK-NEXT:    ubfx x3, x9, #1, #1
 ; CHECK-NEXT:    ubfx x7, x11, #3, #1
-; CHECK-NEXT:    extr x4, x9, x8, #2
-; CHECK-NEXT:    extr x6, x11, x9, #3
-; CHECK-NEXT:    ubfx x3, x8, #1, #1
-; CHECK-NEXT:    extr x2, x8, x10, #1
-; CHECK-NEXT:    ubfx x5, x9, #2, #1
-; CHECK-NEXT:    and x1, x10, #0x1
+; CHECK-NEXT:    and x1, x8, #0x1
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: test_ldnp_v4i65:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
index c77861509e4a1..7f144df499be0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
@@ -756,75 +756,75 @@ define <32 x i64> @llrint_v32f64(<32 x double> %x) {
 ; CHECK-NEXT:    mov z18.d, z16.d[2]
 ; CHECK-NEXT:    mov z7.d, z16.d[1]
 ; CHECK-NEXT:    fcvtzs x13, d3
-; CHECK-NEXT:    fcvtzs x14, d20
 ; CHECK-NEXT:    str x9, [sp, #128]
+; CHECK-NEXT:    fcvtzs x9, d20
 ; CHECK-NEXT:    mov z16.d, z4.d[3]
-; CHECK-NEXT:    fcvtzs x9, d18
-; CHECK-NEXT:    mov z18.d, z4.d[2]
+; CHECK-NEXT:    ldp q3, q19, [x29, #80]
 ; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
 ; CHECK-NEXT:    stp x11, x10, [sp, #144]
-; CHECK-NEXT:    fcvtzs x10, d7
+; CHECK-NEXT:    fcvtzs x10, d18
+; CHECK-NEXT:    fcvtzs x11, d7
+; CHECK-NEXT:    mov z18.d, z4.d[2]
 ; CHECK-NEXT:    mov z7.d, z4.d[1]
 ; CHECK-NEXT:    str x13, [sp, #136]
-; CHECK-NEXT:    fcvtzs x11, d16
+; CHECK-NEXT:    fcvtzs x13, d16
 ; CHECK-NEXT:    mov z16.d, z6.d[3]
-; CHECK-NEXT:    fcvtzs x13, d18
-; CHECK-NEXT:    ldp q3, q19, [x29, #80]
-; CHECK-NEXT:    stp x9, x14, [sp, #176]
-; CHECK-NEXT:    fcvtzs x9, d4
+; CHECK-NEXT:    splice z3.d, p1, z3.d, z19.d
+; CHECK-NEXT:    mov z1.d, z5.d[1]
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    stp x10, x9, [sp, #176]
+; CHECK-NEXT:    fcvtzs x9, d18
+; CHECK-NEXT:    fcvtzs x10, d4
+; CHECK-NEXT:    stp x12, x11, [sp, #160]
+; CHECK-NEXT:    fcvtzs x11, d7
 ; CHECK-NEXT:    mov z4.d, z6.d[2]
-; CHECK-NEXT:    stp x12, x10, [sp, #160]
-; CHECK-NEXT:    fcvtzs x10, d7
 ; CHECK-NEXT:    mov z7.d, z6.d[1]
 ; CHECK-NEXT:    fcvtzs x12, d6
-; CHECK-NEXT:    splice z3.d, p1, z3.d, z19.d
 ; CHECK-NEXT:    mov z6.d, z5.d[2]
-; CHECK-NEXT:    stp x13, x11, [sp, #208]
-; CHECK-NEXT:    fcvtzs x11, d16
+; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
+; CHECK-NEXT:    stp x9, x13, [sp, #208]
+; CHECK-NEXT:    fcvtzs x9, d16
 ; CHECK-NEXT:    fcvtzs x13, d4
+; CHECK-NEXT:    stp x10, x11, [sp, #192]
+; CHECK-NEXT:    fcvtzs x10, d7
 ; CHECK-NEXT:    mov z4.d, z5.d[3]
-; CHECK-NEXT:    mov z1.d, z5.d[1]
-; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-NEXT:    stp x9, x10, [sp, #192]
-; CHECK-NEXT:    fcvtzs x9, d7
-; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
-; CHECK-NEXT:    fcvtzs x10, d4
-; CHECK-NEXT:    stp x13, x11, [sp, #240]
-; CHECK-NEXT:    fcvtzs x11, d6
-; CHECK-NEXT:    mov z4.d, z2.d[3]
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    stp x12, x9, [sp, #224]
-; CHECK-NEXT:    fcvtzs x9, d5
+; CHECK-NEXT:    fcvtzs x11, d4
+; CHECK-NEXT:    stp x13, x9, [sp, #240]
+; CHECK-NEXT:    fcvtzs x9, d6
+; CHECK-NEXT:    stp x12, x10, [sp, #224]
+; CHECK-NEXT:    fcvtzs x10, d5
 ; CHECK-NEXT:    fcvtzs x12, d1
+; CHECK-NEXT:    mov z4.d, z2.d[3]
 ; CHECK-NEXT:    mov z5.d, z2.d[2]
 ; CHECK-NEXT:    mov z1.d, z2.d[1]
+; CHECK-NEXT:    fcvtzs x13, d2
 ; CHECK-NEXT:    mov z2.d, z3.d[2]
-; CHECK-NEXT:    stp x11, x10, [sp, #16]
-; CHECK-NEXT:    fcvtzs x10, d4
-; CHECK-NEXT:    mov z4.d, z3.d[3]
+; CHECK-NEXT:    stp x9, x11, [sp, #16]
+; CHECK-NEXT:    fcvtzs x9, d4
 ; CHECK-NEXT:    fcvtzs x11, d5
-; CHECK-NEXT:    stp x9, x12, [sp]
-; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    stp x10, x12, [sp]
+; CHECK-NEXT:    fcvtzs x10, d1
+; CHECK-NEXT:    mov z4.d, z3.d[3]
 ; CHECK-NEXT:    mov z1.d, z3.d[1]
 ; CHECK-NEXT:    fcvtzs x12, d4
-; CHECK-NEXT:    stp x11, x10, [sp, #48]
-; CHECK-NEXT:    fcvtzs x10, d2
+; CHECK-NEXT:    stp x11, x9, [sp, #48]
+; CHECK-NEXT:    fcvtzs x9, d2
 ; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    stp x13, x9, [sp, #32]
-; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    stp x13, x10, [sp, #32]
+; CHECK-NEXT:    fcvtzs x10, d1
 ; CHECK-NEXT:    mov z2.d, z0.d[3]
 ; CHECK-NEXT:    mov z3.d, z0.d[2]
 ; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    stp x10, x12, [sp, #80]
+; CHECK-NEXT:    stp x9, x12, [sp, #80]
 ; CHECK-NEXT:    fcvtzs x12, d0
-; CHECK-NEXT:    fcvtzs x10, d3
-; CHECK-NEXT:    stp x11, x9, [sp, #64]
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    stp x10, x13, [sp, #112]
-; CHECK-NEXT:    add x10, sp, #192
-; CHECK-NEXT:    stp x12, x9, [sp, #96]
+; CHECK-NEXT:    fcvtzs x13, d2
+; CHECK-NEXT:    fcvtzs x9, d3
+; CHECK-NEXT:    stp x11, x10, [sp, #64]
+; CHECK-NEXT:    fcvtzs x10, d1
+; CHECK-NEXT:    stp x9, x13, [sp, #112]
 ; CHECK-NEXT:    add x9, sp, #128
+; CHECK-NEXT:    stp x12, x10, [sp, #96]
+; CHECK-NEXT:    add x10, sp, #192
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x9]
 ; CHECK-NEXT:    add x9, sp, #160
 ; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x10]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
index 6a97e7ad64bf3..9fe8d92a182ac 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
@@ -1506,75 +1506,75 @@ define <32 x iXLen> @lrint_v32f64(<32 x double> %x) {
 ; CHECK-i64-NEXT:    mov z18.d, z16.d[2]
 ; CHECK-i64-NEXT:    mov z7.d, z16.d[1]
 ; CHECK-i64-NEXT:    fcvtzs x13, d3
-; CHECK-i64-NEXT:    fcvtzs x14, d20
 ; CHECK-i64-NEXT:    str x9, [sp, #128]
+; CHECK-i64-NEXT:    fcvtzs x9, d20
 ; CHECK-i64-NEXT:    mov z16.d, z4.d[3]
-; CHECK-i64-NEXT:    fcvtzs x9, d18
-; CHECK-i64-NEXT:    mov z18.d, z4.d[2]
+; CHECK-i64-NEXT:    ldp q3, q19, [x29, #80]
 ; CHECK-i64-NEXT:    frintx z2.d, p0/m, z2.d
 ; CHECK-i64-NEXT:    stp x11, x10, [sp, #144]
-; CHECK-i64-NEXT:    fcvtzs x10, d7
+; CHECK-i64-NEXT:    fcvtzs x10, d18
+; CHECK-i64-NEXT:    fcvtzs x11, d7
+; CHECK-i64-NEXT:    mov z18.d, z4.d[2]
 ; CHECK-i64-NEXT:    mov z7.d, z4.d[1]
 ; CHECK-i64-NEXT:    str x13, [sp, #136]
-; CHECK-i64-NEXT:    fcvtzs x11, d16
+; CHECK-i64-NEXT:    fcvtzs x13, d16
 ; CHECK-i64-NEXT:    mov z16.d, z6.d[3]
-; CHECK-i64-NEXT:    fcvtzs x13, d18
-; CHECK-i64-NEXT:    ldp q3, q19, [x29, #80]
-; CHECK-i64-NEXT:    stp x9, x14, [sp, #176]
-; CHECK-i64-NEXT:    fcvtzs x9, d4
+; CHECK-i64-NEXT:    splice z3.d, p1, z3.d, z19.d
+; CHECK-i64-NEXT:    mov z1.d, z5.d[1]
+; CHECK-i64-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-i64-NEXT:    stp x10, x9, [sp, #176]
+; CHECK-i64-NEXT:    fcvtzs x9, d18
+; CHECK-i64-NEXT:    fcvtzs x10, d4
+; CHECK-i64-NEXT:    stp x12, x11, [sp, #160]
+; CHECK-i64-NEXT:    fcvtzs x11, d7
 ; CHECK-i64-NEXT:    mov z4.d, z6.d[2]
-; CHECK-i64-NEXT:    stp x12, x10, [sp, #160]
-; CHECK-i64-NEXT:    fcvtzs x10, d7
 ; CHECK-i64-NEXT:    mov z7.d, z6.d[1]
 ; CHECK-i64-NEXT:    fcvtzs x12, d6
-; CHECK-i64-NEXT:    splice z3.d, p1, z3.d, z19.d
 ; CHECK-i64-NEXT:    mov z6.d, z5.d[2]
-; CHECK-i64-NEXT:    stp x13, x11, [sp, #208]
-; CHECK-i64-NEXT:    fcvtzs x11, d16
+; CHECK-i64-NEXT:    frintx z3.d, p0/m, z3.d
+; CHECK-i64-NEXT:    stp x9, x13, [sp, #208]
+; CHECK-i64-NEXT:    fcvtzs x9, d16
 ; CHECK-i64-NEXT:    fcvtzs x13, d4
+; CHECK-i64-NEXT:    stp x10, x11, [sp, #192]
+; CHECK-i64-NEXT:    fcvtzs x10, d7
 ; CHECK-i64-NEXT:    mov z4.d, z5.d[3]
-; CHECK-i64-NEXT:    mov z1.d, z5.d[1]
-; CHECK-i64-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-i64-NEXT:    stp x9, x10, [sp, #192]
-; CHECK-i64-NEXT:    fcvtzs x9, d7
-; CHECK-i64-NEXT:    frintx z3.d, p0/m, z3.d
-; CHECK-i64-NEXT:    fcvtzs x10, d4
-; CHECK-i64-NEXT:    stp x13, x11, [sp, #240]
-; CHECK-i64-NEXT:    fcvtzs x11, d6
-; CHECK-i64-NEXT:    mov z4.d, z2.d[3]
-; CHECK-i64-NEXT:    fcvtzs x13, d2
-; CHECK-i64-NEXT:    stp x12, x9, [sp, #224]
-; CHECK-i64-NEXT:    fcvtzs x9, d5
+; CHECK-i64-NEXT:    fcvtzs x11, d4
+; CHECK-i64-NEXT:    stp x13, x9, [sp, #240]
+; CHECK-i64-NEXT:    fcvtzs x9, d6
+; CHECK-i64-NEXT:    stp x12, x10, [sp, #224]
+; CHECK-i64-NEXT:    fcvtzs x10, d5
 ; CHECK-i64-NEXT:    fcvtzs x12, d1
+; CHECK-i64-NEXT:    mov z4.d, z2.d[3]
 ; CHECK-i64-NEXT:    mov z5.d, z2.d[2]
 ; CHECK-i64-NEXT:    mov z1.d, z2.d[1]
+; CHECK-i64-NEXT:    fcvtzs x13, d2
 ; CHECK-i64-NEXT:    mov z2.d, z3.d[2]
-; CHECK-i64-NEXT:    stp x11, x10, [sp, #16]
-; CHECK-i64-NEXT:    fcvtzs x10, d4
-; CHECK-i64-NEXT:    mov z4.d, z3.d[3]
+; CHECK-i64-NEXT:    stp x9, x11, [sp, #16]
+; CHECK-i64-NEXT:    fcvtzs x9, d4
 ; CHECK-i64-NEXT:    fcvtzs x11, d5
-; CHECK-i64-NEXT:    stp x9, x12, [sp]
-; CHECK-i64-NEXT:    fcvtzs x9, d1
+; CHECK-i64-NEXT:    stp x10, x12, [sp]
+; CHECK-i64-NEXT:    fcvtzs x10, d1
+; CHECK-i64-NEXT:    mov z4.d, z3.d[3]
 ; CHECK-i64-NEXT:    mov z1.d, z3.d[1]
 ; CHECK-i64-NEXT:    fcvtzs x12, d4
-; CHECK-i64-NEXT:    stp x11, x10, [sp, #48]
-; CHECK-i64-NEXT:    fcvtzs x10, d2
+; CHECK-i64-NEXT:    stp x11, x9, [sp, #48]
+; CHECK-i64-NEXT:    fcvtzs x9, d2
 ; CHECK-i64-NEXT:    fcvtzs x11, d3
-; CHECK-i64-NEXT:    stp x13, x9, [sp, #32]
-; CHECK-i64-NEXT:    fcvtzs x9, d1
+; CHECK-i64-NEXT:    stp x13, x10, [sp, #32]
+; CHECK-i64-NEXT:    fcvtzs x10, d1
 ; CHECK-i64-NEXT:    mov z2.d, z0.d[3]
 ; CHECK-i64-NEXT:    mov z3.d, z0.d[2]
 ; CHECK-i64-NEXT:    mov z1.d, z0.d[1]
-; CHECK-i64-NEXT:    fcvtzs x13, d2
-; CHECK-i64-NEXT:    stp x10, x12, [sp, #80]
+; CHECK-i64-NEXT:    stp x9, x12, [sp, #80]
 ; CHECK-i64-NEXT:    fcvtzs x12, d0
-; CHECK-i64-NEXT:    fcvtzs x10, d3
-; CHECK-i64-NEXT:    stp x11, x9, [sp, #64]
-; CHECK-i64-NEXT:    fcvtzs x9, d1
-; CHECK-i64-NEXT:    stp x10, x13, [sp, #112]
-; CHECK-i64-NEXT:    add x10, sp, #192
-; CHECK-i64-NEXT:    stp x12, x9, [sp, #96]
+; CHECK-i64-NEXT:    fcvtzs x13, d2
+; CHECK-i64-NEXT:    fcvtzs x9, d3
+; CHECK-i64-NEXT:    stp x11, x10, [sp, #64]
+; CHECK-i64-NEXT:    fcvtzs x10, d1
+; CHECK-i64-NEXT:    stp x9, x13, [sp, #112]
 ; CHECK-i64-NEXT:    add x9, sp, #128
+; CHECK-i64-NEXT:    stp x12, x10, [sp, #96]
+; CHECK-i64-NEXT:    add x10, sp, #192
 ; CHECK-i64-NEXT:    ld1d { z0.d }, p0/z, [x9]
 ; CHECK-i64-NEXT:    add x9, sp, #160
 ; CHECK-i64-NEXT:    ld1d { z2.d }, p0/z, [x10]
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
index d3c446c9904b2..d29e43509dfe9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
@@ -40,64 +40,64 @@ define <8 x i32> @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %r
 ; NONEON-NOSVE-NEXT:    stp q0, q2, [sp, #-128]!
 ; NONEON-NOSVE-NEXT:    .cfi_def_cfa_offset 128
 ; NONEON-NOSVE-NEXT:    stp q1, q3, [sp, #48]
-; NONEON-NOSVE-NEXT:    ldp w8, w14, [sp, #48]
-; NONEON-NOSVE-NEXT:    ldp w9, w4, [sp, #64]
-; NONEON-NOSVE-NEXT:    ldp w13, w11, [sp, #56]
-; NONEON-NOSVE-NEXT:    neg w3, w8
-; NONEON-NOSVE-NEXT:    neg w15, w14
+; NONEON-NOSVE-NEXT:    ldp w13, w11, [sp, #48]
+; NONEON-NOSVE-NEXT:    ldp w14, w4, [sp, #64]
+; NONEON-NOSVE-NEXT:    ldp w17, w16, [sp]
+; NONEON-NOSVE-NEXT:    ldp w9, w8, [sp, #56]
+; NONEON-NOSVE-NEXT:    neg w15, w11
+; NONEON-NOSVE-NEXT:    neg w3, w13
 ; NONEON-NOSVE-NEXT:    str q4, [sp, #32]
-; NONEON-NOSVE-NEXT:    and w9, w3, w9
+; NONEON-NOSVE-NEXT:    and w14, w3, w14
 ; NONEON-NOSVE-NEXT:    and w15, w15, w4
-; NONEON-NOSVE-NEXT:    str q5, [sp, #80]
+; NONEON-NOSVE-NEXT:    neg w1, w17
 ; NONEON-NOSVE-NEXT:    ldp w5, w3, [sp, #72]
-; NONEON-NOSVE-NEXT:    ldp w16, w12, [sp]
-; NONEON-NOSVE-NEXT:    neg w4, w11
-; NONEON-NOSVE-NEXT:    neg w2, w13
-; NONEON-NOSVE-NEXT:    sub w11, w11, #1
-; NONEON-NOSVE-NEXT:    and w3, w4, w3
-; NONEON-NOSVE-NEXT:    and w2, w2, w5
-; NONEON-NOSVE-NEXT:    sub w13, w13, #1
 ; NONEON-NOSVE-NEXT:    ldp w6, w4, [sp, #16]
-; NONEON-NOSVE-NEXT:    ldp w10, w17, [sp, #8]
-; NONEON-NOSVE-NEXT:    neg w1, w16
-; NONEON-NOSVE-NEXT:    neg w0, w12
-; NONEON-NOSVE-NEXT:    sub w16, w16, #1
+; NONEON-NOSVE-NEXT:    ldp w12, w10, [sp, #8]
+; NONEON-NOSVE-NEXT:    neg w2, w9
+; NONEON-NOSVE-NEXT:    neg w7, w8
+; NONEON-NOSVE-NEXT:    sub w17, w17, #1
+; NONEON-NOSVE-NEXT:    and w2, w2, w5
 ; NONEON-NOSVE-NEXT:    and w1, w1, w6
-; NONEON-NOSVE-NEXT:    and w0, w0, w4
-; NONEON-NOSVE-NEXT:    sub w12, w12, #1
+; NONEON-NOSVE-NEXT:    and w3, w7, w3
 ; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #24]
-; NONEON-NOSVE-NEXT:    neg w18, w17
-; NONEON-NOSVE-NEXT:    neg w4, w10
-; NONEON-NOSVE-NEXT:    sub w17, w17, #1
+; NONEON-NOSVE-NEXT:    neg w0, w12
+; NONEON-NOSVE-NEXT:    neg w7, w16
+; NONEON-NOSVE-NEXT:    neg w18, w10
+; NONEON-NOSVE-NEXT:    and w4, w7, w4
 ; NONEON-NOSVE-NEXT:    sub w10, w10, #1
-; NONEON-NOSVE-NEXT:    sub w14, w14, #1
-; NONEON-NOSVE-NEXT:    sub w8, w8, #1
-; NONEON-NOSVE-NEXT:    and w4, w4, w5
+; NONEON-NOSVE-NEXT:    sub w12, w12, #1
+; NONEON-NOSVE-NEXT:    and w0, w0, w5
 ; NONEON-NOSVE-NEXT:    and w18, w18, w6
-; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #32]
+; NONEON-NOSVE-NEXT:    str q5, [sp, #80]
+; NONEON-NOSVE-NEXT:    ldp w7, w5, [sp, #32]
+; NONEON-NOSVE-NEXT:    sub w16, w16, #1
+; NONEON-NOSVE-NEXT:    sub w8, w8, #1
+; NONEON-NOSVE-NEXT:    sub w9, w9, #1
+; NONEON-NOSVE-NEXT:    and w17, w17, w7
 ; NONEON-NOSVE-NEXT:    and w16, w16, w5
+; NONEON-NOSVE-NEXT:    ldp w6, w7, [sp, #40]
 ; NONEON-NOSVE-NEXT:    and w12, w12, w6
-; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #40]
-; NONEON-NOSVE-NEXT:    and w10, w10, w5
-; NONEON-NOSVE-NEXT:    and w17, w17, w6
-; NONEON-NOSVE-NEXT:    orr w17, w17, w18
-; NONEON-NOSVE-NEXT:    orr w10, w10, w4
-; NONEON-NOSVE-NEXT:    ldp w18, w4, [sp, #88]
+; NONEON-NOSVE-NEXT:    and w10, w10, w7
+; NONEON-NOSVE-NEXT:    orr w10, w10, w18
+; NONEON-NOSVE-NEXT:    orr w12, w12, w0
+; NONEON-NOSVE-NEXT:    ldp w18, w0, [sp, #88]
 ; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #80]
-; NONEON-NOSVE-NEXT:    stp w10, w17, [sp, #104]
-; NONEON-NOSVE-NEXT:    orr w10, w12, w0
-; NONEON-NOSVE-NEXT:    orr w12, w16, w1
-; NONEON-NOSVE-NEXT:    and w11, w11, w4
-; NONEON-NOSVE-NEXT:    stp w12, w10, [sp, #96]
-; NONEON-NOSVE-NEXT:    and w10, w13, w18
-; NONEON-NOSVE-NEXT:    orr w11, w11, w3
-; NONEON-NOSVE-NEXT:    and w12, w14, w6
-; NONEON-NOSVE-NEXT:    orr w10, w10, w2
-; NONEON-NOSVE-NEXT:    and w8, w8, w5
-; NONEON-NOSVE-NEXT:    stp w10, w11, [sp, #120]
-; NONEON-NOSVE-NEXT:    orr w10, w12, w15
-; NONEON-NOSVE-NEXT:    orr w8, w8, w9
-; NONEON-NOSVE-NEXT:    stp w8, w10, [sp, #112]
+; NONEON-NOSVE-NEXT:    stp w12, w10, [sp, #104]
+; NONEON-NOSVE-NEXT:    sub w10, w11, #1
+; NONEON-NOSVE-NEXT:    sub w11, w13, #1
+; NONEON-NOSVE-NEXT:    and w8, w8, w0
+; NONEON-NOSVE-NEXT:    and w9, w9, w18
+; NONEON-NOSVE-NEXT:    orr w12, w16, w4
+; NONEON-NOSVE-NEXT:    orr w8, w8, w3
+; NONEON-NOSVE-NEXT:    orr w9, w9, w2
+; NONEON-NOSVE-NEXT:    and w10, w10, w6
+; NONEON-NOSVE-NEXT:    stp w9, w8, [sp, #120]
+; NONEON-NOSVE-NEXT:    and w8, w11, w5
+; NONEON-NOSVE-NEXT:    orr w13, w17, w1
+; NONEON-NOSVE-NEXT:    orr w9, w10, w15
+; NONEON-NOSVE-NEXT:    orr w8, w8, w14
+; NONEON-NOSVE-NEXT:    stp w13, w12, [sp, #96]
+; NONEON-NOSVE-NEXT:    stp w8, w9, [sp, #112]
 ; NONEON-NOSVE-NEXT:    ldp q0, q1, [sp, #96]
 ; NONEON-NOSVE-NEXT:    add sp, sp, #128
 ; NONEON-NOSVE-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
index 95ca0a68a7212..7946f945895b9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -980,10 +980,10 @@ define float @fmaxv_v8f32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    ldp s3, s2, [sp]
 ; NONEON-NOSVE-NEXT:    fmaxnm s0, s2, s0
 ; NONEON-NOSVE-NEXT:    fmaxnm s1, s3, s1
-; NONEON-NOSVE-NEXT:    ldp s2, s4, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp s3, s4, [sp, #8]
 ; NONEON-NOSVE-NEXT:    fmaxnm s0, s1, s0
-; NONEON-NOSVE-NEXT:    ldp s3, s1, [sp, #24]
-; NONEON-NOSVE-NEXT:    fmaxnm s2, s2, s3
+; NONEON-NOSVE-NEXT:    ldp s2, s1, [sp, #24]
+; NONEON-NOSVE-NEXT:    fmaxnm s2, s3, s2
 ; NONEON-NOSVE-NEXT:    fmaxnm s1, s4, s1
 ; NONEON-NOSVE-NEXT:    fmaxnm s0, s0, s2
 ; NONEON-NOSVE-NEXT:    fmaxnm s0, s0, s1
@@ -1310,10 +1310,10 @@ define float @fminv_v8f32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    ldp s3, s2, [sp]
 ; NONEON-NOSVE-NEXT:    fminnm s0, s2, s0
 ; NONEON-NOSVE-NEXT:    fminnm s1, s3, s1
-; NONEON-NOSVE-NEXT:    ldp s2, s4, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp s3, s4, [sp, #8]
 ; NONEON-NOSVE-NEXT:    fminnm s0, s1, s0
-; NONEON-NOSVE-NEXT:    ldp s3, s1, [sp, #24]
-; NONEON-NOSVE-NEXT:    fminnm s2, s2, s3
+; NONEON-NOSVE-NEXT:    ldp s2, s1, [sp, #24]
+; NONEON-NOSVE-NEXT:    fminnm s2, s3, s2
 ; NONEON-NOSVE-NEXT:    fminnm s1, s4, s1
 ; NONEON-NOSVE-NEXT:    fminnm s0, s0, s2
 ; NONEON-NOSVE-NEXT:    fminnm s0, s0, s1
@@ -1640,10 +1640,10 @@ define float @fmaximumv_v8f32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    ldp s3, s2, [sp]
 ; NONEON-NOSVE-NEXT:    fmax s0, s2, s0
 ; NONEON-NOSVE-NEXT:    fmax s1, s3, s1
-; NONEON-NOSVE-NEXT:    ldp s2, s4, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp s3, s4, [sp, #8]
 ; NONEON-NOSVE-NEXT:    fmax s0, s1, s0
-; NONEON-NOSVE-NEXT:    ldp s3, s1, [sp, #24]
-; NONEON-NOSVE-NEXT:    fmax s2, s2, s3
+; NONEON-NOSVE-NEXT:    ldp s2, s1, [sp, #24]
+; NONEON-NOSVE-NEXT:    fmax s2, s3, s2
 ; NONEON-NOSVE-NEXT:    fmax s1, s4, s1
 ; NONEON-NOSVE-NEXT:    fmax s0, s0, s2
 ; NONEON-NOSVE-NEXT:    fmax s0, s0, s1
@@ -1970,10 +1970,10 @@ define float @fminimumv_v8f32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    ldp s3, s2, [sp]
 ; NONEON-NOSVE-NEXT:    fmin s0, s2, s0
 ; NONEON-NOSVE-NEXT:    fmin s1, s3, s1
-; NONEON-NOSVE-NEXT:    ldp s2, s4, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp s3, s4, [sp, #8]
 ; NONEON-NOSVE-NEXT:    fmin s0, s1, s0
-; NONEON-NOSVE-NEXT:    ldp s3, s1, [sp, #24]
-; NONEON-NOSVE-NEXT:    fmin s2, s2, s3
+; NONEON-NOSVE-NEXT:    ldp s2, s1, [sp, #24]
+; NONEON-NOSVE-NEXT:    fmin s2, s3, s2
 ; NONEON-NOSVE-NEXT:    fmin s1, s4, s1
 ; NONEON-NOSVE-NEXT:    fmin s0, s0, s2
 ; NONEON-NOSVE-NEXT:    fmin s0, s0, s1
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
index 25a6ea490c163..40c8ab27c0b02 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
@@ -1146,65 +1146,64 @@ define void @sext_v16i8_v16i64(<16 x i8> %a, ptr %out) {
 define void @sext_v32i8_v32i64(ptr %in, ptr %out) {
 ; CHECK-LABEL: sext_v32i8_v32i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    add z0.b, z0.b, z0.b
 ; CHECK-NEXT:    add z1.b, z1.b, z1.b
 ; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    sunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z2.b, z2.b, z0.b, #8
 ; CHECK-NEXT:    sunpklo z0.h, z0.b
-; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    sunpklo z1.h, z1.b
-; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z4.s, z3.h
 ; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT:    sunpklo z4.s, z0.h
-; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
-; CHECK-NEXT:    sunpklo z5.s, z1.h
-; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
 ; CHECK-NEXT:    sunpklo z2.h, z2.b
-; CHECK-NEXT:    sunpklo z3.h, z3.b
-; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z5.s, z0.h
+; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    sunpklo z3.s, z3.h
 ; CHECK-NEXT:    sunpklo z16.d, z4.s
 ; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
-; CHECK-NEXT:    sunpklo z1.s, z1.h
-; CHECK-NEXT:    sunpklo z17.d, z5.s
-; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
 ; CHECK-NEXT:    sunpklo z6.s, z2.h
-; CHECK-NEXT:    sunpklo z7.s, z3.h
 ; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
-; CHECK-NEXT:    sunpklo z4.d, z4.s
-; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT:    sunpklo z19.d, z0.s
+; CHECK-NEXT:    ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT:    mov z17.d, z5.d
+; CHECK-NEXT:    sunpklo z1.s, z1.h
 ; CHECK-NEXT:    sunpklo z5.d, z5.s
-; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z4.d, z4.s
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z19.d, z3.s
 ; CHECK-NEXT:    sunpklo z2.s, z2.h
+; CHECK-NEXT:    sunpklo z7.s, z7.h
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    ext z17.b, z17.b, z17.b, #8
 ; CHECK-NEXT:    sunpklo z18.d, z6.s
 ; CHECK-NEXT:    ext z6.b, z6.b, z6.b, #8
-; CHECK-NEXT:    sunpklo z3.s, z3.h
+; CHECK-NEXT:    sunpklo z20.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
 ; CHECK-NEXT:    stp q16, q4, [x1, #128]
-; CHECK-NEXT:    mov z16.d, z7.d
-; CHECK-NEXT:    sunpklo z0.d, z0.s
-; CHECK-NEXT:    stp q17, q5, [x1]
-; CHECK-NEXT:    sunpklo z5.d, z7.s
-; CHECK-NEXT:    sunpklo z4.d, z6.s
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    ext z16.b, z16.b, z7.b, #8
-; CHECK-NEXT:    mov z7.d, z2.d
-; CHECK-NEXT:    stp q19, q0, [x1, #160]
-; CHECK-NEXT:    sunpklo z0.d, z2.s
-; CHECK-NEXT:    ext z6.b, z6.b, z1.b, #8
-; CHECK-NEXT:    sunpklo z1.d, z1.s
-; CHECK-NEXT:    stp q18, q4, [x1, #192]
-; CHECK-NEXT:    mov z4.d, z3.d
-; CHECK-NEXT:    ext z7.b, z7.b, z2.b, #8
-; CHECK-NEXT:    sunpklo z16.d, z16.s
-; CHECK-NEXT:    sunpklo z6.d, z6.s
-; CHECK-NEXT:    ext z4.b, z4.b, z3.b, #8
-; CHECK-NEXT:    sunpklo z2.d, z7.s
 ; CHECK-NEXT:    sunpklo z3.d, z3.s
-; CHECK-NEXT:    stp q5, q16, [x1, #64]
-; CHECK-NEXT:    stp q1, q6, [x1, #32]
+; CHECK-NEXT:    sunpklo z16.d, z0.s
+; CHECK-NEXT:    sunpklo z17.d, z17.s
+; CHECK-NEXT:    mov z4.d, z7.d
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z1.d, z1.s
+; CHECK-NEXT:    ext z4.b, z4.b, z7.b, #8
+; CHECK-NEXT:    stp q19, q3, [x1, #160]
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q5, q17, [x1]
+; CHECK-NEXT:    sunpklo z5.d, z6.s
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    stp q20, q1, [x1, #192]
+; CHECK-NEXT:    sunpklo z7.d, z7.s
 ; CHECK-NEXT:    sunpklo z1.d, z4.s
-; CHECK-NEXT:    stp q0, q2, [x1, #224]
-; CHECK-NEXT:    stp q3, q1, [x1, #96]
+; CHECK-NEXT:    ext z6.b, z6.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z2.d, z2.s
+; CHECK-NEXT:    stp q16, q0, [x1, #32]
+; CHECK-NEXT:    stp q18, q5, [x1, #64]
+; CHECK-NEXT:    sunpklo z3.d, z6.s
+; CHECK-NEXT:    stp q7, q1, [x1, #224]
+; CHECK-NEXT:    stp q2, q3, [x1, #96]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: sext_v32i8_v32i64:
@@ -3131,65 +3130,64 @@ define void @zext_v16i8_v16i64(<16 x i8> %a, ptr %out) {
 define void @zext_v32i8_v32i64(ptr %in, ptr %out) {
 ; CHECK-LABEL: zext_v32i8_v32i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    add z0.b, z0.b, z0.b
 ; CHECK-NEXT:    add z1.b, z1.b, z1.b
 ; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    uunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z2.b, z2.b, z0.b, #8
 ; CHECK-NEXT:    uunpklo z0.h, z0.b
-; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    uunpklo z1.h, z1.b
-; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z4.s, z3.h
 ; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT:    uunpklo z4.s, z0.h
-; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
-; CHECK-NEXT:    uunpklo z5.s, z1.h
-; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
 ; CHECK-NEXT:    uunpklo z2.h, z2.b
-; CHECK-NEXT:    uunpklo z3.h, z3.b
-; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z5.s, z0.h
+; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    uunpklo z3.s, z3.h
 ; CHECK-NEXT:    uunpklo z16.d, z4.s
 ; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    uunpklo z17.d, z5.s
-; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
 ; CHECK-NEXT:    uunpklo z6.s, z2.h
-; CHECK-NEXT:    uunpklo z7.s, z3.h
 ; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
-; CHECK-NEXT:    uunpklo z4.d, z4.s
-; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT:    uunpklo z19.d, z0.s
+; CHECK-NEXT:    ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT:    mov z17.d, z5.d
+; CHECK-NEXT:    uunpklo z1.s, z1.h
 ; CHECK-NEXT:    uunpklo z5.d, z5.s
-; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z19.d, z3.s
 ; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z7.s, z7.h
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    ext z17.b, z17.b, z17.b, #8
 ; CHECK-NEXT:    uunpklo z18.d, z6.s
 ; CHECK-NEXT:    ext z6.b, z6.b, z6.b, #8
-; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    uunpklo z20.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
 ; CHECK-NEXT:    stp q16, q4, [x1, #128]
-; CHECK-NEXT:    mov z16.d, z7.d
-; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    stp q17, q5, [x1]
-; CHECK-NEXT:    uunpklo z5.d, z7.s
-; CHECK-NEXT:    uunpklo z4.d, z6.s
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    ext z16.b, z16.b, z7.b, #8
-; CHECK-NEXT:    mov z7.d, z2.d
-; CHECK-NEXT:    stp q19, q0, [x1, #160]
-; CHECK-NEXT:    uunpklo z0.d, z2.s
-; CHECK-NEXT:    ext z6.b, z6.b, z1.b, #8
-; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    stp q18, q4, [x1, #192]
-; CHECK-NEXT:    mov z4.d, z3.d
-; CHECK-NEXT:    ext z7.b, z7.b, z2.b, #8
-; CHECK-NEXT:    uunpklo z16.d, z16.s
-; CHECK-NEXT:    uunpklo z6.d, z6.s
-; CHECK-NEXT:    ext z4.b, z4.b, z3.b, #8
-; CHECK-NEXT:    uunpklo z2.d, z7.s
 ; CHECK-NEXT:    uunpklo z3.d, z3.s
-; CHECK-NEXT:    stp q5, q16, [x1, #64]
-; CHECK-NEXT:    stp q1, q6, [x1, #32]
+; CHECK-NEXT:    uunpklo z16.d, z0.s
+; CHECK-NEXT:    uunpklo z17.d, z17.s
+; CHECK-NEXT:    mov z4.d, z7.d
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    ext z4.b, z4.b, z7.b, #8
+; CHECK-NEXT:    stp q19, q3, [x1, #160]
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q5, q17, [x1]
+; CHECK-NEXT:    uunpklo z5.d, z6.s
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    stp q20, q1, [x1, #192]
+; CHECK-NEXT:    uunpklo z7.d, z7.s
 ; CHECK-NEXT:    uunpklo z1.d, z4.s
-; CHECK-NEXT:    stp q0, q2, [x1, #224]
-; CHECK-NEXT:    stp q3, q1, [x1, #96]
+; CHECK-NEXT:    ext z6.b, z6.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    stp q16, q0, [x1, #32]
+; CHECK-NEXT:    stp q18, q5, [x1, #64]
+; CHECK-NEXT:    uunpklo z3.d, z6.s
+; CHECK-NEXT:    stp q7, q1, [x1, #224]
+; CHECK-NEXT:    stp q2, q3, [x1, #96]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: zext_v32i8_v32i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
index 244dcc734bd7c..2678324728d0e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
@@ -855,11 +855,11 @@ define i32 @smaxv_v8i32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    cmp w11, w10
 ; NONEON-NOSVE-NEXT:    csel w9, w11, w10, gt
 ; NONEON-NOSVE-NEXT:    cmp w9, w8
-; NONEON-NOSVE-NEXT:    ldp w10, w12, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp w11, w12, [sp, #8]
 ; NONEON-NOSVE-NEXT:    csel w8, w9, w8, gt
-; NONEON-NOSVE-NEXT:    ldp w11, w9, [sp, #24]
-; NONEON-NOSVE-NEXT:    cmp w10, w11
-; NONEON-NOSVE-NEXT:    csel w10, w10, w11, gt
+; NONEON-NOSVE-NEXT:    ldp w10, w9, [sp, #24]
+; NONEON-NOSVE-NEXT:    cmp w11, w10
+; NONEON-NOSVE-NEXT:    csel w10, w11, w10, gt
 ; NONEON-NOSVE-NEXT:    cmp w8, w10
 ; NONEON-NOSVE-NEXT:    csel w8, w8, w10, gt
 ; NONEON-NOSVE-NEXT:    cmp w12, w9
@@ -1363,11 +1363,11 @@ define i32 @sminv_v8i32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    cmp w11, w10
 ; NONEON-NOSVE-NEXT:    csel w9, w11, w10, lt
 ; NONEON-NOSVE-NEXT:    cmp w9, w8
-; NONEON-NOSVE-NEXT:    ldp w10, w12, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp w11, w12, [sp, #8]
 ; NONEON-NOSVE-NEXT:    csel w8, w9, w8, lt
-; NONEON-NOSVE-NEXT:    ldp w11, w9, [sp, #24]
-; NONEON-NOSVE-NEXT:    cmp w10, w11
-; NONEON-NOSVE-NEXT:    csel w10, w10, w11, lt
+; NONEON-NOSVE-NEXT:    ldp w10, w9, [sp, #24]
+; NONEON-NOSVE-NEXT:    cmp w11, w10
+; NONEON-NOSVE-NEXT:    csel w10, w11, w10, lt
 ; NONEON-NOSVE-NEXT:    cmp w8, w10
 ; NONEON-NOSVE-NEXT:    csel w8, w8, w10, lt
 ; NONEON-NOSVE-NEXT:    cmp w12, w9
@@ -1871,11 +1871,11 @@ define i32 @umaxv_v8i32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    cmp w11, w10
 ; NONEON-NOSVE-NEXT:    csel w9, w11, w10, hi
 ; NONEON-NOSVE-NEXT:    cmp w9, w8
-; NONEON-NOSVE-NEXT:    ldp w10, w12, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp w11, w12, [sp, #8]
 ; NONEON-NOSVE-NEXT:    csel w8, w9, w8, hi
-; NONEON-NOSVE-NEXT:    ldp w11, w9, [sp, #24]
-; NONEON-NOSVE-NEXT:    cmp w10, w11
-; NONEON-NOSVE-NEXT:    csel w10, w10, w11, hi
+; NONEON-NOSVE-NEXT:    ldp w10, w9, [sp, #24]
+; NONEON-NOSVE-NEXT:    cmp w11, w10
+; NONEON-NOSVE-NEXT:    csel w10, w11, w10, hi
 ; NONEON-NOSVE-NEXT:    cmp w8, w10
 ; NONEON-NOSVE-NEXT:    csel w8, w8, w10, hi
 ; NONEON-NOSVE-NEXT:    cmp w12, w9
@@ -2379,11 +2379,11 @@ define i32 @uminv_v8i32(ptr %a) {
 ; NONEON-NOSVE-NEXT:    cmp w11, w10
 ; NONEON-NOSVE-NEXT:    csel w9, w11, w10, lo
 ; NONEON-NOSVE-NEXT:    cmp w9, w8
-; NONEON-NOSVE-NEXT:    ldp w10, w12, [sp, #8]
+; NONEON-NOSVE-NEXT:    ldp w11, w12, [sp, #8]
 ; NONEON-NOSVE-NEXT:    csel w8, w9, w8, lo
-; NONEON-NOSVE-NEXT:    ldp w11, w9, [sp, #24]
-; NONEON-NOSVE-NEXT:    cmp w10, w11
-; NONEON-NOSVE-NEXT:    csel w10, w10, w11, lo
+; NONEON-NOSVE-NEXT:    ldp w10, w9, [sp, #24]
+; NONEON-NOSVE-NEXT:    cmp w11, w10
+; NONEON-NOSVE-NEXT:    csel w10, w11, w10, lo
 ; NONEON-NOSVE-NEXT:    cmp w8, w10
 ; NONEON-NOSVE-NEXT:    csel w8, w8, w10, lo
 ; NONEON-NOSVE-NEXT:    cmp w12, w9
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
index d61f92b406294..46a2ce6ed7109 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
@@ -562,41 +562,42 @@ define void @ucvtf_v16i16_v16f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    uunpklo z3.s, z1.h
-; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
-; CHECK-NEXT:    uunpklo z0.s, z0.h
-; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    mov z5.d, z3.d
-; CHECK-NEXT:    uunpklo z4.d, z0.s
-; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ext z2.b, z2.b, z0.b, #8
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    mov z4.d, z1.d
 ; CHECK-NEXT:    uunpklo z2.s, z2.h
-; CHECK-NEXT:    ext z5.b, z5.b, z3.b, #8
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    uunpklo z3.d, z3.s
-; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    ucvtf z4.d, p0/m, z4.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    uunpklo z5.d, z5.s
-; CHECK-NEXT:    ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    uunpklo z5.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ext z4.b, z4.b, z1.b, #8
 ; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    ucvtf z3.d, p0/m, z3.d
-; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
-; CHECK-NEXT:    ext z6.b, z6.b, z2.b, #8
-; CHECK-NEXT:    uunpklo z2.d, z2.s
-; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    mov z7.d, z3.d
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
 ; CHECK-NEXT:    ucvtf z5.d, p0/m, z5.d
 ; CHECK-NEXT:    ucvtf z1.d, p0/m, z1.d
+; CHECK-NEXT:    ext z6.b, z6.b, z2.b, #8
+; CHECK-NEXT:    ext z7.b, z7.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    ucvtf z4.d, p0/m, z4.d
 ; CHECK-NEXT:    uunpklo z6.d, z6.s
-; CHECK-NEXT:    stp q4, q0, [x1, #64]
+; CHECK-NEXT:    uunpklo z7.d, z7.s
 ; CHECK-NEXT:    ucvtf z2.d, p0/m, z2.d
-; CHECK-NEXT:    stp q3, q5, [x1]
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    ucvtf z3.d, p0/m, z7.d
-; CHECK-NEXT:    movprfx z0, z6
-; CHECK-NEXT:    ucvtf z0.d, p0/m, z6.d
-; CHECK-NEXT:    stp q1, q3, [x1, #32]
-; CHECK-NEXT:    stp q2, q0, [x1, #96]
+; CHECK-NEXT:    stp q5, q0, [x1, #64]
+; CHECK-NEXT:    ucvtf z3.d, p0/m, z3.d
+; CHECK-NEXT:    stp q1, q4, [x1]
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    ucvtf z1.d, p0/m, z6.d
+; CHECK-NEXT:    movprfx z0, z7
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z7.d
+; CHECK-NEXT:    stp q3, q0, [x1, #32]
+; CHECK-NEXT:    stp q2, q1, [x1, #96]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: ucvtf_v16i16_v16f64:
@@ -2000,41 +2001,42 @@ define void @scvtf_v16i16_v16f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    sunpklo z3.s, z1.h
-; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
-; CHECK-NEXT:    sunpklo z0.s, z0.h
-; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    sunpklo z1.s, z1.h
-; CHECK-NEXT:    mov z5.d, z3.d
-; CHECK-NEXT:    sunpklo z4.d, z0.s
-; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ext z2.b, z2.b, z0.b, #8
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    mov z4.d, z1.d
 ; CHECK-NEXT:    sunpklo z2.s, z2.h
-; CHECK-NEXT:    ext z5.b, z5.b, z3.b, #8
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    sunpklo z3.d, z3.s
-; CHECK-NEXT:    sunpklo z0.d, z0.s
-; CHECK-NEXT:    scvtf z4.d, p0/m, z4.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    sunpklo z5.d, z5.s
-; CHECK-NEXT:    ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.s, z3.h
+; CHECK-NEXT:    sunpklo z5.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ext z4.b, z4.b, z1.b, #8
 ; CHECK-NEXT:    sunpklo z1.d, z1.s
-; CHECK-NEXT:    scvtf z3.d, p0/m, z3.d
-; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
-; CHECK-NEXT:    ext z6.b, z6.b, z2.b, #8
-; CHECK-NEXT:    sunpklo z2.d, z2.s
-; CHECK-NEXT:    sunpklo z7.d, z7.s
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    mov z7.d, z3.d
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    sunpklo z4.d, z4.s
 ; CHECK-NEXT:    scvtf z5.d, p0/m, z5.d
 ; CHECK-NEXT:    scvtf z1.d, p0/m, z1.d
+; CHECK-NEXT:    ext z6.b, z6.b, z2.b, #8
+; CHECK-NEXT:    ext z7.b, z7.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z2.d, z2.s
+; CHECK-NEXT:    sunpklo z3.d, z3.s
+; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    scvtf z4.d, p0/m, z4.d
 ; CHECK-NEXT:    sunpklo z6.d, z6.s
-; CHECK-NEXT:    stp q4, q0, [x1, #64]
+; CHECK-NEXT:    sunpklo z7.d, z7.s
 ; CHECK-NEXT:    scvtf z2.d, p0/m, z2.d
-; CHECK-NEXT:    stp q3, q5, [x1]
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    scvtf z3.d, p0/m, z7.d
-; CHECK-NEXT:    movprfx z0, z6
-; CHECK-NEXT:    scvtf z0.d, p0/m, z6.d
-; CHECK-NEXT:    stp q1, q3, [x1, #32]
-; CHECK-NEXT:    stp q2, q0, [x1, #96]
+; CHECK-NEXT:    stp q5, q0, [x1, #64]
+; CHECK-NEXT:    scvtf z3.d, p0/m, z3.d
+; CHECK-NEXT:    stp q1, q4, [x1]
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    scvtf z1.d, p0/m, z6.d
+; CHECK-NEXT:    movprfx z0, z7
+; CHECK-NEXT:    scvtf z0.d, p0/m, z7.d
+; CHECK-NEXT:    stp q3, q0, [x1, #32]
+; CHECK-NEXT:    stp q2, q1, [x1, #96]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: scvtf_v16i16_v16f64:
@@ -2481,38 +2483,38 @@ define void @scvtf_v16i32_v16f64(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0, #32]
 ; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    ldp q5, q4, [x0]
+; CHECK-NEXT:    ldp q5, q3, [x0]
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z4.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    mov z6.d, z3.d
 ; CHECK-NEXT:    mov z7.d, z5.d
 ; CHECK-NEXT:    ext z2.b, z2.b, z0.b, #8
-; CHECK-NEXT:    ext z3.b, z3.b, z1.b, #8
+; CHECK-NEXT:    ext z4.b, z4.b, z1.b, #8
 ; CHECK-NEXT:    sunpklo z0.d, z0.s
 ; CHECK-NEXT:    sunpklo z1.d, z1.s
-; CHECK-NEXT:    ext z6.b, z6.b, z4.b, #8
+; CHECK-NEXT:    ext z6.b, z6.b, z3.b, #8
 ; CHECK-NEXT:    ext z7.b, z7.b, z5.b, #8
-; CHECK-NEXT:    sunpklo z4.d, z4.s
+; CHECK-NEXT:    sunpklo z3.d, z3.s
 ; CHECK-NEXT:    sunpklo z5.d, z5.s
 ; CHECK-NEXT:    sunpklo z2.d, z2.s
-; CHECK-NEXT:    sunpklo z3.d, z3.s
+; CHECK-NEXT:    sunpklo z4.d, z4.s
 ; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sunpklo z6.d, z6.s
 ; CHECK-NEXT:    sunpklo z7.d, z7.s
 ; CHECK-NEXT:    scvtf z1.d, p0/m, z1.d
-; CHECK-NEXT:    scvtf z4.d, p0/m, z4.d
-; CHECK-NEXT:    scvtf z2.d, p0/m, z2.d
 ; CHECK-NEXT:    scvtf z3.d, p0/m, z3.d
-; CHECK-NEXT:    stp q1, q3, [x1, #64]
-; CHECK-NEXT:    movprfx z1, z7
-; CHECK-NEXT:    scvtf z1.d, p0/m, z7.d
+; CHECK-NEXT:    scvtf z2.d, p0/m, z2.d
+; CHECK-NEXT:    scvtf z4.d, p0/m, z4.d
+; CHECK-NEXT:    stp q1, q4, [x1, #64]
+; CHECK-NEXT:    movprfx z1, z5
+; CHECK-NEXT:    scvtf z1.d, p0/m, z5.d
 ; CHECK-NEXT:    stp q0, q2, [x1, #96]
 ; CHECK-NEXT:    movprfx z0, z6
 ; CHECK-NEXT:    scvtf z0.d, p0/m, z6.d
-; CHECK-NEXT:    movprfx z2, z5
-; CHECK-NEXT:    scvtf z2.d, p0/m, z5.d
-; CHECK-NEXT:    stp q2, q1, [x1]
-; CHECK-NEXT:    stp q4, q0, [x1, #32]
+; CHECK-NEXT:    movprfx z2, z7
+; CHECK-NEXT:    scvtf z2.d, p0/m, z7.d
+; CHECK-NEXT:    stp q1, q2, [x1]
+; CHECK-NEXT:    stp q3, q0, [x1, #32]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: scvtf_v16i32_v16f64:
diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
index b29195eed9149..2f51208e49351 100644
--- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
@@ -278,8 +278,8 @@ define <2 x i32> @uaddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, ptr %p2) nounwind
 ; CHECK-NEXT:    fmov s0, w13
 ; CHECK-NEXT:    mov v0.s[1], w10
 ; CHECK-NEXT:    ldr x10, [sp]
-; CHECK-NEXT:    stp x8, x9, [x10, #16]
 ; CHECK-NEXT:    stp x11, x12, [x10]
+; CHECK-NEXT:    stp x8, x9, [x10, #16]
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #31
 ; CHECK-NEXT:    cmlt v0.2s, v0.2s, #0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/vec_umulo.ll b/llvm/test/CodeGen/AArch64/vec_umulo.ll
index 12ea8862a03cd..935f4272218af 100644
--- a/llvm/test/CodeGen/AArch64/vec_umulo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_umulo.ll
@@ -340,12 +340,12 @@ define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, ptr %p2) nounwind
 ; CHECK-NEXT:    csinc w11, w12, wzr, lo
 ; CHECK-NEXT:    ldr x12, [sp]
 ; CHECK-NEXT:    fmov s0, w11
-; CHECK-NEXT:    mul x11, x0, x4
+; CHECK-NEXT:    mul x11, x2, x6
 ; CHECK-NEXT:    mov v0.s[1], w8
-; CHECK-NEXT:    mul x8, x2, x6
-; CHECK-NEXT:    stp x11, x10, [x12]
+; CHECK-NEXT:    mul x8, x0, x4
+; CHECK-NEXT:    stp x11, x9, [x12, #16]
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #31
-; CHECK-NEXT:    stp x8, x9, [x12, #16]
+; CHECK-NEXT:    stp x8, x10, [x12]
 ; CHECK-NEXT:    cmlt v0.2s, v0.2s, #0
 ; CHECK-NEXT:    ret
   %t = call {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1)
diff --git a/llvm/test/CodeGen/AArch64/vselect-ext.ll b/llvm/test/CodeGen/AArch64/vselect-ext.ll
index 0b90343a40c83..76b7f3d9dfc0e 100644
--- a/llvm/test/CodeGen/AArch64/vselect-ext.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-ext.ll
@@ -334,7 +334,7 @@ define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_other_use(<16
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    movi.16b v16, #10
 ; CHECK-NEXT:    ushll.8h v19, v0, #0
-; CHECK-NEXT:    ldr q21, [sp]
+; CHECK-NEXT:    ldr q22, [sp]
 ; CHECK-NEXT:    ushll.4s v24, v19, #0
 ; CHECK-NEXT:    ushll2.4s v19, v19, #0
 ; CHECK-NEXT:    cmhi.16b v16, v0, v16
@@ -345,33 +345,33 @@ define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_other_use(<16
 ; CHECK-NEXT:    ushll2.4s v0, v0, #0
 ; CHECK-NEXT:    sshll2.4s v18, v17, #0
 ; CHECK-NEXT:    sshll.4s v17, v17, #0
-; CHECK-NEXT:    sshll2.4s v22, v16, #0
+; CHECK-NEXT:    sshll2.4s v20, v16, #0
 ; CHECK-NEXT:    sshll.4s v16, v16, #0
-; CHECK-NEXT:    sshll2.2d v20, v18, #0
+; CHECK-NEXT:    sshll2.2d v21, v18, #0
 ; CHECK-NEXT:    sshll.2d v23, v18, #0
 ; CHECK-NEXT:    sshll2.2d v26, v17, #0
-; CHECK-NEXT:    sshll.2d v27, v17, #0
-; CHECK-NEXT:    and.16b v20, v21, v20
-; CHECK-NEXT:    sshll2.2d v21, v22, #0
+; CHECK-NEXT:    sshll2.2d v27, v20, #0
+; CHECK-NEXT:    and.16b v21, v22, v21
+; CHECK-NEXT:    sshll.2d v22, v17, #0
 ; CHECK-NEXT:    and.16b v7, v7, v23
-; CHECK-NEXT:    sshll.2d v23, v22, #0
+; CHECK-NEXT:    sshll.2d v23, v20, #0
 ; CHECK-NEXT:    and.16b v6, v6, v26
 ; CHECK-NEXT:    sshll2.2d v26, v16, #0
-; CHECK-NEXT:    and.16b v5, v5, v27
-; CHECK-NEXT:    stp q7, q20, [x0, #96]
-; CHECK-NEXT:    sshll.2d v20, v16, #0
-; CHECK-NEXT:    and.16b v21, v4, v21
+; CHECK-NEXT:    and.16b v27, v4, v27
 ; CHECK-NEXT:    and.16b v4, v0, v18
+; CHECK-NEXT:    and.16b v0, v24, v16
+; CHECK-NEXT:    stp q7, q21, [x0, #96]
+; CHECK-NEXT:    sshll.2d v21, v16, #0
+; CHECK-NEXT:    and.16b v5, v5, v22
 ; CHECK-NEXT:    and.16b v7, v3, v23
-; CHECK-NEXT:    and.16b v3, v19, v22
+; CHECK-NEXT:    and.16b v3, v19, v20
 ; CHECK-NEXT:    stp q5, q6, [x0, #64]
-; CHECK-NEXT:    and.16b v0, v24, v16
 ; CHECK-NEXT:    and.16b v6, v2, v26
 ; CHECK-NEXT:    and.16b v2, v25, v17
-; CHECK-NEXT:    and.16b v5, v1, v20
+; CHECK-NEXT:    and.16b v5, v1, v21
 ; CHECK-NEXT:    mov.16b v1, v3
 ; CHECK-NEXT:    mov.16b v3, v4
-; CHECK-NEXT:    stp q7, q21, [x0, #32]
+; CHECK-NEXT:    stp q7, q27, [x0, #32]
 ; CHECK-NEXT:    stp q5, q6, [x0]
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll
index 531e0fa740da7..92fd4fe30980c 100644
--- a/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll
@@ -168,14 +168,13 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; ALL-NEXT:    stp q0, q0, [sp, #32]
 ; ALL-NEXT:    eor x12, x12, #0x3f
 ; ALL-NEXT:    add x8, x9, x8
-; ALL-NEXT:    ldp x13, x11, [x8]
-; ALL-NEXT:    ldr x9, [x8, #24]
-; ALL-NEXT:    ldr x8, [x8, #16]
-; ALL-NEXT:    lsl x14, x9, #1
+; ALL-NEXT:    ldp x13, x9, [x8]
+; ALL-NEXT:    ldp x8, x11, [x8, #16]
+; ALL-NEXT:    lsl x15, x9, #1
 ; ALL-NEXT:    lsr x9, x9, x10
-; ALL-NEXT:    lsl x15, x11, #1
-; ALL-NEXT:    lsr x11, x11, x10
 ; ALL-NEXT:    lsr x13, x13, x10
+; ALL-NEXT:    lsl x14, x11, #1
+; ALL-NEXT:    lsr x11, x11, x10
 ; ALL-NEXT:    lsl x14, x14, x12
 ; ALL-NEXT:    lsl x12, x15, x12
 ; ALL-NEXT:    lsl x15, x8, #1
@@ -183,10 +182,10 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; ALL-NEXT:    mvn w10, w10
 ; ALL-NEXT:    lsl x10, x15, x10
 ; ALL-NEXT:    orr x8, x14, x8
-; ALL-NEXT:    stp x8, x9, [x2, #16]
-; ALL-NEXT:    orr x9, x12, x13
-; ALL-NEXT:    orr x8, x11, x10
-; ALL-NEXT:    stp x9, x8, [x2]
+; ALL-NEXT:    stp x8, x11, [x2, #16]
+; ALL-NEXT:    orr x11, x12, x13
+; ALL-NEXT:    orr x8, x9, x10
+; ALL-NEXT:    stp x11, x8, [x2]
 ; ALL-NEXT:    add sp, sp, #64
 ; ALL-NEXT:    ret
   %src = load i256, ptr %src.ptr, align 1
@@ -213,14 +212,13 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; ALL-NEXT:    str q0, [sp]
 ; ALL-NEXT:    eor x12, x12, #0x3f
 ; ALL-NEXT:    sub x8, x9, x8
-; ALL-NEXT:    ldp x11, x13, [x8, #16]
-; ALL-NEXT:    ldr x9, [x8]
-; ALL-NEXT:    ldr x8, [x8, #8]
-; ALL-NEXT:    lsr x15, x9, #1
+; ALL-NEXT:    ldp x9, x13, [x8, #16]
+; ALL-NEXT:    ldp x11, x8, [x8]
+; ALL-NEXT:    lsr x14, x9, #1
 ; ALL-NEXT:    lsl x9, x9, x10
-; ALL-NEXT:    lsr x14, x11, #1
-; ALL-NEXT:    lsl x11, x11, x10
 ; ALL-NEXT:    lsl x13, x13, x10
+; ALL-NEXT:    lsr x15, x11, #1
+; ALL-NEXT:    lsl x11, x11, x10
 ; ALL-NEXT:    lsr x14, x14, x12
 ; ALL-NEXT:    lsr x12, x15, x12
 ; ALL-NEXT:    lsr x15, x8, #1
@@ -228,10 +226,10 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; ALL-NEXT:    mvn w10, w10
 ; ALL-NEXT:    lsr x10, x15, x10
 ; ALL-NEXT:    orr x8, x8, x12
-; ALL-NEXT:    stp x9, x8, [x2]
-; ALL-NEXT:    orr x9, x13, x14
-; ALL-NEXT:    orr x8, x11, x10
-; ALL-NEXT:    stp x8, x9, [x2, #16]
+; ALL-NEXT:    stp x11, x8, [x2]
+; ALL-NEXT:    orr x11, x13, x14
+; ALL-NEXT:    orr x8, x9, x10
+; ALL-NEXT:    stp x8, x11, [x2, #16]
 ; ALL-NEXT:    add sp, sp, #64
 ; ALL-NEXT:    ret
   %src = load i256, ptr %src.ptr, align 1
@@ -258,14 +256,13 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; ALL-NEXT:    eor x12, x12, #0x3f
 ; ALL-NEXT:    stp x8, x8, [sp, #32]
 ; ALL-NEXT:    add x8, x11, x9
-; ALL-NEXT:    ldp x13, x11, [x8]
-; ALL-NEXT:    ldr x9, [x8, #24]
-; ALL-NEXT:    ldr x8, [x8, #16]
-; ALL-NEXT:    lsl x14, x9, #1
-; ALL-NEXT:    asr x9, x9, x10
-; ALL-NEXT:    lsl x15, x11, #1
-; ALL-NEXT:    lsr x11, x11, x10
+; ALL-NEXT:    ldp x13, x9, [x8]
+; ALL-NEXT:    ldp x8, x11, [x8, #16]
+; ALL-NEXT:    lsl x15, x9, #1
+; ALL-NEXT:    lsr x9, x9, x10
 ; ALL-NEXT:    lsr x13, x13, x10
+; ALL-NEXT:    lsl x14, x11, #1
+; ALL-NEXT:    asr x11, x11, x10
 ; ALL-NEXT:    lsl x14, x14, x12
 ; ALL-NEXT:    lsl x12, x15, x12
 ; ALL-NEXT:    lsl x15, x8, #1
@@ -273,10 +270,10 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; ALL-NEXT:    mvn w10, w10
 ; ALL-NEXT:    lsl x10, x15, x10
 ; ALL-NEXT:    orr x8, x14, x8
-; ALL-NEXT:    stp x8, x9, [x2, #16]
-; ALL-NEXT:    orr x9, x12, x13
-; ALL-NEXT:    orr x8, x11, x10
-; ALL-NEXT:    stp x9, x8, [x2]
+; ALL-NEXT:    stp x8, x11, [x2, #16]
+; ALL-NEXT:    orr x11, x12, x13
+; ALL-NEXT:    orr x8, x9, x10
+; ALL-NEXT:    stp x11, x8, [x2]
 ; ALL-NEXT:    add sp, sp, #64
 ; ALL-NEXT:    ret
   %src = load i256, ptr %src.ptr, align 1
diff --git a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
index eb83aa5a13e52..46a2459485987 100644
--- a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
@@ -1486,17 +1486,17 @@ define void @zext_v16i32_to_v16i64_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    cmp x8, #512
 ; CHECK-NEXT:    ldp q5, q4, [x9]
 ; CHECK-NEXT:    ushll2.2d v2, v0, #0
-; CHECK-NEXT:    ushll.2d v0, v0, #0
 ; CHECK-NEXT:    ushll2.2d v3, v1, #0
+; CHECK-NEXT:    ushll.2d v0, v0, #0
 ; CHECK-NEXT:    ushll.2d v1, v1, #0
 ; CHECK-NEXT:    stp q0, q2, [x1, #96]
 ; CHECK-NEXT:    ushll2.2d v2, v4, #0
-; CHECK-NEXT:    ushll.2d v0, v4, #0
+; CHECK-NEXT:    ushll2.2d v0, v5, #0
 ; CHECK-NEXT:    stp q1, q3, [x1, #64]
-; CHECK-NEXT:    ushll2.2d v3, v5, #0
+; CHECK-NEXT:    ushll.2d v3, v4, #0
 ; CHECK-NEXT:    ushll.2d v1, v5, #0
-; CHECK-NEXT:    stp q0, q2, [x1, #32]
-; CHECK-NEXT:    stp q1, q3, [x1], #128
+; CHECK-NEXT:    stp q3, q2, [x1, #32]
+; CHECK-NEXT:    stp q1, q0, [x1], #128
 ; CHECK-NEXT:    b.ne LBB15_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
@@ -1683,26 +1683,26 @@ define void @zext_v8i8_to_v8i64_with_add_in_sequence_in_loop(ptr %src, ptr %dst)
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldp d2, d3, [x9, #-8]
 ; CHECK-NEXT:    subs x10, x10, #16
-; CHECK-NEXT:    ldp q6, q5, [x8, #-32]
+; CHECK-NEXT:    ldp q7, q5, [x8, #-32]
 ; CHECK-NEXT:    add x9, x9, #16
-; CHECK-NEXT:    ldp q17, q16, [x8, #-64]
+; CHECK-NEXT:    ldp q17, q6, [x8, #-64]
 ; CHECK-NEXT:    tbl.16b v4, { v2 }, v1
 ; CHECK-NEXT:    tbl.16b v2, { v2 }, v0
-; CHECK-NEXT:    tbl.16b v7, { v3 }, v1
+; CHECK-NEXT:    tbl.16b v16, { v3 }, v1
 ; CHECK-NEXT:    tbl.16b v3, { v3 }, v0
 ; CHECK-NEXT:    uaddw2.2d v5, v5, v4
-; CHECK-NEXT:    uaddw.2d v4, v6, v4
-; CHECK-NEXT:    uaddw2.2d v6, v16, v2
-; CHECK-NEXT:    ldp q18, q16, [x8, #32]
+; CHECK-NEXT:    uaddw2.2d v6, v6, v2
+; CHECK-NEXT:    uaddw.2d v4, v7, v4
+; CHECK-NEXT:    ldp q18, q7, [x8, #32]
 ; CHECK-NEXT:    uaddw.2d v2, v17, v2
 ; CHECK-NEXT:    stp q4, q5, [x8, #-32]
-; CHECK-NEXT:    uaddw2.2d v5, v16, v7
-; CHECK-NEXT:    ldp q16, q4, [x8]
-; CHECK-NEXT:    uaddw.2d v7, v18, v7
+; CHECK-NEXT:    uaddw2.2d v5, v7, v16
 ; CHECK-NEXT:    stp q2, q6, [x8, #-64]
-; CHECK-NEXT:    uaddw2.2d v4, v4, v3
-; CHECK-NEXT:    uaddw.2d v2, v16, v3
-; CHECK-NEXT:    stp q7, q5, [x8, #32]
+; CHECK-NEXT:    uaddw.2d v16, v18, v16
+; CHECK-NEXT:    ldp q7, q6, [x8]
+; CHECK-NEXT:    stp q16, q5, [x8, #32]
+; CHECK-NEXT:    uaddw2.2d v4, v6, v3
+; CHECK-NEXT:    uaddw.2d v2, v7, v3
 ; CHECK-NEXT:    stp q2, q4, [x8], #128
 ; CHECK-NEXT:    b.ne LBB17_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
@@ -1826,34 +1826,34 @@ define void @zext_v16i8_to_v16i64_in_sequence_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    ushll.8h v1, v1, #0
 ; CHECK-NEXT:    ushll2.4s v3, v2, #0
 ; CHECK-NEXT:    ushll.4s v2, v2, #0
-; CHECK-NEXT:    ushll2.4s v5, v0, #0
+; CHECK-NEXT:    ushll2.4s v4, v0, #0
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    ushll2.2d v4, v3, #0
+; CHECK-NEXT:    ushll2.2d v5, v3, #0
 ; CHECK-NEXT:    ushll.2d v3, v3, #0
 ; CHECK-NEXT:    ushll2.2d v7, v2, #0
+; CHECK-NEXT:    ushll2.2d v16, v4, #0
 ; CHECK-NEXT:    ushll.2d v2, v2, #0
-; CHECK-NEXT:    stp q3, q4, [x9, #-32]
-; CHECK-NEXT:    ushll2.2d v4, v5, #0
+; CHECK-NEXT:    ushll.2d v4, v4, #0
+; CHECK-NEXT:    stp q3, q5, [x9, #-32]
 ; CHECK-NEXT:    ushll2.4s v3, v6, #0
-; CHECK-NEXT:    ushll.2d v5, v5, #0
-; CHECK-NEXT:    stp q2, q7, [x9, #-64]
-; CHECK-NEXT:    ushll2.2d v7, v0, #0
+; CHECK-NEXT:    ushll2.2d v5, v0, #0
 ; CHECK-NEXT:    ushll.2d v0, v0, #0
-; CHECK-NEXT:    ushll.4s v2, v6, #0
-; CHECK-NEXT:    stp q5, q4, [x9, #-96]
-; CHECK-NEXT:    ushll2.2d v4, v3, #0
-; CHECK-NEXT:    ushll2.4s v5, v1, #0
+; CHECK-NEXT:    stp q4, q16, [x9, #-96]
+; CHECK-NEXT:    ushll.4s v6, v6, #0
+; CHECK-NEXT:    stp q2, q7, [x9, #-64]
+; CHECK-NEXT:    ushll2.4s v4, v1, #0
+; CHECK-NEXT:    ushll2.2d v2, v3, #0
 ; CHECK-NEXT:    ushll.2d v3, v3, #0
-; CHECK-NEXT:    stp q0, q7, [x9, #-128]
+; CHECK-NEXT:    stp q0, q5, [x9, #-128]
 ; CHECK-NEXT:    ushll.4s v0, v1, #0
-; CHECK-NEXT:    ushll2.2d v6, v2, #0
-; CHECK-NEXT:    ushll.2d v1, v2, #0
-; CHECK-NEXT:    ushll2.2d v2, v5, #0
-; CHECK-NEXT:    stp q3, q4, [x9, #96]
-; CHECK-NEXT:    ushll.2d v3, v5, #0
+; CHECK-NEXT:    ushll2.2d v5, v6, #0
+; CHECK-NEXT:    ushll.2d v1, v6, #0
+; CHECK-NEXT:    stp q3, q2, [x9, #96]
+; CHECK-NEXT:    ushll2.2d v2, v4, #0
+; CHECK-NEXT:    ushll.2d v3, v4, #0
 ; CHECK-NEXT:    ushll2.2d v4, v0, #0
 ; CHECK-NEXT:    ushll.2d v0, v0, #0
-; CHECK-NEXT:    stp q1, q6, [x9, #64]
+; CHECK-NEXT:    stp q1, q5, [x9, #64]
 ; CHECK-NEXT:    stp q3, q2, [x9, #32]
 ; CHECK-NEXT:    stp q0, q4, [x9], #128
 ; CHECK-NEXT:    b.ne LBB18_1
@@ -2678,9 +2678,9 @@ define void @zext_v8i8_to_v8i33_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    orr x10, x10, x12, lsl #4
 ; CHECK-NEXT:    fmov x12, d3
 ; CHECK-NEXT:    stp x10, x9, [x1, #16]
+; CHECK-NEXT:    fmov x9, d0
 ; CHECK-NEXT:    orr x11, x11, x12, lsl #2
-; CHECK-NEXT:    fmov x12, d0
-; CHECK-NEXT:    orr x9, x12, x13, lsl #33
+; CHECK-NEXT:    orr x9, x9, x13, lsl #33
 ; CHECK-NEXT:    stp x9, x11, [x1], #128
 ; CHECK-NEXT:    b.ne LBB22_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
@@ -2913,28 +2913,29 @@ define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
 ; CHECK-NEXT:  LBB25_1: ; %loop
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldr q4, [x0]
-; CHECK-NEXT:    ldp q16, q7, [x1, #32]
-; CHECK-NEXT:    ldr q18, [x8, #16]!
+; CHECK-NEXT:    ldp q17, q7, [x1, #32]
+; CHECK-NEXT:    ldr q18, [x1]
 ; CHECK-NEXT:    subs w2, w2, #1
 ; CHECK-NEXT:    tbl.16b v5, { v4 }, v3
 ; CHECK-NEXT:    tbl.16b v6, { v4 }, v0
-; CHECK-NEXT:    tbl.16b v17, { v4 }, v2
-; CHECK-NEXT:    tbl.16b v4, { v4 }, v1
+; CHECK-NEXT:    tbl.16b v16, { v4 }, v1
+; CHECK-NEXT:    tbl.16b v4, { v4 }, v2
+; CHECK-NEXT:    ldr q21, [x8, #16]!
+; CHECK-NEXT:    mov x1, x8
 ; CHECK-NEXT:    umull2.2d v19, v5, v7
+; CHECK-NEXT:    umull2.2d v20, v6, v17
+; CHECK-NEXT:    umull2.2d v22, v16, v18
 ; CHECK-NEXT:    umull.2d v5, v5, v7
-; CHECK-NEXT:    ldr q7, [x1]
-; CHECK-NEXT:    umull2.2d v20, v6, v16
-; CHECK-NEXT:    umull2.2d v21, v17, v18
-; CHECK-NEXT:    umull.2d v17, v17, v18
-; CHECK-NEXT:    umull2.2d v18, v4, v7
-; CHECK-NEXT:    umull.2d v4, v4, v7
-; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    stp q5, q19, [x0, #96]
-; CHECK-NEXT:    umull.2d v5, v6, v16
+; CHECK-NEXT:    umull2.2d v7, v4, v21
+; CHECK-NEXT:    umull.2d v4, v4, v21
+; CHECK-NEXT:    umull.2d v16, v16, v18
+; CHECK-NEXT:    umull.2d v6, v6, v17
 ; CHECK-NEXT:    str q20, [x0, #80]
-; CHECK-NEXT:    stp q4, q18, [x0]
-; CHECK-NEXT:    stp q17, q21, [x0, #32]
-; CHECK-NEXT:    str q5, [x0, #64]!
+; CHECK-NEXT:    stp q22, q4, [x0, #16]
+; CHECK-NEXT:    stp q5, q19, [x0, #96]
+; CHECK-NEXT:    str q7, [x0, #48]
+; CHECK-NEXT:    str q16, [x0]
+; CHECK-NEXT:    str q6, [x0, #64]!
 ; CHECK-NEXT:    b.ne LBB25_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    mov w0, wzr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
index 82d87358e1faf..1b8f0cac01cb1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
@@ -1744,12 +1744,12 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
 ; GFX10-NEXT:    v_or_b32_e32 v2, v6, v8
 ; GFX10-NEXT:    v_or_b32_e32 v8, v7, v9
 ; GFX10-NEXT:    v_ashrrev_i64 v[6:7], v3, v[4:5]
-; GFX10-NEXT:    v_ashrrev_i32_e32 v3, 31, v5
+; GFX10-NEXT:    v_ashrrev_i32_e32 v4, 31, v5
 ; GFX10-NEXT:    v_cndmask_b32_e32 v2, v10, v2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v11, v8, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v11, v8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v4, v1, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v3, v6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v4, v6, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_ashr_i65:
@@ -1757,22 +1757,21 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_bfe_i32 v4, v2, 0, 1
 ; GFX11-NEXT:    v_sub_nc_u32_e32 v2, 64, v3
-; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v3
 ; GFX11-NEXT:    v_lshrrev_b64 v[6:7], v3, v[0:1]
 ; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v3
-; GFX11-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v3
+; GFX11-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v2, v[4:5]
-; GFX11-NEXT:    v_ashrrev_i64 v[10:11], v10, v[4:5]
 ; GFX11-NEXT:    v_or_b32_e32 v2, v6, v8
 ; GFX11-NEXT:    v_or_b32_e32 v8, v7, v9
+; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v3
 ; GFX11-NEXT:    v_ashrrev_i64 v[6:7], v3, v[4:5]
-; GFX11-NEXT:    v_ashrrev_i32_e32 v3, 31, v5
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, v10, v2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v4, v11, v8, vcc_lo
+; GFX11-NEXT:    v_ashrrev_i64 v[10:11], v10, v[4:5]
+; GFX11-NEXT:    v_ashrrev_i32_e32 v4, 31, v5
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v10, v2 :: v_dual_cndmask_b32 v3, v11, v8
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v4, v1, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, v3, v6, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, v4, v6, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = ashr i65 %value, %amount
   ret i65 %result
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
index 9ef16aef0dd16..9b35920f8547a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
@@ -347,30 +347,30 @@ define i64 @dyn_extract_v8i64_const_s_v(i32 %sel) {
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s5
 ; GFX10-NEXT:    s_mov_b64 s[6:7], 1
-; GFX10-NEXT:    s_mov_b64 s[4:5], 3
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 2, v0
+; GFX10-NEXT:    s_mov_b64 s[8:9], 3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, s6, v1, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v2, s7, v2, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    s_mov_b64 s[6:7], 4
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    s_mov_b64 s[4:5], 5
+; GFX10-NEXT:    s_mov_b64 s[6:7], 4
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 4, v0
+; GFX10-NEXT:    s_mov_b64 s[8:9], 5
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    s_mov_b64 s[6:7], 6
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX10-NEXT:    s_mov_b64 s[4:5], 7
+; GFX10-NEXT:    s_mov_b64 s[6:7], 6
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 6, v0
+; GFX10-NEXT:    s_mov_b64 s[8:9], 7
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    s_mov_b64 s[6:7], 8
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
+; GFX10-NEXT:    s_mov_b64 s[6:7], 8
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, s4
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s7, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
@@ -382,30 +382,30 @@ define i64 @dyn_extract_v8i64_const_s_v(i32 %sel) {
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11-NEXT:    v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
 ; GFX11-NEXT:    s_mov_b64 s[2:3], 1
-; GFX11-NEXT:    s_mov_b64 s[0:1], 3
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 2, v0
+; GFX11-NEXT:    s_mov_b64 s[4:5], 3
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_mov_b64 s[2:3], 4
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    s_mov_b64 s[0:1], 5
+; GFX11-NEXT:    s_mov_b64 s[2:3], 4
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 4, v0
+; GFX11-NEXT:    s_mov_b64 s[4:5], 5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s3, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX11-NEXT:    s_mov_b64 s[2:3], 6
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX11-NEXT:    s_mov_b64 s[0:1], 7
+; GFX11-NEXT:    s_mov_b64 s[2:3], 6
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 6, v0
+; GFX11-NEXT:    s_mov_b64 s[4:5], 7
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s3, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX11-NEXT:    s_mov_b64 s[2:3], 8
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
+; GFX11-NEXT:    s_mov_b64 s[2:3], 8
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index 2a186f527ab70..04db85ade203a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -5990,98 +5990,98 @@ define i128 @v_fshl_i128(i128 %lhs, i128 %rhs, i128 %amt) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_and_b32_e32 v18, 0x7f, v8
-; GFX10-NEXT:    v_not_b32_e32 v10, v8
+; GFX10-NEXT:    v_not_b32_e32 v12, v8
 ; GFX10-NEXT:    v_lshrrev_b64 v[4:5], 1, v[4:5]
-; GFX10-NEXT:    v_lshrrev_b64 v[12:13], 1, v[6:7]
-; GFX10-NEXT:    v_sub_nc_u32_e32 v11, 64, v18
-; GFX10-NEXT:    v_and_b32_e32 v19, 0x7f, v10
+; GFX10-NEXT:    v_sub_nc_u32_e32 v10, 64, v18
+; GFX10-NEXT:    v_and_b32_e32 v19, 0x7f, v12
+; GFX10-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v18
 ; GFX10-NEXT:    v_lshlrev_b64 v[8:9], v18, v[2:3]
 ; GFX10-NEXT:    v_lshl_or_b32 v5, v6, 31, v5
-; GFX10-NEXT:    v_add_nc_u32_e32 v20, 0xffffffc0, v18
-; GFX10-NEXT:    v_lshrrev_b64 v[10:11], v11, v[0:1]
+; GFX10-NEXT:    v_lshrrev_b64 v[10:11], v10, v[0:1]
+; GFX10-NEXT:    v_lshrrev_b64 v[6:7], 1, v[6:7]
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v16, 64, v19
-; GFX10-NEXT:    v_lshlrev_b64 v[6:7], v18, v[0:1]
-; GFX10-NEXT:    v_lshrrev_b64 v[14:15], v19, v[4:5]
-; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v20, v[0:1]
+; GFX10-NEXT:    v_lshlrev_b64 v[12:13], v18, v[0:1]
+; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v14, v[0:1]
 ; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v18
-; GFX10-NEXT:    v_or_b32_e32 v10, v10, v8
-; GFX10-NEXT:    v_add_nc_u32_e32 v8, 0xffffffc0, v19
-; GFX10-NEXT:    v_lshlrev_b64 v[16:17], v16, v[12:13]
+; GFX10-NEXT:    v_or_b32_e32 v8, v10, v8
+; GFX10-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v19
+; GFX10-NEXT:    v_lshrrev_b64 v[14:15], v19, v[4:5]
+; GFX10-NEXT:    v_lshlrev_b64 v[16:17], v16, v[6:7]
 ; GFX10-NEXT:    v_or_b32_e32 v11, v11, v9
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s4, 64, v19
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v0, v10, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v8, v[12:13]
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 0, v19
-; GFX10-NEXT:    v_or_b32_e32 v14, v14, v16
-; GFX10-NEXT:    v_or_b32_e32 v15, v15, v17
+; GFX10-NEXT:    v_cndmask_b32_e32 v20, v0, v8, vcc_lo
+; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v10, v[6:7]
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s5, 64, v19
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v18
+; GFX10-NEXT:    v_or_b32_e32 v0, v14, v16
+; GFX10-NEXT:    v_or_b32_e32 v10, v15, v17
 ; GFX10-NEXT:    v_cndmask_b32_e32 v11, v1, v11, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v19, v[12:13]
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v14, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v15, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v9, v5, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, 0, v0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, 0, v1, s4
-; GFX10-NEXT:    v_or_b32_e32 v0, v6, v4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v19
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, 0, v12, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v0, s5
+; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v19, v[6:7]
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v9, v10, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, 0, v13, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v20, v2, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, 0, v0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, 0, v1, s5
+; GFX10-NEXT:    v_or_b32_e32 v0, v12, v4
 ; GFX10-NEXT:    v_or_b32_e32 v1, v7, v5
-; GFX10-NEXT:    v_or_b32_e32 v2, v2, v8
-; GFX10-NEXT:    v_or_b32_e32 v3, v3, v9
+; GFX10-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v8
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_fshl_i128:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_and_b32_e32 v18, 0x7f, v8
-; GFX11-NEXT:    v_not_b32_e32 v10, v8
+; GFX11-NEXT:    v_not_b32_e32 v12, v8
 ; GFX11-NEXT:    v_lshrrev_b64 v[4:5], 1, v[4:5]
-; GFX11-NEXT:    v_lshrrev_b64 v[12:13], 1, v[6:7]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v11, 64, v18
-; GFX11-NEXT:    v_and_b32_e32 v19, 0x7f, v10
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_sub_nc_u32_e32 v10, 64, v18
+; GFX11-NEXT:    v_and_b32_e32 v19, 0x7f, v12
 ; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v18, v[2:3]
+; GFX11-NEXT:    v_lshlrev_b64 v[12:13], v18, v[0:1]
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v18
+; GFX11-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v18
+; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v10, v[0:1]
 ; GFX11-NEXT:    v_lshl_or_b32 v5, v6, 31, v5
-; GFX11-NEXT:    v_lshlrev_b64 v[6:7], v18, v[0:1]
-; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v11, v[0:1]
+; GFX11-NEXT:    v_lshrrev_b64 v[6:7], 1, v[6:7]
 ; GFX11-NEXT:    v_sub_nc_u32_e32 v16, 64, v19
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v18
-; GFX11-NEXT:    v_add_nc_u32_e32 v20, 0xffffffc0, v18
+; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v14, v[0:1]
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v19
+; GFX11-NEXT:    v_or_b32_e32 v8, v10, v8
+; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v19
 ; GFX11-NEXT:    v_lshrrev_b64 v[14:15], v19, v[4:5]
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v19
-; GFX11-NEXT:    v_or_b32_e32 v10, v10, v8
-; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc_lo
-; GFX11-NEXT:    v_add_nc_u32_e32 v8, 0xffffffc0, v19
-; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v16, v[12:13]
-; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v20, v[0:1]
+; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v16, v[6:7]
 ; GFX11-NEXT:    v_or_b32_e32 v11, v11, v9
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v19
-; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v8, v[12:13]
-; GFX11-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc_lo
-; GFX11-NEXT:    v_or_b32_e32 v14, v14, v16
-; GFX11-NEXT:    v_or_b32_e32 v15, v15, v17
-; GFX11-NEXT:    v_dual_cndmask_b32 v10, v0, v10 :: v_dual_cndmask_b32 v11, v1, v11
-; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v19, v[12:13]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v14, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v15, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v9, v5, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, 0, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, 0, v1, s0
-; GFX11-NEXT:    v_or_b32_e32 v0, v6, v4
+; GFX11-NEXT:    v_cndmask_b32_e32 v20, v0, v8, vcc_lo
+; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v10, v[6:7]
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v19
+; GFX11-NEXT:    v_cndmask_b32_e32 v12, 0, v12, vcc_lo
+; GFX11-NEXT:    v_or_b32_e32 v0, v14, v16
+; GFX11-NEXT:    v_or_b32_e32 v10, v15, v17
+; GFX11-NEXT:    v_cndmask_b32_e32 v11, v1, v11, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v18
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v0, s1
+; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v19, v[6:7]
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v9, v10, s1
+; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0, v13, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v20, v2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, 0, v0, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v8, 0, v1, s1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v0, v12, v4
 ; GFX11-NEXT:    v_or_b32_e32 v1, v7, v5
-; GFX11-NEXT:    v_or_b32_e32 v2, v2, v8
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_or_b32_e32 v3, v3, v9
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX11-NEXT:    v_or_b32_e32 v3, v3, v8
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i128 @llvm.fshl.i128(i128 %lhs, i128 %rhs, i128 %amt)
   ret i128 %result
@@ -6249,45 +6249,45 @@ define amdgpu_ps <4 x float> @v_fshl_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
 ; GFX10-LABEL: v_fshl_i128_ssv:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_b32_e32 v12, 0x7f, v0
-; GFX10-NEXT:    v_not_b32_e32 v2, v0
+; GFX10-NEXT:    v_not_b32_e32 v6, v0
 ; GFX10-NEXT:    s_mov_b32 s8, 0
 ; GFX10-NEXT:    s_lshr_b64 s[4:5], s[4:5], 1
 ; GFX10-NEXT:    s_lshl_b32 s9, s6, 31
-; GFX10-NEXT:    v_sub_nc_u32_e32 v3, 64, v12
-; GFX10-NEXT:    v_and_b32_e32 v13, 0x7f, v2
+; GFX10-NEXT:    v_sub_nc_u32_e32 v2, 64, v12
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v12, s[2:3]
+; GFX10-NEXT:    v_and_b32_e32 v13, 0x7f, v6
+; GFX10-NEXT:    v_add_nc_u32_e32 v7, 0xffffffc0, v12
 ; GFX10-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v2, s[0:1]
 ; GFX10-NEXT:    s_lshr_b64 s[6:7], s[6:7], 1
-; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v3, s[0:1]
-; GFX10-NEXT:    v_sub_nc_u32_e32 v8, 64, v13
-; GFX10-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v12
-; GFX10-NEXT:    v_lshrrev_b64 v[6:7], v13, s[8:9]
 ; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v12
+; GFX10-NEXT:    v_lshlrev_b64 v[6:7], v7, s[0:1]
+; GFX10-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v13
+; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v13, s[8:9]
+; GFX10-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX10-NEXT:    v_sub_nc_u32_e32 v2, 64, v13
 ; GFX10-NEXT:    v_lshlrev_b64 v[4:5], v12, s[0:1]
-; GFX10-NEXT:    v_or_b32_e32 v2, v2, v0
-; GFX10-NEXT:    v_add_nc_u32_e32 v0, 0xffffffc0, v13
-; GFX10-NEXT:    v_lshlrev_b64 v[8:9], v8, s[6:7]
-; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v10, s[0:1]
-; GFX10-NEXT:    v_or_b32_e32 v3, v3, v1
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s0, 64, v13
-; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v0, s[6:7]
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 0, v13
-; GFX10-NEXT:    v_or_b32_e32 v6, v6, v8
-; GFX10-NEXT:    v_or_b32_e32 v7, v7, v9
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v10, v2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v11, v3, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v13, s[6:7]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v6, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v12
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v7, s0
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s1, 64, v13
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 0, v12
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v0, vcc_lo
+; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v2, s[6:7]
+; GFX10-NEXT:    v_or_b32_e32 v2, v3, v1
+; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v14, s[6:7]
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v13
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s8, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v8, s2, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v10, s3, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s9, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s0
+; GFX10-NEXT:    v_or_b32_e32 v3, v8, v10
+; GFX10-NEXT:    v_or_b32_e32 v8, v9, v11
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s1
+; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v13, s[6:7]
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s3, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s8, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s9, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s1
 ; GFX10-NEXT:    v_or_b32_e32 v0, v4, v0
 ; GFX10-NEXT:    v_or_b32_e32 v1, v5, v1
 ; GFX10-NEXT:    v_or_b32_e32 v2, v6, v2
@@ -6297,51 +6297,56 @@ define amdgpu_ps <4 x float> @v_fshl_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
 ; GFX11-LABEL: v_fshl_i128_ssv:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    v_and_b32_e32 v12, 0x7f, v0
-; GFX11-NEXT:    v_not_b32_e32 v2, v0
-; GFX11-NEXT:    s_mov_b32 s8, 0
-; GFX11-NEXT:    s_lshr_b64 s[4:5], s[4:5], 1
+; GFX11-NEXT:    v_not_b32_e32 v6, v0
 ; GFX11-NEXT:    s_lshl_b32 s9, s6, 31
-; GFX11-NEXT:    v_lshlrev_b64 v[4:5], v12, s[0:1]
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v12
-; GFX11-NEXT:    v_and_b32_e32 v13, 0x7f, v2
-; GFX11-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
 ; GFX11-NEXT:    s_lshr_b64 s[6:7], s[6:7], 1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT:    v_dual_cndmask_b32 v4, 0, v4 :: v_dual_cndmask_b32 v5, 0, v5
-; GFX11-NEXT:    v_sub_nc_u32_e32 v3, 64, v12
+; GFX11-NEXT:    s_mov_b32 s8, 0
+; GFX11-NEXT:    v_sub_nc_u32_e32 v2, 64, v12
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v12, s[2:3]
-; GFX11-NEXT:    v_sub_nc_u32_e32 v8, 64, v13
-; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v12
-; GFX11-NEXT:    v_lshrrev_b64 v[6:7], v13, s[8:9]
-; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v3, s[0:1]
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s4, 0, v12
-; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v8, s[6:7]
-; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v10, s[0:1]
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v13
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v13
-; GFX11-NEXT:    v_or_b32_e32 v2, v2, v0
-; GFX11-NEXT:    v_add_nc_u32_e32 v0, 0xffffffc0, v13
-; GFX11-NEXT:    v_or_b32_e32 v3, v3, v1
-; GFX11-NEXT:    v_or_b32_e32 v6, v6, v8
-; GFX11-NEXT:    v_or_b32_e32 v7, v7, v9
-; GFX11-NEXT:    v_cndmask_b32_e32 v8, v10, v2, vcc_lo
-; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v0, s[6:7]
-; GFX11-NEXT:    v_cndmask_b32_e32 v10, v11, v3, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v12
+; GFX11-NEXT:    v_and_b32_e32 v13, 0x7f, v6
+; GFX11-NEXT:    v_add_nc_u32_e32 v7, 0xffffffc0, v12
+; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v2, s[0:1]
+; GFX11-NEXT:    s_lshr_b64 s[4:5], s[4:5], 1
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX11-NEXT:    v_lshlrev_b64 v[6:7], v7, s[0:1]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX11-NEXT:    v_sub_nc_u32_e32 v2, 64, v13
+; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v2, s[6:7]
+; GFX11-NEXT:    v_or_b32_e32 v2, v3, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e32 v7, v7, v2, vcc_lo
+; GFX11-NEXT:    v_lshlrev_b64 v[4:5], v12, s[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e32 v6, v6, v0, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v12
+; GFX11-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc_lo
+; GFX11-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v13
+; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v13, s[8:9]
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v13
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s4, 0, v13
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc_lo
+; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v14, s[6:7]
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
+; GFX11-NEXT:    v_or_b32_e32 v3, v8, v10
+; GFX11-NEXT:    v_or_b32_e32 v8, v9, v11
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s3, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s1
 ; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v13, s[6:7]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v6, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v7, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v8, s2, s4
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v10, s3, s4
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s8, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s9, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_or_b32_e32 v2, v6, v2
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s8, s4
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s9, s4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s1
 ; GFX11-NEXT:    v_or_b32_e32 v0, v4, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_or_b32_e32 v1, v5, v1
+; GFX11-NEXT:    v_or_b32_e32 v2, v6, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-NEXT:    v_or_b32_e32 v3, v7, v3
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.fshl.i128(i128 %lhs, i128 %rhs, i128 %amt)
@@ -6783,49 +6788,49 @@ define amdgpu_ps <4 x float> @v_fshl_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
 ; GFX10-LABEL: v_fshl_i128_vss:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_and_b32 s5, s4, 0x7f
-; GFX10-NEXT:    s_sub_i32 s6, s5, 64
 ; GFX10-NEXT:    s_sub_i32 s7, 64, s5
+; GFX10-NEXT:    s_sub_i32 s6, s5, 64
 ; GFX10-NEXT:    s_cmp_lt_u32 s5, 64
 ; GFX10-NEXT:    v_lshrrev_b64 v[4:5], s7, v[0:1]
+; GFX10-NEXT:    v_lshlrev_b64 v[6:7], s5, v[2:3]
 ; GFX10-NEXT:    s_cselect_b32 s8, 1, 0
 ; GFX10-NEXT:    s_cmp_eq_u32 s5, 0
-; GFX10-NEXT:    v_lshlrev_b64 v[6:7], s5, v[2:3]
-; GFX10-NEXT:    s_cselect_b32 s9, 1, 0
 ; GFX10-NEXT:    v_lshlrev_b64 v[8:9], s5, v[0:1]
-; GFX10-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
-; GFX10-NEXT:    s_mov_b32 s6, 0
-; GFX10-NEXT:    s_lshr_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT:    s_lshl_b32 s7, s2, 31
+; GFX10-NEXT:    s_cselect_b32 s9, 1, 0
 ; GFX10-NEXT:    s_and_b32 s5, 1, s8
-; GFX10-NEXT:    s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX10-NEXT:    s_andn2_b32 s6, 0x7f, s4
+; GFX10-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
 ; GFX10-NEXT:    v_or_b32_e32 v4, v4, v6
 ; GFX10-NEXT:    v_or_b32_e32 v5, v5, v7
 ; GFX10-NEXT:    s_and_b32 s5, 1, s9
-; GFX10-NEXT:    s_lshr_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT:    s_not_b32 s8, s4
-; GFX10-NEXT:    s_sub_i32 s10, s6, 64
-; GFX10-NEXT:    s_sub_i32 s7, 64, s6
-; GFX10-NEXT:    s_cmp_lt_u32 s6, 64
+; GFX10-NEXT:    s_mov_b32 s6, 0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v6, 0, v8, vcc_lo
-; GFX10-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX10-NEXT:    s_cmp_eq_u32 s6, 0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v7, 0, v9, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX10-NEXT:    s_lshr_b64 s[0:1], s[0:1], 1
+; GFX10-NEXT:    s_lshl_b32 s7, s2, 31
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
+; GFX10-NEXT:    s_andn2_b32 s5, 0x7f, s4
+; GFX10-NEXT:    s_or_b64 s[0:1], s[0:1], s[6:7]
+; GFX10-NEXT:    s_lshr_b64 s[2:3], s[2:3], 1
+; GFX10-NEXT:    s_not_b32 s8, s4
+; GFX10-NEXT:    s_sub_i32 s10, s5, 64
+; GFX10-NEXT:    s_sub_i32 s6, 64, s5
+; GFX10-NEXT:    s_cmp_lt_u32 s5, 64
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc_lo
+; GFX10-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX10-NEXT:    s_cmp_eq_u32 s5, 0
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v1, v3, vcc_lo
 ; GFX10-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX10-NEXT:    s_lshr_b64 s[4:5], s[0:1], s8
-; GFX10-NEXT:    s_lshl_b64 s[6:7], s[2:3], s7
+; GFX10-NEXT:    s_lshl_b64 s[6:7], s[2:3], s6
 ; GFX10-NEXT:    s_lshr_b64 s[8:9], s[2:3], s8
 ; GFX10-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GFX10-NEXT:    s_lshr_b64 s[2:3], s[2:3], s10
 ; GFX10-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc_lo
 ; GFX10-NEXT:    s_cselect_b64 s[2:3], s[4:5], s[2:3]
 ; GFX10-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v1, v3, vcc_lo
 ; GFX10-NEXT:    s_cselect_b64 s[0:1], s[0:1], s[2:3]
 ; GFX10-NEXT:    s_cmp_lg_u32 s11, 0
 ; GFX10-NEXT:    v_or_b32_e32 v0, s0, v6
@@ -6839,44 +6844,45 @@ define amdgpu_ps <4 x float> @v_fshl_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_and_b32 s5, s4, 0x7f
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_sub_i32 s6, s5, 64
 ; GFX11-NEXT:    s_sub_i32 s7, 64, s5
+; GFX11-NEXT:    s_sub_i32 s6, s5, 64
 ; GFX11-NEXT:    s_cmp_lt_u32 s5, 64
 ; GFX11-NEXT:    v_lshrrev_b64 v[4:5], s7, v[0:1]
+; GFX11-NEXT:    v_lshlrev_b64 v[6:7], s5, v[2:3]
 ; GFX11-NEXT:    s_cselect_b32 s8, 1, 0
 ; GFX11-NEXT:    s_cmp_eq_u32 s5, 0
-; GFX11-NEXT:    v_lshlrev_b64 v[6:7], s5, v[2:3]
-; GFX11-NEXT:    s_cselect_b32 s9, 1, 0
 ; GFX11-NEXT:    v_lshlrev_b64 v[8:9], s5, v[0:1]
-; GFX11-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
-; GFX11-NEXT:    s_mov_b32 s6, 0
-; GFX11-NEXT:    s_lshr_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT:    s_lshl_b32 s7, s2, 31
+; GFX11-NEXT:    s_cselect_b32 s9, 1, 0
 ; GFX11-NEXT:    s_and_b32 s5, 1, s8
-; GFX11-NEXT:    s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX11-NEXT:    s_and_not1_b32 s6, 0x7f, s4
+; GFX11-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
 ; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
 ; GFX11-NEXT:    v_or_b32_e32 v4, v4, v6
 ; GFX11-NEXT:    v_or_b32_e32 v5, v5, v7
 ; GFX11-NEXT:    s_and_b32 s5, 1, s9
-; GFX11-NEXT:    s_lshr_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT:    s_not_b32 s8, s4
-; GFX11-NEXT:    s_sub_i32 s10, s6, 64
-; GFX11-NEXT:    s_sub_i32 s7, 64, s6
-; GFX11-NEXT:    s_cmp_lt_u32 s6, 64
+; GFX11-NEXT:    s_mov_b32 s6, 0
 ; GFX11-NEXT:    v_dual_cndmask_b32 v6, 0, v8 :: v_dual_cndmask_b32 v7, 0, v9
-; GFX11-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX11-NEXT:    s_cmp_eq_u32 s6, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
+; GFX11-NEXT:    s_lshr_b64 s[0:1], s[0:1], 1
+; GFX11-NEXT:    s_lshl_b32 s7, s2, 31
 ; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
+; GFX11-NEXT:    s_and_not1_b32 s5, 0x7f, s4
+; GFX11-NEXT:    s_or_b64 s[0:1], s[0:1], s[6:7]
+; GFX11-NEXT:    s_lshr_b64 s[2:3], s[2:3], 1
+; GFX11-NEXT:    s_not_b32 s8, s4
+; GFX11-NEXT:    s_sub_i32 s10, s5, 64
+; GFX11-NEXT:    s_sub_i32 s6, 64, s5
+; GFX11-NEXT:    s_cmp_lt_u32 s5, 64
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v0, v2 :: v_dual_cndmask_b32 v3, v1, v3
+; GFX11-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX11-NEXT:    s_cmp_eq_u32 s5, 0
 ; GFX11-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX11-NEXT:    s_lshr_b64 s[4:5], s[0:1], s8
-; GFX11-NEXT:    s_lshl_b64 s[6:7], s[2:3], s7
+; GFX11-NEXT:    s_lshl_b64 s[6:7], s[2:3], s6
 ; GFX11-NEXT:    s_lshr_b64 s[8:9], s[2:3], s8
 ; GFX11-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GFX11-NEXT:    s_lshr_b64 s[2:3], s[2:3], s10
 ; GFX11-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v0, v2 :: v_dual_cndmask_b32 v3, v1, v3
 ; GFX11-NEXT:    s_cselect_b64 s[2:3], s[4:5], s[2:3]
 ; GFX11-NEXT:    s_cmp_lg_u32 s12, 0
 ; GFX11-NEXT:    s_cselect_b64 s[0:1], s[0:1], s[2:3]
@@ -7741,85 +7747,85 @@ define <2 x i128> @v_fshl_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
 ; GFX10-NEXT:    v_and_b32_e32 v27, 0x7f, v16
 ; GFX10-NEXT:    v_not_b32_e32 v21, v16
 ; GFX10-NEXT:    v_lshrrev_b64 v[8:9], 1, v[8:9]
-; GFX10-NEXT:    v_sub_nc_u32_e32 v17, 64, v27
+; GFX10-NEXT:    v_sub_nc_u32_e32 v18, 64, v27
 ; GFX10-NEXT:    v_and_b32_e32 v28, 0x7f, v21
-; GFX10-NEXT:    v_lshlrev_b64 v[18:19], v27, v[2:3]
+; GFX10-NEXT:    v_add_nc_u32_e32 v23, 0xffffffc0, v27
+; GFX10-NEXT:    v_lshlrev_b64 v[21:22], v27, v[2:3]
 ; GFX10-NEXT:    v_lshl_or_b32 v9, v10, 31, v9
+; GFX10-NEXT:    v_lshrrev_b64 v[18:19], v18, v[0:1]
 ; GFX10-NEXT:    v_lshrrev_b64 v[10:11], 1, v[10:11]
-; GFX10-NEXT:    v_lshrrev_b64 v[16:17], v17, v[0:1]
-; GFX10-NEXT:    v_add_nc_u32_e32 v29, 0xffffffc0, v27
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v25, 64, v28
-; GFX10-NEXT:    v_lshlrev_b64 v[21:22], v27, v[0:1]
-; GFX10-NEXT:    v_lshrrev_b64 v[23:24], v28, v[8:9]
+; GFX10-NEXT:    v_lshlrev_b64 v[16:17], v27, v[0:1]
+; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v23, v[0:1]
 ; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v27
-; GFX10-NEXT:    v_or_b32_e32 v18, v16, v18
-; GFX10-NEXT:    v_add_nc_u32_e32 v16, 0xffffffc0, v28
+; GFX10-NEXT:    v_or_b32_e32 v18, v18, v21
+; GFX10-NEXT:    v_add_nc_u32_e32 v21, 0xffffffc0, v28
+; GFX10-NEXT:    v_lshrrev_b64 v[23:24], v28, v[8:9]
 ; GFX10-NEXT:    v_lshlrev_b64 v[25:26], v25, v[10:11]
-; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v29, v[0:1]
-; GFX10-NEXT:    v_or_b32_e32 v19, v17, v19
-; GFX10-NEXT:    v_cndmask_b32_e32 v21, 0, v21, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b64 v[16:17], v16, v[10:11]
-; GFX10-NEXT:    v_cndmask_b32_e32 v22, 0, v22, vcc_lo
-; GFX10-NEXT:    v_or_b32_e32 v23, v23, v25
-; GFX10-NEXT:    v_cndmask_b32_e32 v18, v0, v18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v19, v1, v19, vcc_lo
-; GFX10-NEXT:    v_or_b32_e32 v24, v24, v26
-; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v28
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s5, 64, v28
+; GFX10-NEXT:    v_cndmask_b32_e32 v29, v0, v18, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v0, v19, v22
+; GFX10-NEXT:    v_lshrrev_b64 v[18:19], v21, v[10:11]
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v21, v23, v25
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v27
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 0, v28
+; GFX10-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v23, v1, v0, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v28
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v21, s5
+; GFX10-NEXT:    v_or_b32_e32 v22, v24, v26
 ; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v28, v[10:11]
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v23, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v17, v24, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v23, v19, v3, s4
-; GFX10-NEXT:    v_and_b32_e32 v24, 0x7f, v20
-; GFX10-NEXT:    v_cndmask_b32_e32 v25, 0, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v16, v8, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v10, v9, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v29, v2, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v18, v8, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v19, v22, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v22, v23, v3, s4
+; GFX10-NEXT:    v_and_b32_e32 v23, 0x7f, v20
+; GFX10-NEXT:    v_cndmask_b32_e64 v24, 0, v0, s5
+; GFX10-NEXT:    v_or_b32_e32 v0, v16, v2
 ; GFX10-NEXT:    v_not_b32_e32 v16, v20
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, 0, v0, vcc_lo
-; GFX10-NEXT:    v_or_b32_e32 v0, v21, v3
-; GFX10-NEXT:    v_or_b32_e32 v1, v22, v8
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v10, v9, vcc_lo
 ; GFX10-NEXT:    v_lshrrev_b64 v[8:9], 1, v[12:13]
-; GFX10-NEXT:    v_sub_nc_u32_e32 v3, 64, v24
-; GFX10-NEXT:    v_and_b32_e32 v22, 0x7f, v16
-; GFX10-NEXT:    v_or_b32_e32 v2, v2, v10
-; GFX10-NEXT:    v_lshlrev_b64 v[12:13], v24, v[6:7]
-; GFX10-NEXT:    v_lshlrev_b64 v[16:17], v24, v[4:5]
-; GFX10-NEXT:    v_lshrrev_b64 v[10:11], v3, v[4:5]
+; GFX10-NEXT:    v_sub_nc_u32_e32 v2, 64, v23
+; GFX10-NEXT:    v_cndmask_b32_e64 v25, 0, v1, s5
+; GFX10-NEXT:    v_and_b32_e32 v20, 0x7f, v16
+; GFX10-NEXT:    v_or_b32_e32 v1, v17, v3
+; GFX10-NEXT:    v_add_nc_u32_e32 v17, 0xffffffc0, v23
+; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v23, v[6:7]
 ; GFX10-NEXT:    v_lshl_or_b32 v9, v14, 31, v9
 ; GFX10-NEXT:    v_lshrrev_b64 v[14:15], 1, v[14:15]
-; GFX10-NEXT:    v_sub_nc_u32_e32 v20, 64, v22
-; GFX10-NEXT:    v_add_nc_u32_e32 v3, 0xffffffc0, v24
-; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v24
-; GFX10-NEXT:    v_or_b32_e32 v12, v10, v12
-; GFX10-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v22
-; GFX10-NEXT:    v_lshrrev_b64 v[18:19], v22, v[8:9]
-; GFX10-NEXT:    v_lshlrev_b64 v[20:21], v20, v[14:15]
-; GFX10-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GFX10-NEXT:    v_or_b32_e32 v5, v11, v13
-; GFX10-NEXT:    v_lshrrev_b64 v[10:11], v10, v[14:15]
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, 0, v16, vcc_lo
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s4, 64, v22
-; GFX10-NEXT:    v_or_b32_e32 v16, v18, v20
-; GFX10-NEXT:    v_or_b32_e32 v18, v19, v21
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v3, v12, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b64 v[3:4], v22, v[14:15]
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 0, v22
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v24
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v18, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, 0, v17, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v12, v6, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v10, v8, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v11, v9, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, 0, v3, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, 0, v4, s4
-; GFX10-NEXT:    v_or_b32_e32 v3, v23, v25
-; GFX10-NEXT:    v_or_b32_e32 v4, v13, v5
-; GFX10-NEXT:    v_or_b32_e32 v5, v14, v8
+; GFX10-NEXT:    v_sub_nc_u32_e32 v18, 64, v20
+; GFX10-NEXT:    v_lshlrev_b64 v[12:13], v23, v[4:5]
+; GFX10-NEXT:    v_lshlrev_b64 v[4:5], v17, v[4:5]
+; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v23
+; GFX10-NEXT:    v_or_b32_e32 v10, v2, v10
+; GFX10-NEXT:    v_add_nc_u32_e32 v26, 0xffffffc0, v20
+; GFX10-NEXT:    v_lshrrev_b64 v[16:17], v20, v[8:9]
+; GFX10-NEXT:    v_lshlrev_b64 v[18:19], v18, v[14:15]
+; GFX10-NEXT:    v_or_b32_e32 v2, v21, v24
+; GFX10-NEXT:    v_or_b32_e32 v11, v3, v11
+; GFX10-NEXT:    v_cndmask_b32_e32 v21, v4, v10, vcc_lo
+; GFX10-NEXT:    v_lshrrev_b64 v[3:4], v26, v[14:15]
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s5, 64, v20
+; GFX10-NEXT:    v_or_b32_e32 v10, v16, v18
+; GFX10-NEXT:    v_or_b32_e32 v16, v17, v19
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v23
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v20
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v10, s5
+; GFX10-NEXT:    v_lshrrev_b64 v[10:11], v20, v[14:15]
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, 0, v12, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v21, v6, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v3, v8, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v4, v9, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, 0, v10, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, 0, v11, s5
+; GFX10-NEXT:    v_or_b32_e32 v3, v22, v25
+; GFX10-NEXT:    v_or_b32_e32 v4, v12, v5
+; GFX10-NEXT:    v_or_b32_e32 v5, v13, v8
 ; GFX10-NEXT:    v_or_b32_e32 v6, v6, v9
 ; GFX10-NEXT:    v_or_b32_e32 v7, v7, v10
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
@@ -7830,92 +7836,93 @@ define <2 x i128> @v_fshl_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
 ; GFX11-NEXT:    v_and_b32_e32 v27, 0x7f, v16
 ; GFX11-NEXT:    v_not_b32_e32 v21, v16
 ; GFX11-NEXT:    v_lshrrev_b64 v[8:9], 1, v[8:9]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v27, v[0:1]
+; GFX11-NEXT:    v_sub_nc_u32_e32 v18, 64, v27
 ; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v27
-; GFX11-NEXT:    v_and_b32_e32 v28, 0x7f, v21
-; GFX11-NEXT:    v_lshlrev_b64 v[21:22], v27, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_add_nc_u32_e32 v23, 0xffffffc0, v27
 ; GFX11-NEXT:    v_lshl_or_b32 v9, v10, 31, v9
 ; GFX11-NEXT:    v_lshrrev_b64 v[10:11], 1, v[10:11]
-; GFX11-NEXT:    v_cndmask_b32_e32 v22, 0, v22, vcc_lo
-; GFX11-NEXT:    v_sub_nc_u32_e32 v17, 64, v27
-; GFX11-NEXT:    v_lshlrev_b64 v[18:19], v27, v[2:3]
+; GFX11-NEXT:    v_lshrrev_b64 v[18:19], v18, v[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc_lo
+; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v23, v[0:1]
+; GFX11-NEXT:    v_and_b32_e32 v28, 0x7f, v21
+; GFX11-NEXT:    v_lshlrev_b64 v[21:22], v27, v[2:3]
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v27
-; GFX11-NEXT:    v_cndmask_b32_e32 v21, 0, v21, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_lshrrev_b64 v[16:17], v17, v[0:1]
-; GFX11-NEXT:    v_or_b32_e32 v18, v16, v18
-; GFX11-NEXT:    v_add_nc_u32_e32 v29, 0xffffffc0, v27
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_or_b32_e32 v19, v17, v19
-; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v29, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_dual_cndmask_b32 v18, v0, v18 :: v_dual_cndmask_b32 v19, v1, v19
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_or_b32_e32 v18, v18, v21
+; GFX11-NEXT:    v_cndmask_b32_e32 v29, v0, v18, vcc_lo
 ; GFX11-NEXT:    v_sub_nc_u32_e32 v25, 64, v28
-; GFX11-NEXT:    v_add_nc_u32_e32 v16, 0xffffffc0, v28
+; GFX11-NEXT:    v_add_nc_u32_e32 v21, 0xffffffc0, v28
 ; GFX11-NEXT:    v_lshrrev_b64 v[23:24], v28, v[8:9]
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v28
-; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v28, v[10:11]
+; GFX11-NEXT:    v_or_b32_e32 v0, v19, v22
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v28
 ; GFX11-NEXT:    v_lshlrev_b64 v[25:26], v25, v[10:11]
-; GFX11-NEXT:    v_lshrrev_b64 v[16:17], v16, v[10:11]
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v28
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_or_b32_e32 v23, v23, v25
-; GFX11-NEXT:    v_or_b32_e32 v24, v24, v26
-; GFX11-NEXT:    v_dual_cndmask_b32 v25, 0, v1 :: v_dual_cndmask_b32 v16, v16, v23
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e32 v10, v17, v24, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v23, v19, v3, s0
-; GFX11-NEXT:    v_and_b32_e32 v24, 0x7f, v20
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v16, v8, s1
+; GFX11-NEXT:    v_lshrrev_b64 v[18:19], v21, v[10:11]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v21, v23, v25
+; GFX11-NEXT:    v_cndmask_b32_e32 v23, v1, v0, vcc_lo
+; GFX11-NEXT:    v_or_b32_e32 v22, v24, v26
+; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v28, v[10:11]
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v10, v9, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v18, v18, v21, s1
+; GFX11-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v28
+; GFX11-NEXT:    v_cndmask_b32_e64 v21, v29, v2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v19, v22, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v22, v23, v3, s0
+; GFX11-NEXT:    v_and_b32_e32 v23, 0x7f, v20
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, v18, v8, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v24, 0, v0, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v25, 0, v1, s1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v23
+; GFX11-NEXT:    v_or_b32_e32 v0, v16, v2
 ; GFX11-NEXT:    v_not_b32_e32 v16, v20
-; GFX11-NEXT:    v_cndmask_b32_e32 v10, 0, v0, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v24
-; GFX11-NEXT:    v_or_b32_e32 v0, v21, v3
-; GFX11-NEXT:    v_or_b32_e32 v1, v22, v8
+; GFX11-NEXT:    v_cndmask_b32_e32 v3, v10, v9, vcc_lo
 ; GFX11-NEXT:    v_lshrrev_b64 v[8:9], 1, v[12:13]
-; GFX11-NEXT:    v_sub_nc_u32_e32 v3, 64, v24
-; GFX11-NEXT:    v_and_b32_e32 v22, 0x7f, v16
-; GFX11-NEXT:    v_or_b32_e32 v2, v2, v10
-; GFX11-NEXT:    v_lshlrev_b64 v[12:13], v24, v[6:7]
-; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v24, v[4:5]
-; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v3, v[4:5]
+; GFX11-NEXT:    v_sub_nc_u32_e32 v2, 64, v23
+; GFX11-NEXT:    v_lshlrev_b64 v[12:13], v23, v[4:5]
+; GFX11-NEXT:    v_and_b32_e32 v20, 0x7f, v16
+; GFX11-NEXT:    v_or_b32_e32 v1, v17, v3
+; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v23, v[6:7]
+; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v23
+; GFX11-NEXT:    v_add_nc_u32_e32 v17, 0xffffffc0, v23
 ; GFX11-NEXT:    v_lshl_or_b32 v9, v14, 31, v9
 ; GFX11-NEXT:    v_lshrrev_b64 v[14:15], 1, v[14:15]
-; GFX11-NEXT:    v_sub_nc_u32_e32 v20, 64, v22
-; GFX11-NEXT:    v_add_nc_u32_e32 v3, 0xffffffc0, v24
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v22
-; GFX11-NEXT:    v_or_b32_e32 v12, v10, v12
-; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v22
-; GFX11-NEXT:    v_lshrrev_b64 v[18:19], v22, v[8:9]
-; GFX11-NEXT:    v_lshlrev_b64 v[20:21], v20, v[14:15]
-; GFX11-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GFX11-NEXT:    v_or_b32_e32 v5, v11, v13
-; GFX11-NEXT:    v_cndmask_b32_e32 v13, 0, v16, vcc_lo
-; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v10, v[14:15]
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v22
-; GFX11-NEXT:    v_or_b32_e32 v16, v18, v20
-; GFX11-NEXT:    v_or_b32_e32 v18, v19, v21
-; GFX11-NEXT:    v_dual_cndmask_b32 v12, v3, v12 :: v_dual_cndmask_b32 v5, v4, v5
-; GFX11-NEXT:    v_lshrrev_b64 v[3:4], v22, v[14:15]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v24
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v18, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v14, 0, v17, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v12, v6, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v10, v8, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v11, v9, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, 0, v3, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, 0, v4, s0
-; GFX11-NEXT:    v_or_b32_e32 v3, v23, v25
-; GFX11-NEXT:    v_or_b32_e32 v4, v13, v5
-; GFX11-NEXT:    v_or_b32_e32 v5, v14, v8
+; GFX11-NEXT:    v_sub_nc_u32_e32 v18, 64, v20
+; GFX11-NEXT:    v_cndmask_b32_e32 v12, 0, v12, vcc_lo
+; GFX11-NEXT:    v_lshlrev_b64 v[4:5], v17, v[4:5]
+; GFX11-NEXT:    v_or_b32_e32 v10, v2, v10
+; GFX11-NEXT:    v_add_nc_u32_e32 v26, 0xffffffc0, v20
+; GFX11-NEXT:    v_lshrrev_b64 v[16:17], v20, v[8:9]
+; GFX11-NEXT:    v_lshlrev_b64 v[18:19], v18, v[14:15]
+; GFX11-NEXT:    v_or_b32_e32 v2, v21, v24
+; GFX11-NEXT:    v_or_b32_e32 v11, v3, v11
+; GFX11-NEXT:    v_cndmask_b32_e32 v21, v4, v10, vcc_lo
+; GFX11-NEXT:    v_lshrrev_b64 v[3:4], v26, v[14:15]
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v20
+; GFX11-NEXT:    v_or_b32_e32 v10, v16, v18
+; GFX11-NEXT:    v_or_b32_e32 v16, v17, v19
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v20
+; GFX11-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v10, s1
+; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v20, v[14:15]
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v21, v6, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v3, v8, s2
+; GFX11-NEXT:    v_or_b32_e32 v3, v22, v25
+; GFX11-NEXT:    v_cndmask_b32_e64 v8, v4, v9, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, 0, v10, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, 0, v11, s1
+; GFX11-NEXT:    v_or_b32_e32 v4, v12, v5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v5, v13, v8
 ; GFX11-NEXT:    v_or_b32_e32 v6, v6, v9
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-NEXT:    v_or_b32_e32 v7, v7, v10
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call <2 x i128> @llvm.fshl.v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %amt)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index fd89a46ecbf62..70fb340ccc479 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -6019,46 +6019,46 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
 ; GFX10-NEXT:    v_not_b32_e32 v9, v8
 ; GFX10-NEXT:    v_lshlrev_b64 v[2:3], 1, v[2:3]
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v10, 31, v1
-; GFX10-NEXT:    v_and_b32_e32 v19, 0x7f, v8
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], 1, v[0:1]
-; GFX10-NEXT:    v_and_b32_e32 v18, 0x7f, v9
+; GFX10-NEXT:    v_and_b32_e32 v21, 0x7f, v8
+; GFX10-NEXT:    v_and_b32_e32 v20, 0x7f, v9
 ; GFX10-NEXT:    v_or_b32_e32 v2, v2, v10
-; GFX10-NEXT:    v_sub_nc_u32_e32 v16, 64, v19
-; GFX10-NEXT:    v_add_nc_u32_e32 v21, 0xffffffc0, v19
-; GFX10-NEXT:    v_sub_nc_u32_e32 v10, 64, v18
-; GFX10-NEXT:    v_add_nc_u32_e32 v20, 0xffffffc0, v18
-; GFX10-NEXT:    v_lshlrev_b64 v[8:9], v18, v[2:3]
-; GFX10-NEXT:    v_lshrrev_b64 v[12:13], v19, v[4:5]
+; GFX10-NEXT:    v_sub_nc_u32_e32 v16, 64, v21
+; GFX10-NEXT:    v_sub_nc_u32_e32 v12, 64, v20
+; GFX10-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v20
+; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v20, v[2:3]
+; GFX10-NEXT:    v_lshlrev_b64 v[8:9], v20, v[0:1]
+; GFX10-NEXT:    v_add_nc_u32_e32 v18, 0xffffffc0, v21
+; GFX10-NEXT:    v_lshrrev_b64 v[12:13], v12, v[0:1]
+; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v14, v[0:1]
+; GFX10-NEXT:    v_lshrrev_b64 v[14:15], v21, v[4:5]
 ; GFX10-NEXT:    v_lshlrev_b64 v[16:17], v16, v[6:7]
-; GFX10-NEXT:    v_lshrrev_b64 v[10:11], v10, v[0:1]
-; GFX10-NEXT:    v_lshlrev_b64 v[14:15], v18, v[0:1]
-; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v20, v[0:1]
-; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v18
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s4, 64, v19
-; GFX10-NEXT:    v_or_b32_e32 v12, v12, v16
-; GFX10-NEXT:    v_or_b32_e32 v10, v10, v8
-; GFX10-NEXT:    v_or_b32_e32 v11, v11, v9
-; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v21, v[6:7]
-; GFX10-NEXT:    v_or_b32_e32 v13, v13, v17
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 0, v19
+; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v20
+; GFX10-NEXT:    v_lshrrev_b64 v[18:19], v18, v[6:7]
+; GFX10-NEXT:    v_or_b32_e32 v10, v12, v10
+; GFX10-NEXT:    v_or_b32_e32 v11, v13, v11
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s5, 64, v21
+; GFX10-NEXT:    v_or_b32_e32 v12, v15, v17
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v20
 ; GFX10-NEXT:    v_cndmask_b32_e32 v10, v0, v10, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v0, v14, v16
 ; GFX10-NEXT:    v_cndmask_b32_e32 v11, v1, v11, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v19, v[6:7]
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v12, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v9, v13, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, 0, v15, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, 0, v0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, 0, v1, s4
-; GFX10-NEXT:    v_or_b32_e32 v0, v14, v4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v21
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, 0, v8, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v18, v0, s5
+; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v21, v[6:7]
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v19, v12, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, 0, v9, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v13, v4, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, 0, v0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, 0, v1, s5
+; GFX10-NEXT:    v_or_b32_e32 v0, v8, v4
 ; GFX10-NEXT:    v_or_b32_e32 v1, v7, v5
 ; GFX10-NEXT:    v_or_b32_e32 v2, v2, v6
-; GFX10-NEXT:    v_or_b32_e32 v3, v3, v8
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v9
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_fshr_i128:
@@ -6069,49 +6069,54 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v10, 31, v1
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], 1, v[0:1]
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_and_b32_e32 v18, 0x7f, v9
+; GFX11-NEXT:    v_and_b32_e32 v20, 0x7f, v9
 ; GFX11-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_sub_nc_u32_e32 v12, 64, v20
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v20
+; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v20, v[2:3]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_lshrrev_b64 v[12:13], v12, v[0:1]
+; GFX11-NEXT:    v_or_b32_e32 v10, v12, v10
+; GFX11-NEXT:    v_and_b32_e32 v21, 0x7f, v8
+; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v20, v[0:1]
+; GFX11-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v20
+; GFX11-NEXT:    v_or_b32_e32 v11, v13, v11
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v10, 64, v18
-; GFX11-NEXT:    v_lshlrev_b64 v[14:15], v18, v[0:1]
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v18
-; GFX11-NEXT:    v_and_b32_e32 v19, 0x7f, v8
-; GFX11-NEXT:    v_add_nc_u32_e32 v20, 0xffffffc0, v18
-; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v18, v[2:3]
-; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v10, v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc_lo
-; GFX11-NEXT:    v_sub_nc_u32_e32 v16, 64, v19
-; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v20, v[0:1]
-; GFX11-NEXT:    v_lshrrev_b64 v[12:13], v19, v[4:5]
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v19
-; GFX11-NEXT:    v_or_b32_e32 v10, v10, v8
-; GFX11-NEXT:    v_add_nc_u32_e32 v21, 0xffffffc0, v19
-; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v16, v[6:7]
-; GFX11-NEXT:    v_or_b32_e32 v11, v11, v9
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v19
+; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v14, v[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e32 v8, 0, v8, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v10, v0, v10, vcc_lo
-; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v21, v[6:7]
-; GFX11-NEXT:    v_or_b32_e32 v12, v12, v16
-; GFX11-NEXT:    v_or_b32_e32 v13, v13, v17
+; GFX11-NEXT:    v_sub_nc_u32_e32 v16, 64, v21
+; GFX11-NEXT:    v_add_nc_u32_e32 v18, 0xffffffc0, v21
+; GFX11-NEXT:    v_lshrrev_b64 v[14:15], v21, v[4:5]
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v21
 ; GFX11-NEXT:    v_cndmask_b32_e32 v11, v1, v11, vcc_lo
-; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v19, v[6:7]
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v12, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v9, v13, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0, v15, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, 0, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, 0, v1, s0
+; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v16, v[6:7]
+; GFX11-NEXT:    v_lshrrev_b64 v[18:19], v18, v[6:7]
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v20
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v21
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v0, v14, v16
+; GFX11-NEXT:    v_or_b32_e32 v12, v15, v17
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v18, v0, s1
+; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v21, v[6:7]
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v19, v12, s1
+; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0, v9, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v13, v4, s2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, 0, v0, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, 0, v1, s1
+; GFX11-NEXT:    v_or_b32_e32 v0, v8, v4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_or_b32_e32 v0, v14, v4
 ; GFX11-NEXT:    v_or_b32_e32 v1, v7, v5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_or_b32_e32 v2, v2, v6
-; GFX11-NEXT:    v_or_b32_e32 v3, v3, v8
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v3, v3, v9
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
   ret i128 %result
@@ -6279,100 +6284,99 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
 ; GFX10-LABEL: v_fshr_i128_ssv:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_not_b32_e32 v1, v0
-; GFX10-NEXT:    v_and_b32_e32 v13, 0x7f, v0
 ; GFX10-NEXT:    s_mov_b32 s9, 0
 ; GFX10-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
 ; GFX10-NEXT:    s_lshr_b32 s8, s1, 31
+; GFX10-NEXT:    v_and_b32_e32 v13, 0x7f, v0
 ; GFX10-NEXT:    v_and_b32_e32 v12, 0x7f, v1
-; GFX10-NEXT:    v_sub_nc_u32_e32 v8, 64, v13
-; GFX10-NEXT:    s_lshl_b64 s[0:1], s[0:1], 1
+; GFX10-NEXT:    s_lshl_b64 s[10:11], s[0:1], 1
 ; GFX10-NEXT:    s_or_b64 s[8:9], s[2:3], s[8:9]
-; GFX10-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v13
+; GFX10-NEXT:    v_sub_nc_u32_e32 v10, 64, v13
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, 64, v12
+; GFX10-NEXT:    v_add_nc_u32_e32 v6, 0xffffffc0, v12
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v12, s[8:9]
-; GFX10-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v12
-; GFX10-NEXT:    v_lshrrev_b64 v[4:5], v13, s[4:5]
-; GFX10-NEXT:    v_lshlrev_b64 v[8:9], v8, s[6:7]
-; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v2, s[0:1]
+; GFX10-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v13
 ; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v12
-; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v10, s[0:1]
-; GFX10-NEXT:    v_lshlrev_b64 v[6:7], v12, s[0:1]
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s0, 64, v13
-; GFX10-NEXT:    v_or_b32_e32 v4, v4, v8
-; GFX10-NEXT:    v_or_b32_e32 v2, v2, v0
-; GFX10-NEXT:    v_or_b32_e32 v3, v3, v1
+; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v2, s[10:11]
+; GFX10-NEXT:    v_lshlrev_b64 v[6:7], v6, s[10:11]
+; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v13, s[4:5]
+; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v10, s[6:7]
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s1, 64, v13
+; GFX10-NEXT:    v_lshlrev_b64 v[4:5], v12, s[10:11]
+; GFX10-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX10-NEXT:    v_or_b32_e32 v2, v3, v1
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 0, v12
+; GFX10-NEXT:    v_or_b32_e32 v3, v8, v10
+; GFX10-NEXT:    v_or_b32_e32 v8, v9, v11
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v0, vcc_lo
 ; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v14, s[6:7]
-; GFX10-NEXT:    v_or_b32_e32 v5, v5, v9
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 0, v13
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v10, v2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v11, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v2, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 0, v13
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s8, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s1
 ; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v13, s[6:7]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 0, v12
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, 0, v7, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v8, s8, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v10, s9, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s0
-; GFX10-NEXT:    v_or_b32_e32 v0, v6, v0
-; GFX10-NEXT:    v_or_b32_e32 v1, v4, v1
-; GFX10-NEXT:    v_or_b32_e32 v2, v5, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s9, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s1
+; GFX10-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX10-NEXT:    v_or_b32_e32 v1, v5, v1
+; GFX10-NEXT:    v_or_b32_e32 v2, v6, v2
 ; GFX10-NEXT:    v_or_b32_e32 v3, v7, v3
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: v_fshr_i128_ssv:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    v_not_b32_e32 v1, v0
-; GFX11-NEXT:    s_lshr_b32 s8, s1, 31
-; GFX11-NEXT:    s_lshl_b64 s[0:1], s[0:1], 1
 ; GFX11-NEXT:    s_mov_b32 s9, 0
 ; GFX11-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
+; GFX11-NEXT:    s_lshr_b32 s8, s1, 31
+; GFX11-NEXT:    v_and_b32_e32 v13, 0x7f, v0
 ; GFX11-NEXT:    v_and_b32_e32 v12, 0x7f, v1
+; GFX11-NEXT:    s_lshl_b64 s[10:11], s[0:1], 1
 ; GFX11-NEXT:    s_or_b64 s[8:9], s[2:3], s[8:9]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_lshlrev_b64 v[6:7], v12, s[0:1]
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v12
-; GFX11-NEXT:    v_and_b32_e32 v13, 0x7f, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_sub_nc_u32_e32 v10, 64, v13
 ; GFX11-NEXT:    v_sub_nc_u32_e32 v2, 64, v12
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v12, s[8:9]
-; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v12
-; GFX11-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc_lo
-; GFX11-NEXT:    v_sub_nc_u32_e32 v8, 64, v13
-; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v2, s[0:1]
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v12
+; GFX11-NEXT:    v_add_nc_u32_e32 v6, 0xffffffc0, v12
 ; GFX11-NEXT:    v_add_nc_u32_e32 v14, 0xffffffc0, v13
-; GFX11-NEXT:    v_lshrrev_b64 v[4:5], v13, s[4:5]
-; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v10, s[0:1]
-; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v8, s[6:7]
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v13
-; GFX11-NEXT:    v_or_b32_e32 v2, v2, v0
-; GFX11-NEXT:    v_or_b32_e32 v3, v3, v1
+; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v2, s[10:11]
+; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v13, s[4:5]
+; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v10, s[6:7]
+; GFX11-NEXT:    v_lshlrev_b64 v[6:7], v6, s[10:11]
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v13
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v13
+; GFX11-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX11-NEXT:    v_or_b32_e32 v2, v3, v1
+; GFX11-NEXT:    v_or_b32_e32 v3, v8, v10
+; GFX11-NEXT:    v_or_b32_e32 v8, v9, v11
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e32 v6, v6, v0, vcc_lo
 ; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v14, s[6:7]
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v13
-; GFX11-NEXT:    v_or_b32_e32 v4, v4, v8
-; GFX11-NEXT:    v_or_b32_e32 v5, v5, v9
-; GFX11-NEXT:    v_cndmask_b32_e32 v8, v10, v2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v10, v11, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v7, v7, v2, vcc_lo
+; GFX11-NEXT:    v_lshlrev_b64 v[4:5], v12, s[10:11]
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v12
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s1
 ; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v13, s[6:7]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v12
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v4, 0, v7, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s1
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, 0, v4 :: v_dual_cndmask_b32 v5, 0, v5
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s8, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s9, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v8, s8, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v10, s9, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s0
-; GFX11-NEXT:    v_or_b32_e32 v0, v6, v0
+; GFX11-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX11-NEXT:    v_or_b32_e32 v1, v5, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_or_b32_e32 v1, v4, v1
-; GFX11-NEXT:    v_or_b32_e32 v2, v5, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT:    v_or_b32_e32 v2, v6, v2
 ; GFX11-NEXT:    v_or_b32_e32 v3, v7, v3
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
@@ -6824,44 +6828,44 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 31, v1
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], 1, v[0:1]
 ; GFX10-NEXT:    s_andn2_b32 s5, 0x7f, s4
-; GFX10-NEXT:    s_sub_i32 s6, s5, 64
-; GFX10-NEXT:    v_or_b32_e32 v2, v2, v4
 ; GFX10-NEXT:    s_sub_i32 s7, 64, s5
+; GFX10-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX10-NEXT:    s_sub_i32 s6, s5, 64
 ; GFX10-NEXT:    s_cmp_lt_u32 s5, 64
 ; GFX10-NEXT:    v_lshrrev_b64 v[4:5], s7, v[0:1]
 ; GFX10-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX10-NEXT:    s_cmp_eq_u32 s5, 0
 ; GFX10-NEXT:    v_lshlrev_b64 v[6:7], s5, v[2:3]
-; GFX10-NEXT:    s_cselect_b32 s9, 1, 0
+; GFX10-NEXT:    s_cmp_eq_u32 s5, 0
 ; GFX10-NEXT:    v_lshlrev_b64 v[8:9], s5, v[0:1]
+; GFX10-NEXT:    s_cselect_b32 s9, 1, 0
 ; GFX10-NEXT:    s_and_b32 s5, 1, s8
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
-; GFX10-NEXT:    s_and_b32 s5, s4, 0x7f
 ; GFX10-NEXT:    v_or_b32_e32 v4, v4, v6
 ; GFX10-NEXT:    v_or_b32_e32 v5, v5, v7
-; GFX10-NEXT:    s_and_b32 s6, 1, s9
-; GFX10-NEXT:    s_sub_i32 s10, s5, 64
-; GFX10-NEXT:    s_sub_i32 s8, 64, s5
-; GFX10-NEXT:    s_cmp_lt_u32 s5, 64
+; GFX10-NEXT:    s_and_b32 s5, 1, s9
+; GFX10-NEXT:    s_and_b32 s6, s4, 0x7f
 ; GFX10-NEXT:    v_cndmask_b32_e32 v6, 0, v8, vcc_lo
-; GFX10-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX10-NEXT:    s_cmp_eq_u32 s5, 0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v7, 0, v9, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s6
+; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
+; GFX10-NEXT:    s_sub_i32 s10, s6, 64
+; GFX10-NEXT:    s_sub_i32 s5, 64, s6
+; GFX10-NEXT:    s_cmp_lt_u32 s6, 64
+; GFX10-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX10-NEXT:    s_cmp_eq_u32 s6, 0
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc_lo
 ; GFX10-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX10-NEXT:    s_lshr_b64 s[6:7], s[0:1], s4
-; GFX10-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
+; GFX10-NEXT:    s_lshl_b64 s[8:9], s[2:3], s5
 ; GFX10-NEXT:    s_lshr_b64 s[4:5], s[2:3], s4
 ; GFX10-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9]
 ; GFX10-NEXT:    s_lshr_b64 s[2:3], s[2:3], s10
 ; GFX10-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v1, v3, vcc_lo
 ; GFX10-NEXT:    s_cselect_b64 s[2:3], s[6:7], s[2:3]
 ; GFX10-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v1, v3, vcc_lo
 ; GFX10-NEXT:    s_cselect_b64 s[0:1], s[0:1], s[2:3]
 ; GFX10-NEXT:    s_cmp_lg_u32 s11, 0
 ; GFX10-NEXT:    v_or_b32_e32 v0, s0, v6
@@ -6878,39 +6882,40 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], 1, v[0:1]
 ; GFX11-NEXT:    s_and_not1_b32 s5, 0x7f, s4
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    s_sub_i32 s6, s5, 64
-; GFX11-NEXT:    v_or_b32_e32 v2, v2, v4
 ; GFX11-NEXT:    s_sub_i32 s7, 64, s5
+; GFX11-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX11-NEXT:    s_sub_i32 s6, s5, 64
 ; GFX11-NEXT:    s_cmp_lt_u32 s5, 64
 ; GFX11-NEXT:    v_lshrrev_b64 v[4:5], s7, v[0:1]
 ; GFX11-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX11-NEXT:    s_cmp_eq_u32 s5, 0
 ; GFX11-NEXT:    v_lshlrev_b64 v[6:7], s5, v[2:3]
-; GFX11-NEXT:    s_cselect_b32 s9, 1, 0
+; GFX11-NEXT:    s_cmp_eq_u32 s5, 0
 ; GFX11-NEXT:    v_lshlrev_b64 v[8:9], s5, v[0:1]
+; GFX11-NEXT:    s_cselect_b32 s9, 1, 0
 ; GFX11-NEXT:    s_and_b32 s5, 1, s8
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
 ; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
-; GFX11-NEXT:    s_and_b32 s5, s4, 0x7f
 ; GFX11-NEXT:    v_or_b32_e32 v4, v4, v6
 ; GFX11-NEXT:    v_or_b32_e32 v5, v5, v7
-; GFX11-NEXT:    s_and_b32 s6, 1, s9
-; GFX11-NEXT:    s_sub_i32 s10, s5, 64
-; GFX11-NEXT:    s_sub_i32 s8, 64, s5
-; GFX11-NEXT:    s_cmp_lt_u32 s5, 64
+; GFX11-NEXT:    s_and_b32 s5, 1, s9
+; GFX11-NEXT:    s_and_b32 s6, s4, 0x7f
 ; GFX11-NEXT:    v_dual_cndmask_b32 v6, 0, v8 :: v_dual_cndmask_b32 v7, 0, v9
-; GFX11-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX11-NEXT:    s_cmp_eq_u32 s5, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
-; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s6
+; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
+; GFX11-NEXT:    s_sub_i32 s10, s6, 64
+; GFX11-NEXT:    s_sub_i32 s5, 64, s6
+; GFX11-NEXT:    s_cmp_lt_u32 s6, 64
+; GFX11-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX11-NEXT:    s_cmp_eq_u32 s6, 0
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v0, v2 :: v_dual_cndmask_b32 v3, v1, v3
 ; GFX11-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX11-NEXT:    s_lshr_b64 s[6:7], s[0:1], s4
-; GFX11-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
+; GFX11-NEXT:    s_lshl_b64 s[8:9], s[2:3], s5
 ; GFX11-NEXT:    s_lshr_b64 s[4:5], s[2:3], s4
 ; GFX11-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9]
 ; GFX11-NEXT:    s_lshr_b64 s[2:3], s[2:3], s10
 ; GFX11-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v0, v2 :: v_dual_cndmask_b32 v3, v1, v3
 ; GFX11-NEXT:    s_cselect_b64 s[2:3], s[6:7], s[2:3]
 ; GFX11-NEXT:    s_cmp_lg_u32 s12, 0
 ; GFX11-NEXT:    s_cselect_b64 s[0:1], s[0:1], s[2:3]
@@ -7789,7 +7794,6 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v17, 31, v1
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], 1, v[0:1]
 ; GFX10-NEXT:    v_add_nc_u32_e32 v27, 0xffffffc0, v26
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s4, 64, v26
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v18, 64, v25
 ; GFX10-NEXT:    v_or_b32_e32 v2, v2, v17
 ; GFX10-NEXT:    v_add_nc_u32_e32 v19, 0xffffffc0, v25
@@ -7798,6 +7802,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
 ; GFX10-NEXT:    v_lshrrev_b64 v[17:18], v18, v[0:1]
 ; GFX10-NEXT:    v_lshlrev_b64 v[21:22], v25, v[2:3]
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v19, v[0:1]
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v25
 ; GFX10-NEXT:    v_cndmask_b32_e32 v23, 0, v23, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v24, 0, v24, vcc_lo
 ; GFX10-NEXT:    v_or_b32_e32 v22, v18, v22
@@ -7808,65 +7813,65 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
 ; GFX10-NEXT:    v_lshlrev_b64 v[18:19], v18, v[10:11]
 ; GFX10-NEXT:    v_cndmask_b32_e32 v21, v0, v21, vcc_lo
 ; GFX10-NEXT:    v_lshrrev_b64 v[0:1], v27, v[10:11]
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v25
+; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v26
+; GFX10-NEXT:    v_cndmask_b32_e64 v22, v22, v3, s4
 ; GFX10-NEXT:    v_or_b32_e32 v16, v16, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v21, v2, s4
 ; GFX10-NEXT:    v_or_b32_e32 v17, v17, v19
-; GFX10-NEXT:    v_cndmask_b32_e32 v18, v21, v2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v22, v22, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v26
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v16, s4
-; GFX10-NEXT:    v_not_b32_e32 v16, v20
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v17, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v26
 ; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v26, v[10:11]
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc_lo
-; GFX10-NEXT:    v_and_b32_e32 v25, 0x7f, v16
-; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 31, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
+; GFX10-NEXT:    v_not_b32_e32 v16, v20
+; GFX10-NEXT:    v_lshrrev_b32_e32 v10, 31, v5
 ; GFX10-NEXT:    v_lshlrev_b64 v[4:5], 1, v[4:5]
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v8, s4
+; GFX10-NEXT:    v_and_b32_e32 v25, 0x7f, v16
+; GFX10-NEXT:    v_or_b32_e32 v6, v6, v10
+; GFX10-NEXT:    v_and_b32_e32 v20, 0x7f, v20
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v9, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v26, 0, v2, vcc_lo
+; GFX10-NEXT:    v_sub_nc_u32_e32 v8, 64, v25
+; GFX10-NEXT:    v_cndmask_b32_e32 v27, 0, v3, vcc_lo
+; GFX10-NEXT:    v_add_nc_u32_e32 v16, 0xffffffc0, v25
+; GFX10-NEXT:    v_sub_nc_u32_e32 v18, 64, v20
+; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v25, v[4:5]
+; GFX10-NEXT:    v_lshrrev_b64 v[2:3], v8, v[4:5]
+; GFX10-NEXT:    v_lshlrev_b64 v[8:9], v25, v[6:7]
+; GFX10-NEXT:    v_lshlrev_b64 v[4:5], v16, v[4:5]
 ; GFX10-NEXT:    v_or_b32_e32 v0, v23, v0
-; GFX10-NEXT:    v_sub_nc_u32_e32 v9, 64, v25
-; GFX10-NEXT:    v_or_b32_e32 v6, v6, v8
-; GFX10-NEXT:    v_and_b32_e32 v23, 0x7f, v20
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v26, 0, v3, s4
-; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v9, v[4:5]
-; GFX10-NEXT:    v_lshlrev_b64 v[10:11], v25, v[6:7]
-; GFX10-NEXT:    v_sub_nc_u32_e32 v20, 64, v23
-; GFX10-NEXT:    v_add_nc_u32_e32 v3, 0xffffffc0, v25
-; GFX10-NEXT:    v_or_b32_e32 v2, v18, v2
-; GFX10-NEXT:    v_lshlrev_b64 v[16:17], v25, v[4:5]
-; GFX10-NEXT:    v_lshrrev_b64 v[18:19], v23, v[12:13]
-; GFX10-NEXT:    v_or_b32_e32 v10, v8, v10
-; GFX10-NEXT:    v_add_nc_u32_e32 v8, 0xffffffc0, v23
-; GFX10-NEXT:    v_lshlrev_b64 v[20:21], v20, v[14:15]
 ; GFX10-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v25
-; GFX10-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GFX10-NEXT:    v_or_b32_e32 v5, v9, v11
-; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v8, v[14:15]
-; GFX10-NEXT:    v_cmp_gt_u32_e64 s4, 64, v23
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, 0, v16, vcc_lo
-; GFX10-NEXT:    v_or_b32_e32 v16, v18, v20
-; GFX10-NEXT:    v_or_b32_e32 v18, v19, v21
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v3, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc_lo
+; GFX10-NEXT:    v_add_nc_u32_e32 v23, 0xffffffc0, v20
+; GFX10-NEXT:    v_lshrrev_b64 v[16:17], v20, v[12:13]
+; GFX10-NEXT:    v_or_b32_e32 v8, v2, v8
+; GFX10-NEXT:    v_lshlrev_b64 v[18:19], v18, v[14:15]
+; GFX10-NEXT:    v_or_b32_e32 v2, v21, v26
+; GFX10-NEXT:    v_or_b32_e32 v9, v3, v9
+; GFX10-NEXT:    v_cmp_gt_u32_e64 s5, 64, v20
+; GFX10-NEXT:    v_cndmask_b32_e32 v21, v4, v8, vcc_lo
 ; GFX10-NEXT:    v_lshrrev_b64 v[3:4], v23, v[14:15]
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 0, v23
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v25
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v18, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, 0, v17, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v8, v16, v18
+; GFX10-NEXT:    v_or_b32_e32 v16, v17, v19
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 0, v25
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 0, v20
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v8, s5
+; GFX10-NEXT:    v_lshrrev_b64 v[8:9], v20, v[14:15]
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, 0, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v21, v6, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v3, v12, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v4, v13, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, 0, v8, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, 0, v9, s5
 ; GFX10-NEXT:    v_or_b32_e32 v1, v24, v1
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v10, v6, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v8, v12, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v9, v13, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, 0, v3, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, 0, v4, s4
-; GFX10-NEXT:    v_or_b32_e32 v3, v22, v26
-; GFX10-NEXT:    v_or_b32_e32 v4, v11, v5
-; GFX10-NEXT:    v_or_b32_e32 v5, v14, v8
-; GFX10-NEXT:    v_or_b32_e32 v6, v6, v9
-; GFX10-NEXT:    v_or_b32_e32 v7, v7, v10
+; GFX10-NEXT:    v_or_b32_e32 v3, v22, v27
+; GFX10-NEXT:    v_or_b32_e32 v4, v10, v5
+; GFX10-NEXT:    v_or_b32_e32 v5, v11, v12
+; GFX10-NEXT:    v_or_b32_e32 v6, v6, v8
+; GFX10-NEXT:    v_or_b32_e32 v7, v7, v9
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_fshr_v2i128:
@@ -7879,95 +7884,93 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
 ; GFX11-NEXT:    v_and_b32_e32 v25, 0x7f, v17
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v17, 31, v1
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], 1, v[0:1]
-; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v25
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_sub_nc_u32_e32 v18, 64, v25
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_or_b32_e32 v2, v2, v17
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v25
 ; GFX11-NEXT:    v_lshlrev_b64 v[23:24], v25, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_dual_cndmask_b32 v23, 0, v23 :: v_dual_and_b32 v26, 0x7f, v16
-; GFX11-NEXT:    v_cndmask_b32_e32 v24, 0, v24, vcc_lo
-; GFX11-NEXT:    v_sub_nc_u32_e32 v18, 64, v25
-; GFX11-NEXT:    v_lshlrev_b64 v[21:22], v25, v[2:3]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v26
+; GFX11-NEXT:    v_and_b32_e32 v26, 0x7f, v16
 ; GFX11-NEXT:    v_lshrrev_b64 v[17:18], v18, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_lshlrev_b64 v[21:22], v25, v[2:3]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_dual_cndmask_b32 v24, 0, v24 :: v_dual_add_nc_u32 v19, 0xffffffc0, v25
+; GFX11-NEXT:    v_cndmask_b32_e32 v23, 0, v23, vcc_lo
 ; GFX11-NEXT:    v_or_b32_e32 v22, v18, v22
-; GFX11-NEXT:    v_add_nc_u32_e32 v19, 0xffffffc0, v25
-; GFX11-NEXT:    v_or_b32_e32 v21, v17, v21
 ; GFX11-NEXT:    v_sub_nc_u32_e32 v18, 64, v26
+; GFX11-NEXT:    v_or_b32_e32 v21, v17, v21
+; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v19, v[0:1]
 ; GFX11-NEXT:    v_lshrrev_b64 v[16:17], v26, v[8:9]
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v19, v[0:1]
 ; GFX11-NEXT:    v_lshlrev_b64 v[18:19], v18, v[10:11]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_dual_cndmask_b32 v22, v1, v22 :: v_dual_cndmask_b32 v21, v0, v21
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v25
-; GFX11-NEXT:    v_add_nc_u32_e32 v27, 0xffffffc0, v26
+; GFX11-NEXT:    v_dual_cndmask_b32 v21, v0, v21 :: v_dual_cndmask_b32 v22, v1, v22
+; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v26
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_or_b32_e32 v16, v16, v18
+; GFX11-NEXT:    v_add_nc_u32_e32 v27, 0xffffffc0, v26
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v25
 ; GFX11-NEXT:    v_or_b32_e32 v17, v17, v19
-; GFX11-NEXT:    v_cndmask_b32_e32 v22, v22, v3, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_lshrrev_b64 v[0:1], v27, v[10:11]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v16, s0
-; GFX11-NEXT:    v_not_b32_e32 v16, v20
-; GFX11-NEXT:    v_cndmask_b32_e32 v18, v21, v2, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v17, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e64 v21, v21, v2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v22, v22, v3, s0
 ; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v26, v[10:11]
-; GFX11-NEXT:    v_and_b32_e32 v25, 0x7f, v16
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v8 :: v_dual_cndmask_b32 v1, v1, v9
-; GFX11-NEXT:    v_lshrrev_b32_e32 v8, 31, v5
+; GFX11-NEXT:    v_lshrrev_b32_e32 v10, 31, v5
 ; GFX11-NEXT:    v_lshlrev_b64 v[4:5], 1, v[4:5]
-; GFX11-NEXT:    v_sub_nc_u32_e32 v9, 64, v25
-; GFX11-NEXT:    v_cndmask_b32_e64 v26, 0, v3, s0
-; GFX11-NEXT:    v_add_nc_u32_e32 v3, 0xffffffc0, v25
-; GFX11-NEXT:    v_or_b32_e32 v6, v6, v8
-; GFX11-NEXT:    v_or_b32_e32 v0, v23, v0
-; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v9, v[4:5]
-; GFX11-NEXT:    v_lshlrev_b64 v[16:17], v25, v[4:5]
-; GFX11-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v25, v[6:7]
+; GFX11-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
+; GFX11-NEXT:    v_not_b32_e32 v16, v20
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v26
+; GFX11-NEXT:    v_or_b32_e32 v6, v6, v10
+; GFX11-NEXT:    v_dual_cndmask_b32 v1, v1, v17 :: v_dual_and_b32 v20, 0x7f, v20
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_dual_cndmask_b32 v26, 0, v2 :: v_dual_and_b32 v25, 0x7f, v16
+; GFX11-NEXT:    v_cndmask_b32_e32 v27, 0, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v8, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v9, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT:    v_lshlrev_b64 v[10:11], v25, v[4:5]
 ; GFX11-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 64, v25
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v25
+; GFX11-NEXT:    v_sub_nc_u32_e32 v18, 64, v20
+; GFX11-NEXT:    v_or_b32_e32 v0, v23, v0
+; GFX11-NEXT:    v_add_nc_u32_e32 v23, 0xffffffc0, v20
+; GFX11-NEXT:    v_cmp_gt_u32_e64 s1, 64, v20
+; GFX11-NEXT:    v_cndmask_b32_e32 v10, 0, v10, vcc_lo
+; GFX11-NEXT:    v_sub_nc_u32_e32 v8, 64, v25
+; GFX11-NEXT:    v_add_nc_u32_e32 v16, 0xffffffc0, v25
+; GFX11-NEXT:    v_lshlrev_b64 v[18:19], v18, v[14:15]
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 0, v25
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 0, v20
+; GFX11-NEXT:    v_lshrrev_b64 v[2:3], v8, v[4:5]
+; GFX11-NEXT:    v_lshlrev_b64 v[8:9], v25, v[6:7]
+; GFX11-NEXT:    v_lshlrev_b64 v[4:5], v16, v[4:5]
+; GFX11-NEXT:    v_lshrrev_b64 v[16:17], v20, v[12:13]
+; GFX11-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc_lo
 ; GFX11-NEXT:    v_or_b32_e32 v1, v24, v1
-; GFX11-NEXT:    v_or_b32_e32 v10, v8, v10
-; GFX11-NEXT:    v_and_b32_e32 v23, 0x7f, v20
-; GFX11-NEXT:    v_or_b32_e32 v2, v18, v2
-; GFX11-NEXT:    v_or_b32_e32 v5, v9, v11
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_dual_cndmask_b32 v11, 0, v16 :: v_dual_cndmask_b32 v10, v3, v10
-; GFX11-NEXT:    v_sub_nc_u32_e32 v20, 64, v23
-; GFX11-NEXT:    v_add_nc_u32_e32 v8, 0xffffffc0, v23
-; GFX11-NEXT:    v_lshrrev_b64 v[18:19], v23, v[12:13]
-; GFX11-NEXT:    v_cmp_gt_u32_e64 s0, 64, v23
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc_lo
-; GFX11-NEXT:    v_lshlrev_b64 v[20:21], v20, v[14:15]
-; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v8, v[14:15]
+; GFX11-NEXT:    v_or_b32_e32 v8, v2, v8
+; GFX11-NEXT:    v_or_b32_e32 v2, v21, v26
+; GFX11-NEXT:    v_or_b32_e32 v9, v3, v9
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e32 v21, v4, v8, vcc_lo
 ; GFX11-NEXT:    v_lshrrev_b64 v[3:4], v23, v[14:15]
-; GFX11-NEXT:    v_cndmask_b32_e32 v14, 0, v17, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 0, v23
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v10, v6, s2
-; GFX11-NEXT:    v_or_b32_e32 v16, v18, v20
-; GFX11-NEXT:    v_or_b32_e32 v18, v19, v21
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, 0, v4, s0
+; GFX11-NEXT:    v_or_b32_e32 v8, v16, v18
+; GFX11-NEXT:    v_or_b32_e32 v16, v17, v19
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v21, v6, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v8, s1
+; GFX11-NEXT:    v_lshrrev_b64 v[8:9], v20, v[14:15]
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v5, v7, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v3, v12, s2
+; GFX11-NEXT:    v_or_b32_e32 v3, v22, v27
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v4, v13, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v8, 0, v8, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, 0, v9, s1
+; GFX11-NEXT:    v_or_b32_e32 v4, v10, v5
+; GFX11-NEXT:    v_or_b32_e32 v5, v11, v12
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v18, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_or_b32_e32 v7, v7, v10
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v8, v12, s1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v9, v13, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, 0, v3, s0
-; GFX11-NEXT:    v_or_b32_e32 v3, v22, v26
-; GFX11-NEXT:    v_or_b32_e32 v4, v11, v5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_or_b32_e32 v5, v14, v8
-; GFX11-NEXT:    v_or_b32_e32 v6, v6, v9
+; GFX11-NEXT:    v_or_b32_e32 v6, v6, v8
+; GFX11-NEXT:    v_or_b32_e32 v7, v7, v9
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call <2 x i128> @llvm.fshr.v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %amt)
   ret <2 x i128> %result
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
index 4ae98ff1edf6c..2eb7486a2684d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
@@ -261,8 +261,8 @@ define amdgpu_ps void @insertelement_s_v2i16_v_s(ptr addrspace(4) inreg %ptr, i1
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff, v0
 ; GFX11-NEXT:    s_lshl_b32 s1, s1, 4
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_lshl_b32 s2, 0xffff, s1
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_and_not1_b32 s0, s0, s2
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
@@ -632,8 +632,8 @@ define amdgpu_ps void @insertelement_v_v2i16_v_s(ptr addrspace(1) %ptr, i16 %val
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v2, s0, v0
 ; GFX11-NEXT:    s_lshl_b32 s0, 0xffff, s0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_not_b32 s0, s0
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    v_and_or_b32 v2, v3, s0, v2
 ; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
@@ -906,8 +906,8 @@ define amdgpu_ps void @insertelement_v_v4i16_s_s(ptr addrspace(1) %ptr, i16 inre
 ; GFX11-NEXT:    v_and_or_b32 v4, v2, s2, s1
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_endpgm
   %vec = load <4 x i16>, ptr addrspace(1 ) %ptr
@@ -1440,8 +1440,8 @@ define amdgpu_ps void @insertelement_v_v4i16_s_v(ptr addrspace(1) %ptr, i16 inre
 ; GFX11-NEXT:    v_and_or_b32 v4, v4, v3, v2
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_endpgm
   %vec = load <4 x i16>, ptr addrspace(1) %ptr
@@ -1685,8 +1685,8 @@ define amdgpu_ps void @insertelement_v_v4i16_v_v(ptr addrspace(1) %ptr, i16 %val
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_endpgm
   %vec = load <4 x i16>, ptr addrspace(1) %ptr
@@ -2387,8 +2387,8 @@ define amdgpu_ps void @insertelement_s_v8i16_s_v(ptr addrspace(4) inreg %ptr, i1
 ; GFX11-NEXT:    v_and_or_b32 v7, v7, v5, v4
 ; GFX11-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v5, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v7, s2
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v7, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s1
 ; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
@@ -2573,8 +2573,8 @@ define amdgpu_ps void @insertelement_s_v8i16_v_v(ptr addrspace(4) inreg %ptr, i1
 ; GFX11-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v5, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v7, s2
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v7, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s1
 ; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
@@ -2726,11 +2726,12 @@ define amdgpu_ps void @insertelement_v_v8i16_s_v(ptr addrspace(1) %ptr, i16 inre
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v5, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_and_or_b32 v9, v2, v7, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v7, 0
-; GFX11-NEXT:    v_dual_mov_b32 v8, 0 :: v_dual_cndmask_b32 v1, v4, v9
+; GFX11-NEXT:    v_mov_b32_e32 v8, 0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v3, v9, s2
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v4, v9, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v5, v9, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v6, v9, s1
 ; GFX11-NEXT:    global_store_b128 v[7:8], v[0:3], off
@@ -4128,9 +4129,9 @@ define amdgpu_ps void @insertelement_s_v16i16_v_v(ptr addrspace(4) inreg %ptr, i
 ; GFX11-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX11-NEXT:    v_mov_b32_e32 v6, s14
 ; GFX11-NEXT:    v_mov_b32_e32 v8, 0
-; GFX11-NEXT:    v_mov_b32_e32 v9, 0
-; GFX11-NEXT:    v_dual_cndmask_b32 v1, v1, v13 :: v_dual_mov_b32 v10, 16
+; GFX11-NEXT:    v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v10, 16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v13, s6
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v13, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v13, s1
 ; GFX11-NEXT:    v_mov_b32_e32 v11, 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
index 6e005a6d8c96f..1701a9cc7f09b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
@@ -1048,8 +1048,8 @@ define amdgpu_ps void @insertelement_s_v4i8_v_s(ptr addrspace(4) inreg %ptr, i8
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xff, v0
 ; GFX11-NEXT:    s_lshl_b32 s1, s1, 3
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_lshl_b32 s2, 0xff, s1
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_and_not1_b32 s0, s0, s2
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
@@ -1419,8 +1419,8 @@ define amdgpu_ps void @insertelement_v_v4i8_v_s(ptr addrspace(1) %ptr, i8 %val,
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v2, s0, v0
 ; GFX11-NEXT:    s_lshl_b32 s0, 0xff, s0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_not_b32 s0, s0
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    v_and_or_b32 v2, v3, s0, v2
 ; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
@@ -1769,8 +1769,8 @@ define amdgpu_ps void @insertelement_v_v8i8_s_s(ptr addrspace(1) %ptr, i8 inreg
 ; GFX11-NEXT:    v_and_or_b32 v4, v2, s2, s1
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_endpgm
   %vec = load <8 x i8>, ptr addrspace(1 ) %ptr
@@ -2303,8 +2303,8 @@ define amdgpu_ps void @insertelement_v_v8i8_s_v(ptr addrspace(1) %ptr, i8 inreg
 ; GFX11-NEXT:    v_and_or_b32 v4, v4, v3, v2
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_endpgm
   %vec = load <8 x i8>, ptr addrspace(1) %ptr
@@ -2548,8 +2548,8 @@ define amdgpu_ps void @insertelement_v_v8i8_v_v(ptr addrspace(1) %ptr, i8 %val,
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_endpgm
   %vec = load <8 x i8>, ptr addrspace(1) %ptr
@@ -3250,8 +3250,8 @@ define amdgpu_ps void @insertelement_s_v16i8_s_v(ptr addrspace(4) inreg %ptr, i8
 ; GFX11-NEXT:    v_and_or_b32 v7, v7, v5, v4
 ; GFX11-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v5, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v7, s2
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v7, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s1
 ; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
@@ -3436,8 +3436,8 @@ define amdgpu_ps void @insertelement_s_v16i8_v_v(ptr addrspace(4) inreg %ptr, i8
 ; GFX11-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v5, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, v7, s2
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v7, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s1
 ; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
@@ -3589,11 +3589,12 @@ define amdgpu_ps void @insertelement_v_v16i8_s_v(ptr addrspace(1) %ptr, i8 inreg
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v5, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_and_or_b32 v9, v2, v7, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v7, 0
-; GFX11-NEXT:    v_dual_mov_b32 v8, 0 :: v_dual_cndmask_b32 v1, v4, v9
+; GFX11-NEXT:    v_mov_b32_e32 v8, 0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v3, v9, s2
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v4, v9, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v5, v9, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v6, v9, s1
 ; GFX11-NEXT:    global_store_b128 v[7:8], v[0:3], off
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
index 5eca04c02a9f9..8134eb3ca2afc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
@@ -2498,21 +2498,19 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
-; GFX11-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v16 :: v_dual_cndmask_b32 v5, v5, v17
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
 ; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
-; GFX11-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
+; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v16 :: v_dual_cndmask_b32 v9, v9, v17
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
-; GFX11-NEXT:    v_dual_cndmask_b32 v13, v13, v17 :: v_dual_cndmask_b32 v12, v12, v16
+; GFX11-NEXT:    v_dual_cndmask_b32 v12, v12, v16 :: v_dual_cndmask_b32 v13, v13, v17
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
index b3865eefb4f54..95c476f9889b9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
@@ -1023,12 +1023,12 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
 ; GFX11-NEXT:    s_mov_b32 s10, 0x45004800
 ; GFX11-NEXT:    v_mov_b32_e32 v6, 0xb36211c6
 ; GFX11-NEXT:    v_bfrev_b32_e32 v7, 4.0
-; GFX11-NEXT:    v_mov_b32_e32 v3, s8
-; GFX11-NEXT:    v_dual_mov_b32 v5, s10 :: v_dual_mov_b32 v4, s9
+; GFX11-NEXT:    v_dual_mov_b32 v3, s8 :: v_dual_mov_b32 v4, s9
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX11-NEXT:    v_dual_mov_b32 v5, s10 :: v_dual_mov_b32 v0, s6
+; GFX11-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX11-NEXT:    s_mov_b32 s6, 2.0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
 ; GFX11-NEXT:    flat_load_b32 v8, v[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
index 7fa0d23e55938..9957b23d1e693 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
@@ -1673,10 +1673,10 @@ define i65 @v_lshr_i65(i65 %value, i65 %amount) {
 ; GFX11-NEXT:    v_add_nc_u32_e32 v10, 0xffffffc0, v3
 ; GFX11-NEXT:    v_lshrrev_b64 v[10:11], v10, v[4:5]
 ; GFX11-NEXT:    v_lshrrev_b64 v[4:5], v3, v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v11, v6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e32 v2, v10, v2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v5, v1, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, v11, v6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v5, v1, s0
 ; GFX11-NEXT:    v_cndmask_b32_e32 v2, 0, v4, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = lshr i65 %value, %amount
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
index 455446aa38c60..88e14c3de0e9a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
@@ -2078,64 +2078,64 @@ define i256 @v_mul_i256(i256 %num, i256 %den) {
 ; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v0, v14, 0
 ; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v0, v12, 0
 ; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v1, v13, v[16:17]
-; GFX7-NEXT:    v_mul_lo_u32 v28, v4, v11
-; GFX7-NEXT:    v_mul_lo_u32 v27, v5, v10
+; GFX7-NEXT:    v_mad_u64_u32 v[20:21], s[6:7], v0, v10, 0
 ; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v2, v12, v[16:17]
-; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v3, v11, v[16:17]
-; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v4, v10, v[16:17]
 ; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v1, v11, v[18:19]
-; GFX7-NEXT:    v_cndmask_b32_e64 v20, 0, 1, s[4:5]
-; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v5, v9, v[16:17]
+; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v3, v11, v[16:17]
 ; GFX7-NEXT:    v_mad_u64_u32 v[18:19], vcc, v2, v10, v[18:19]
-; GFX7-NEXT:    v_addc_u32_e32 v20, vcc, 0, v20, vcc
+; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v4, v10, v[16:17]
+; GFX7-NEXT:    v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GFX7-NEXT:    v_addc_u32_e32 v22, vcc, 0, v22, vcc
+; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[8:9], v5, v9, v[16:17]
 ; GFX7-NEXT:    v_mad_u64_u32 v[18:19], vcc, v3, v9, v[18:19]
-; GFX7-NEXT:    v_addc_u32_e32 v20, vcc, 0, v20, vcc
-; GFX7-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v0, v10, 0
+; GFX7-NEXT:    v_addc_u32_e32 v22, vcc, 0, v22, vcc
 ; GFX7-NEXT:    v_mad_u64_u32 v[18:19], vcc, v4, v8, v[18:19]
 ; GFX7-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v6, v8, v[16:17]
-; GFX7-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v1, v9, v[21:22]
-; GFX7-NEXT:    v_addc_u32_e32 v25, vcc, 0, v20, vcc
-; GFX7-NEXT:    v_mov_b32_e32 v20, v18
+; GFX7-NEXT:    v_mad_u64_u32 v[20:21], s[6:7], v1, v9, v[20:21]
+; GFX7-NEXT:    v_addc_u32_e32 v23, vcc, 0, v22, vcc
+; GFX7-NEXT:    v_mov_b32_e32 v22, v18
 ; GFX7-NEXT:    v_mov_b32_e32 v18, v19
 ; GFX7-NEXT:    v_mov_b32_e32 v19, v16
 ; GFX7-NEXT:    v_mad_u64_u32 v[18:19], vcc, v0, v13, v[18:19]
 ; GFX7-NEXT:    v_mul_lo_u32 v16, v6, v9
-; GFX7-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GFX7-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v2, v8, v[21:22]
-; GFX7-NEXT:    v_addc_u32_e64 v26, s[4:5], 0, v6, s[4:5]
-; GFX7-NEXT:    v_mad_u64_u32 v[23:24], s[4:5], v1, v12, v[18:19]
-; GFX7-NEXT:    v_mov_b32_e32 v19, v22
-; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[12:13], v0, v11, v[19:20]
-; GFX7-NEXT:    v_mad_u64_u32 v[22:23], s[6:7], v2, v11, v[23:24]
-; GFX7-NEXT:    v_mul_lo_u32 v24, v3, v12
-; GFX7-NEXT:    v_mad_u64_u32 v[11:12], s[8:9], v3, v10, v[22:23]
-; GFX7-NEXT:    v_mul_lo_u32 v22, v2, v13
-; GFX7-NEXT:    v_mad_u64_u32 v[12:13], s[10:11], v4, v9, v[11:12]
-; GFX7-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[12:13]
-; GFX7-NEXT:    v_mad_u64_u32 v[10:11], s[12:13], v1, v10, v[18:19]
-; GFX7-NEXT:    v_addc_u32_e64 v4, s[12:13], 0, v4, s[12:13]
-; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[12:13], v2, v9, v[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[6:7]
+; GFX7-NEXT:    v_mad_u64_u32 v[20:21], s[4:5], v2, v8, v[20:21]
+; GFX7-NEXT:    v_addc_u32_e64 v24, s[4:5], 0, v6, s[4:5]
+; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v1, v12, v[18:19]
+; GFX7-NEXT:    v_mad_u64_u32 v[21:22], s[10:11], v0, v11, v[21:22]
+; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[6:7], v2, v11, v[18:19]
+; GFX7-NEXT:    v_mul_lo_u32 v26, v4, v11
+; GFX7-NEXT:    v_mul_lo_u32 v27, v3, v12
+; GFX7-NEXT:    v_mad_u64_u32 v[11:12], s[8:9], v3, v10, v[18:19]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[10:11]
+; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[10:11], v1, v10, v[21:22]
+; GFX7-NEXT:    v_mul_lo_u32 v25, v5, v10
+; GFX7-NEXT:    v_mul_lo_u32 v28, v2, v13
+; GFX7-NEXT:    v_mad_u64_u32 v[12:13], s[12:13], v4, v9, v[11:12]
 ; GFX7-NEXT:    v_mad_u64_u32 v[10:11], s[14:15], v0, v8, 0
-; GFX7-NEXT:    v_addc_u32_e64 v2, s[12:13], 0, v4, s[12:13]
+; GFX7-NEXT:    v_addc_u32_e64 v22, s[10:11], 0, v6, s[10:11]
+; GFX7-NEXT:    v_mad_u64_u32 v[18:19], s[10:11], v2, v9, v[18:19]
+; GFX7-NEXT:    v_mov_b32_e32 v21, v20
 ; GFX7-NEXT:    v_mov_b32_e32 v20, v11
 ; GFX7-NEXT:    v_mad_u64_u32 v[20:21], s[16:17], v0, v9, v[20:21]
-; GFX7-NEXT:    v_mad_u64_u32 v[3:4], s[12:13], v3, v8, v[18:19]
+; GFX7-NEXT:    v_addc_u32_e64 v2, s[10:11], 0, v22, s[10:11]
+; GFX7-NEXT:    v_mad_u64_u32 v[3:4], s[10:11], v3, v8, v[18:19]
 ; GFX7-NEXT:    v_mad_u64_u32 v[5:6], s[14:15], v5, v8, v[12:13]
-; GFX7-NEXT:    v_addc_u32_e64 v11, s[12:13], 0, v2, s[12:13]
+; GFX7-NEXT:    v_addc_u32_e64 v11, s[10:11], 0, v2, s[10:11]
 ; GFX7-NEXT:    v_mul_lo_u32 v9, v1, v14
 ; GFX7-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s[16:17]
-; GFX7-NEXT:    v_mad_u64_u32 v[1:2], s[12:13], v1, v8, v[20:21]
-; GFX7-NEXT:    v_addc_u32_e64 v3, s[12:13], v12, v3, s[12:13]
+; GFX7-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v1, v8, v[20:21]
+; GFX7-NEXT:    v_addc_u32_e64 v3, s[10:11], v12, v3, s[10:11]
 ; GFX7-NEXT:    v_mul_lo_u32 v0, v0, v15
-; GFX7-NEXT:    v_addc_u32_e64 v4, s[12:13], v26, v4, s[12:13]
-; GFX7-NEXT:    v_addc_u32_e64 v5, s[12:13], v11, v5, s[12:13]
-; GFX7-NEXT:    v_addc_u32_e64 v6, s[12:13], v25, v6, s[12:13]
-; GFX7-NEXT:    v_addc_u32_e64 v0, s[12:13], v17, v0, s[12:13]
-; GFX7-NEXT:    v_addc_u32_e64 v0, s[12:13], v0, v9, s[14:15]
-; GFX7-NEXT:    v_addc_u32_e64 v0, s[10:11], v0, v22, s[10:11]
-; GFX7-NEXT:    v_addc_u32_e64 v0, s[8:9], v0, v24, s[8:9]
-; GFX7-NEXT:    v_addc_u32_e64 v0, s[6:7], v0, v28, s[6:7]
-; GFX7-NEXT:    v_addc_u32_e64 v0, s[4:5], v0, v27, s[4:5]
+; GFX7-NEXT:    v_addc_u32_e64 v4, s[10:11], v24, v4, s[10:11]
+; GFX7-NEXT:    v_addc_u32_e64 v5, s[10:11], v11, v5, s[10:11]
+; GFX7-NEXT:    v_addc_u32_e64 v6, s[10:11], v23, v6, s[10:11]
+; GFX7-NEXT:    v_addc_u32_e64 v0, s[10:11], v17, v0, s[10:11]
+; GFX7-NEXT:    v_addc_u32_e64 v0, s[10:11], v0, v9, s[14:15]
+; GFX7-NEXT:    v_addc_u32_e64 v0, s[10:11], v0, v28, s[12:13]
+; GFX7-NEXT:    v_addc_u32_e64 v0, s[8:9], v0, v27, s[8:9]
+; GFX7-NEXT:    v_addc_u32_e64 v0, s[6:7], v0, v26, s[6:7]
+; GFX7-NEXT:    v_addc_u32_e64 v0, s[4:5], v0, v25, s[4:5]
 ; GFX7-NEXT:    v_addc_u32_e32 v0, vcc, v0, v16, vcc
 ; GFX7-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], v7, v8, v[0:1]
 ; GFX7-NEXT:    v_mov_b32_e32 v0, v10
@@ -2147,64 +2147,64 @@ define i256 @v_mul_i256(i256 %num, i256 %den) {
 ; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v0, v14, 0
 ; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v0, v12, 0
 ; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v1, v13, v[16:17]
-; GFX8-NEXT:    v_mul_lo_u32 v28, v4, v11
-; GFX8-NEXT:    v_mul_lo_u32 v27, v5, v10
+; GFX8-NEXT:    v_mad_u64_u32 v[20:21], s[6:7], v0, v10, 0
 ; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v2, v12, v[16:17]
-; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v3, v11, v[16:17]
-; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v4, v10, v[16:17]
 ; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v1, v11, v[18:19]
-; GFX8-NEXT:    v_cndmask_b32_e64 v20, 0, 1, s[4:5]
-; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v5, v9, v[16:17]
+; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v3, v11, v[16:17]
 ; GFX8-NEXT:    v_mad_u64_u32 v[18:19], vcc, v2, v10, v[18:19]
-; GFX8-NEXT:    v_addc_u32_e32 v20, vcc, 0, v20, vcc
+; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v4, v10, v[16:17]
+; GFX8-NEXT:    v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GFX8-NEXT:    v_addc_u32_e32 v22, vcc, 0, v22, vcc
+; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[8:9], v5, v9, v[16:17]
 ; GFX8-NEXT:    v_mad_u64_u32 v[18:19], vcc, v3, v9, v[18:19]
-; GFX8-NEXT:    v_addc_u32_e32 v20, vcc, 0, v20, vcc
-; GFX8-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v0, v10, 0
+; GFX8-NEXT:    v_addc_u32_e32 v22, vcc, 0, v22, vcc
 ; GFX8-NEXT:    v_mad_u64_u32 v[18:19], vcc, v4, v8, v[18:19]
 ; GFX8-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v6, v8, v[16:17]
-; GFX8-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v1, v9, v[21:22]
-; GFX8-NEXT:    v_addc_u32_e32 v25, vcc, 0, v20, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v20, v18
+; GFX8-NEXT:    v_mad_u64_u32 v[20:21], s[6:7], v1, v9, v[20:21]
+; GFX8-NEXT:    v_addc_u32_e32 v23, vcc, 0, v22, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v22, v18
 ; GFX8-NEXT:    v_mov_b32_e32 v18, v19
 ; GFX8-NEXT:    v_mov_b32_e32 v19, v16
 ; GFX8-NEXT:    v_mad_u64_u32 v[18:19], vcc, v0, v13, v[18:19]
 ; GFX8-NEXT:    v_mul_lo_u32 v16, v6, v9
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GFX8-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v2, v8, v[21:22]
-; GFX8-NEXT:    v_addc_u32_e64 v26, s[4:5], 0, v6, s[4:5]
-; GFX8-NEXT:    v_mad_u64_u32 v[23:24], s[4:5], v1, v12, v[18:19]
-; GFX8-NEXT:    v_mov_b32_e32 v19, v22
-; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[12:13], v0, v11, v[19:20]
-; GFX8-NEXT:    v_mad_u64_u32 v[22:23], s[6:7], v2, v11, v[23:24]
-; GFX8-NEXT:    v_mul_lo_u32 v24, v3, v12
-; GFX8-NEXT:    v_mad_u64_u32 v[11:12], s[8:9], v3, v10, v[22:23]
-; GFX8-NEXT:    v_mul_lo_u32 v22, v2, v13
-; GFX8-NEXT:    v_mad_u64_u32 v[12:13], s[10:11], v4, v9, v[11:12]
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[12:13]
-; GFX8-NEXT:    v_mad_u64_u32 v[10:11], s[12:13], v1, v10, v[18:19]
-; GFX8-NEXT:    v_addc_u32_e64 v4, s[12:13], 0, v4, s[12:13]
-; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[12:13], v2, v9, v[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[6:7]
+; GFX8-NEXT:    v_mad_u64_u32 v[20:21], s[4:5], v2, v8, v[20:21]
+; GFX8-NEXT:    v_addc_u32_e64 v24, s[4:5], 0, v6, s[4:5]
+; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v1, v12, v[18:19]
+; GFX8-NEXT:    v_mad_u64_u32 v[21:22], s[10:11], v0, v11, v[21:22]
+; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[6:7], v2, v11, v[18:19]
+; GFX8-NEXT:    v_mul_lo_u32 v26, v4, v11
+; GFX8-NEXT:    v_mul_lo_u32 v27, v3, v12
+; GFX8-NEXT:    v_mad_u64_u32 v[11:12], s[8:9], v3, v10, v[18:19]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[10:11]
+; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[10:11], v1, v10, v[21:22]
+; GFX8-NEXT:    v_mul_lo_u32 v25, v5, v10
+; GFX8-NEXT:    v_mul_lo_u32 v28, v2, v13
+; GFX8-NEXT:    v_mad_u64_u32 v[12:13], s[12:13], v4, v9, v[11:12]
 ; GFX8-NEXT:    v_mad_u64_u32 v[10:11], s[14:15], v0, v8, 0
-; GFX8-NEXT:    v_addc_u32_e64 v2, s[12:13], 0, v4, s[12:13]
+; GFX8-NEXT:    v_addc_u32_e64 v22, s[10:11], 0, v6, s[10:11]
+; GFX8-NEXT:    v_mad_u64_u32 v[18:19], s[10:11], v2, v9, v[18:19]
+; GFX8-NEXT:    v_mov_b32_e32 v21, v20
 ; GFX8-NEXT:    v_mov_b32_e32 v20, v11
 ; GFX8-NEXT:    v_mad_u64_u32 v[20:21], s[16:17], v0, v9, v[20:21]
-; GFX8-NEXT:    v_mad_u64_u32 v[3:4], s[12:13], v3, v8, v[18:19]
+; GFX8-NEXT:    v_addc_u32_e64 v2, s[10:11], 0, v22, s[10:11]
+; GFX8-NEXT:    v_mad_u64_u32 v[3:4], s[10:11], v3, v8, v[18:19]
 ; GFX8-NEXT:    v_mad_u64_u32 v[5:6], s[14:15], v5, v8, v[12:13]
-; GFX8-NEXT:    v_addc_u32_e64 v11, s[12:13], 0, v2, s[12:13]
+; GFX8-NEXT:    v_addc_u32_e64 v11, s[10:11], 0, v2, s[10:11]
 ; GFX8-NEXT:    v_mul_lo_u32 v9, v1, v14
 ; GFX8-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s[16:17]
-; GFX8-NEXT:    v_mad_u64_u32 v[1:2], s[12:13], v1, v8, v[20:21]
-; GFX8-NEXT:    v_addc_u32_e64 v3, s[12:13], v12, v3, s[12:13]
+; GFX8-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v1, v8, v[20:21]
+; GFX8-NEXT:    v_addc_u32_e64 v3, s[10:11], v12, v3, s[10:11]
 ; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v15
-; GFX8-NEXT:    v_addc_u32_e64 v4, s[12:13], v26, v4, s[12:13]
-; GFX8-NEXT:    v_addc_u32_e64 v5, s[12:13], v11, v5, s[12:13]
-; GFX8-NEXT:    v_addc_u32_e64 v6, s[12:13], v25, v6, s[12:13]
-; GFX8-NEXT:    v_addc_u32_e64 v0, s[12:13], v17, v0, s[12:13]
-; GFX8-NEXT:    v_addc_u32_e64 v0, s[12:13], v0, v9, s[14:15]
-; GFX8-NEXT:    v_addc_u32_e64 v0, s[10:11], v0, v22, s[10:11]
-; GFX8-NEXT:    v_addc_u32_e64 v0, s[8:9], v0, v24, s[8:9]
-; GFX8-NEXT:    v_addc_u32_e64 v0, s[6:7], v0, v28, s[6:7]
-; GFX8-NEXT:    v_addc_u32_e64 v0, s[4:5], v0, v27, s[4:5]
+; GFX8-NEXT:    v_addc_u32_e64 v4, s[10:11], v24, v4, s[10:11]
+; GFX8-NEXT:    v_addc_u32_e64 v5, s[10:11], v11, v5, s[10:11]
+; GFX8-NEXT:    v_addc_u32_e64 v6, s[10:11], v23, v6, s[10:11]
+; GFX8-NEXT:    v_addc_u32_e64 v0, s[10:11], v17, v0, s[10:11]
+; GFX8-NEXT:    v_addc_u32_e64 v0, s[10:11], v0, v9, s[14:15]
+; GFX8-NEXT:    v_addc_u32_e64 v0, s[10:11], v0, v28, s[12:13]
+; GFX8-NEXT:    v_addc_u32_e64 v0, s[8:9], v0, v27, s[8:9]
+; GFX8-NEXT:    v_addc_u32_e64 v0, s[6:7], v0, v26, s[6:7]
+; GFX8-NEXT:    v_addc_u32_e64 v0, s[4:5], v0, v25, s[4:5]
 ; GFX8-NEXT:    v_addc_u32_e32 v0, vcc, v0, v16, vcc
 ; GFX8-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], v7, v8, v[0:1]
 ; GFX8-NEXT:    v_mov_b32_e32 v0, v10
@@ -2216,64 +2216,64 @@ define i256 @v_mul_i256(i256 %num, i256 %den) {
 ; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v0, v14, 0
 ; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v0, v12, 0
 ; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v1, v13, v[16:17]
-; GFX9-NEXT:    v_mul_lo_u32 v28, v4, v11
-; GFX9-NEXT:    v_mul_lo_u32 v27, v5, v10
+; GFX9-NEXT:    v_mad_u64_u32 v[20:21], s[6:7], v0, v10, 0
 ; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v2, v12, v[16:17]
-; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v3, v11, v[16:17]
-; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v4, v10, v[16:17]
 ; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v1, v11, v[18:19]
-; GFX9-NEXT:    v_cndmask_b32_e64 v20, 0, 1, s[4:5]
-; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v5, v9, v[16:17]
+; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v3, v11, v[16:17]
 ; GFX9-NEXT:    v_mad_u64_u32 v[18:19], vcc, v2, v10, v[18:19]
-; GFX9-NEXT:    v_addc_co_u32_e32 v20, vcc, 0, v20, vcc
+; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[6:7], v4, v10, v[16:17]
+; GFX9-NEXT:    v_cndmask_b32_e64 v22, 0, 1, s[4:5]
+; GFX9-NEXT:    v_addc_co_u32_e32 v22, vcc, 0, v22, vcc
+; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[8:9], v5, v9, v[16:17]
 ; GFX9-NEXT:    v_mad_u64_u32 v[18:19], vcc, v3, v9, v[18:19]
-; GFX9-NEXT:    v_addc_co_u32_e32 v20, vcc, 0, v20, vcc
-; GFX9-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v0, v10, 0
+; GFX9-NEXT:    v_addc_co_u32_e32 v22, vcc, 0, v22, vcc
 ; GFX9-NEXT:    v_mad_u64_u32 v[18:19], vcc, v4, v8, v[18:19]
 ; GFX9-NEXT:    v_mad_u64_u32 v[16:17], s[4:5], v6, v8, v[16:17]
-; GFX9-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v1, v9, v[21:22]
-; GFX9-NEXT:    v_addc_co_u32_e32 v25, vcc, 0, v20, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v20, v18
+; GFX9-NEXT:    v_mad_u64_u32 v[20:21], s[6:7], v1, v9, v[20:21]
+; GFX9-NEXT:    v_addc_co_u32_e32 v23, vcc, 0, v22, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v22, v18
 ; GFX9-NEXT:    v_mov_b32_e32 v18, v19
 ; GFX9-NEXT:    v_mov_b32_e32 v19, v16
 ; GFX9-NEXT:    v_mad_u64_u32 v[18:19], vcc, v0, v13, v[18:19]
 ; GFX9-NEXT:    v_mul_lo_u32 v16, v6, v9
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GFX9-NEXT:    v_mad_u64_u32 v[21:22], s[4:5], v2, v8, v[21:22]
-; GFX9-NEXT:    v_addc_co_u32_e64 v26, s[4:5], 0, v6, s[4:5]
-; GFX9-NEXT:    v_mad_u64_u32 v[23:24], s[4:5], v1, v12, v[18:19]
-; GFX9-NEXT:    v_mov_b32_e32 v19, v22
-; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[12:13], v0, v11, v[19:20]
-; GFX9-NEXT:    v_mad_u64_u32 v[22:23], s[6:7], v2, v11, v[23:24]
-; GFX9-NEXT:    v_mul_lo_u32 v24, v3, v12
-; GFX9-NEXT:    v_mad_u64_u32 v[11:12], s[8:9], v3, v10, v[22:23]
-; GFX9-NEXT:    v_mul_lo_u32 v22, v2, v13
-; GFX9-NEXT:    v_mad_u64_u32 v[12:13], s[10:11], v4, v9, v[11:12]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[12:13]
-; GFX9-NEXT:    v_mad_u64_u32 v[10:11], s[12:13], v1, v10, v[18:19]
-; GFX9-NEXT:    v_addc_co_u32_e64 v4, s[12:13], 0, v4, s[12:13]
-; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[12:13], v2, v9, v[10:11]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[6:7]
+; GFX9-NEXT:    v_mad_u64_u32 v[20:21], s[4:5], v2, v8, v[20:21]
+; GFX9-NEXT:    v_addc_co_u32_e64 v24, s[4:5], 0, v6, s[4:5]
+; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[4:5], v1, v12, v[18:19]
+; GFX9-NEXT:    v_mad_u64_u32 v[21:22], s[10:11], v0, v11, v[21:22]
+; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[6:7], v2, v11, v[18:19]
+; GFX9-NEXT:    v_mul_lo_u32 v26, v4, v11
+; GFX9-NEXT:    v_mul_lo_u32 v27, v3, v12
+; GFX9-NEXT:    v_mad_u64_u32 v[11:12], s[8:9], v3, v10, v[18:19]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[10:11]
+; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[10:11], v1, v10, v[21:22]
+; GFX9-NEXT:    v_mul_lo_u32 v25, v5, v10
+; GFX9-NEXT:    v_mul_lo_u32 v28, v2, v13
+; GFX9-NEXT:    v_mad_u64_u32 v[12:13], s[12:13], v4, v9, v[11:12]
 ; GFX9-NEXT:    v_mad_u64_u32 v[10:11], s[14:15], v0, v8, 0
-; GFX9-NEXT:    v_addc_co_u32_e64 v2, s[12:13], 0, v4, s[12:13]
+; GFX9-NEXT:    v_addc_co_u32_e64 v22, s[10:11], 0, v6, s[10:11]
+; GFX9-NEXT:    v_mad_u64_u32 v[18:19], s[10:11], v2, v9, v[18:19]
+; GFX9-NEXT:    v_mov_b32_e32 v21, v20
 ; GFX9-NEXT:    v_mov_b32_e32 v20, v11
 ; GFX9-NEXT:    v_mad_u64_u32 v[20:21], s[16:17], v0, v9, v[20:21]
-; GFX9-NEXT:    v_mad_u64_u32 v[3:4], s[12:13], v3, v8, v[18:19]
+; GFX9-NEXT:    v_addc_co_u32_e64 v2, s[10:11], 0, v22, s[10:11]
+; GFX9-NEXT:    v_mad_u64_u32 v[3:4], s[10:11], v3, v8, v[18:19]
 ; GFX9-NEXT:    v_mad_u64_u32 v[5:6], s[14:15], v5, v8, v[12:13]
-; GFX9-NEXT:    v_addc_co_u32_e64 v11, s[12:13], 0, v2, s[12:13]
+; GFX9-NEXT:    v_addc_co_u32_e64 v11, s[10:11], 0, v2, s[10:11]
 ; GFX9-NEXT:    v_mul_lo_u32 v9, v1, v14
 ; GFX9-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s[16:17]
-; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[12:13], v1, v8, v[20:21]
-; GFX9-NEXT:    v_addc_co_u32_e64 v3, s[12:13], v12, v3, s[12:13]
+; GFX9-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v1, v8, v[20:21]
+; GFX9-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v12, v3, s[10:11]
 ; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v15
-; GFX9-NEXT:    v_addc_co_u32_e64 v4, s[12:13], v26, v4, s[12:13]
-; GFX9-NEXT:    v_addc_co_u32_e64 v5, s[12:13], v11, v5, s[12:13]
-; GFX9-NEXT:    v_addc_co_u32_e64 v6, s[12:13], v25, v6, s[12:13]
-; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[12:13], v17, v0, s[12:13]
-; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[12:13], v0, v9, s[14:15]
-; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[10:11], v0, v22, s[10:11]
-; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[8:9], v0, v24, s[8:9]
-; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[6:7], v0, v28, s[6:7]
-; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[4:5], v0, v27, s[4:5]
+; GFX9-NEXT:    v_addc_co_u32_e64 v4, s[10:11], v24, v4, s[10:11]
+; GFX9-NEXT:    v_addc_co_u32_e64 v5, s[10:11], v11, v5, s[10:11]
+; GFX9-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v23, v6, s[10:11]
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[10:11], v17, v0, s[10:11]
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[10:11], v0, v9, s[14:15]
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[10:11], v0, v28, s[12:13]
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[8:9], v0, v27, s[8:9]
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[6:7], v0, v26, s[6:7]
+; GFX9-NEXT:    v_addc_co_u32_e64 v0, s[4:5], v0, v25, s[4:5]
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v0, v16, vcc
 ; GFX9-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], v7, v8, v[0:1]
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v10
@@ -2476,12 +2476,11 @@ define i256 @v_mul_i256(i256 %num, i256 %den) {
 ; GFX12-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s2
 ; GFX12-NEXT:    v_mul_lo_u32 v24, v2, v13
-; GFX12-NEXT:    v_mov_b32_e32 v13, v1
 ; GFX12-NEXT:    v_mad_co_u64_u32 v[11:12], s2, v17, v10, v[14:15]
 ; GFX12-NEXT:    v_mad_co_u64_u32 v[18:19], s3, v3, v10, v[18:19]
 ; GFX12-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-NEXT:    v_add_co_ci_u32_e64 v6, null, 0, v6, s2
-; GFX12-NEXT:    v_mov_b32_e32 v14, v21
+; GFX12-NEXT:    v_dual_mov_b32 v13, v1 :: v_dual_mov_b32 v14, v21
 ; GFX12-NEXT:    v_mad_co_u64_u32 v[1:2], s2, v2, v9, v[11:12]
 ; GFX12-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-NEXT:    v_add_co_ci_u32_e64 v6, null, 0, v6, s2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
index 110c17bd5dbd2..832f066adaa84 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -5037,17 +5037,17 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s1, s[4:5], 0
 ; GFX10-NEXT:    s_ashr_i32 s4, s9, 31
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s9
-; GFX10-NEXT:    s_add_i32 s5, s4, 0x80000000
-; GFX10-NEXT:    s_xor_b32 s8, s1, s0
+; GFX10-NEXT:    s_add_i32 s8, s4, 0x80000000
+; GFX10-NEXT:    s_xor_b32 s5, s1, s0
 ; GFX10-NEXT:    s_add_u32 s0, s2, s6
 ; GFX10-NEXT:    s_addc_u32 s1, s3, s7
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s2, s[0:1], s[2:3]
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s3, s[6:7], 0
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s5
 ; GFX10-NEXT:    s_ashr_i32 s4, s1, 31
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s5
 ; GFX10-NEXT:    s_add_i32 s0, s4, 0x80000000
 ; GFX10-NEXT:    s_xor_b32 s1, s3, s2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s4, s1
@@ -5066,16 +5066,16 @@ define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s1, s[4:5], 0
 ; GFX11-NEXT:    s_ashr_i32 s4, s9, 31
-; GFX11-NEXT:    s_add_i32 s5, s4, 0x80000000
-; GFX11-NEXT:    s_xor_b32 s8, s1, s0
+; GFX11-NEXT:    s_add_i32 s8, s4, 0x80000000
+; GFX11-NEXT:    s_xor_b32 s5, s1, s0
 ; GFX11-NEXT:    s_add_u32 s0, s2, s6
 ; GFX11-NEXT:    s_addc_u32 s1, s3, s7
 ; GFX11-NEXT:    v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s2, s[0:1], s[2:3]
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s3, s[6:7], 0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s8
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s5
 ; GFX11-NEXT:    s_ashr_i32 s4, s1, 31
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s8
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s5
 ; GFX11-NEXT:    s_add_i32 s0, s4, 0x80000000
 ; GFX11-NEXT:    s_xor_b32 s1, s3, s2
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s4, s1
@@ -5450,8 +5450,8 @@ define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
 ; GFX11-NEXT:    v_add_nc_u32_e32 v6, 0x80000000, v3
 ; GFX11-NEXT:    v_and_b32_e32 v2, 1, v2
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
 ; GFX11-NEXT:    v_dual_cndmask_b32 v2, v4, v3 :: v_dual_cndmask_b32 v3, v5, v6
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
@@ -5611,8 +5611,8 @@ define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
 ; GFX11-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v4, v2 :: v_dual_cndmask_b32 v3, v7, v3
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc_lo
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
@@ -6145,8 +6145,8 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX10-NEXT:    s_addc_u32 s16, s2, s10
 ; GFX10-NEXT:    v_cmp_lt_u64_e64 s0, s[8:9], s[0:1]
 ; GFX10-NEXT:    s_addc_u32 s17, s3, s11
-; GFX10-NEXT:    v_mov_b32_e32 v4, s9
 ; GFX10-NEXT:    s_cmp_eq_u64 s[16:17], s[2:3]
+; GFX10-NEXT:    v_mov_b32_e32 v4, s17
 ; GFX10-NEXT:    s_cselect_b32 s18, 1, 0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s0, s[16:17], s[2:3]
@@ -6176,7 +6176,7 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s4, s[2:3], s[6:7]
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s6, s[14:15], 0
 ; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX10-NEXT:    v_mov_b32_e32 v6, s1
+; GFX10-NEXT:    v_mov_b32_e32 v6, s2
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s3
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s4
 ; GFX10-NEXT:    s_and_b32 s4, 1, s12
@@ -6188,31 +6188,31 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 s4, 0, s5
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc_lo
 ; GFX10-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX10-NEXT:    v_mov_b32_e32 v0, s16
+; GFX10-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v3, 0, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX10-NEXT:    s_ashr_i32 s4, s3, 31
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s11, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s10, vcc_lo
 ; GFX10-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GFX10-NEXT:    v_mov_b32_e32 v2, s17
+; GFX10-NEXT:    v_mov_b32_e32 v2, s16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s10, vcc_lo
 ; GFX10-NEXT:    s_add_i32 s0, s4, 0x80000000
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v4
 ; GFX10-NEXT:    v_and_b32_e32 v1, 1, v1
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s10, vcc_lo
 ; GFX10-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX10-NEXT:    v_mov_b32_e32 v1, s2
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX10-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s0, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v3
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v5
-; GFX10-NEXT:    v_readfirstlane_b32 s5, v6
-; GFX10-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX10-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
@@ -6247,12 +6247,14 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX11-NEXT:    s_addc_u32 s3, s7, s15
 ; GFX11-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX11-NEXT:    s_cmp_eq_u64 s[2:3], s[6:7]
-; GFX11-NEXT:    v_dual_mov_b32 v6, s1 :: v_dual_mov_b32 v7, s3
+; GFX11-NEXT:    v_mov_b32_e32 v4, s17
+; GFX11-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s4
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s4, s[2:3], s[6:7]
-; GFX11-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s6, s[14:15], 0
-; GFX11-NEXT:    v_dual_mov_b32 v5, s0 :: v_dual_and_b32 v0, 1, v0
+; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT:    v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s3
+; GFX11-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s4
 ; GFX11-NEXT:    s_and_b32 s4, 1, s12
 ; GFX11-NEXT:    s_cmp_eq_u64 s[14:15], 0
@@ -6265,30 +6267,29 @@ define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v3, 0, s4
 ; GFX11-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_mov_b32_e32 v0, s16
 ; GFX11-NEXT:    s_ashr_i32 s4, s3, 31
 ; GFX11-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GFX11-NEXT:    v_mov_b32_e32 v4, s9
-; GFX11-NEXT:    v_mov_b32_e32 v2, s17
+; GFX11-NEXT:    v_mov_b32_e32 v0, s9
+; GFX11-NEXT:    v_mov_b32_e32 v2, s16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s10, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, s11, vcc_lo
 ; GFX11-NEXT:    v_and_b32_e32 v1, 1, v1
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, s10, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s10, vcc_lo
 ; GFX11-NEXT:    s_add_i32 s0, s4, 0x80000000
+; GFX11-NEXT:    v_readfirstlane_b32 s3, v4
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX11-NEXT:    v_mov_b32_e32 v1, s2
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v4
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX11-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX11-NEXT:    v_mov_b32_e32 v1, s1
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, s4, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s0, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v3
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v5
-; GFX11-NEXT:    v_readfirstlane_b32 s5, v6
-; GFX11-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll
index b59f85b2dfa38..02f8d0bf3c3df 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdivrem.ll
@@ -813,30 +813,30 @@ define amdgpu_kernel void @sdivrem_v2i32(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, s0, v2
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v3, s7, v3
+; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v2
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v6, s4, v2
-; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s3, v3
-; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s4, v2
+; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s3, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s3, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc_lo
-; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s0
 ; GFX10-NEXT:    v_add_nc_u32_e32 v4, 1, v0
-; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s4, v2
-; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s3, v3
+; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
+; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s3, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v6, s4, v2
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s3, v3
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT:    v_mov_b32_e32 v4, 0
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s0
 ; GFX10-NEXT:    s_xor_b32 s0, s6, s2
 ; GFX10-NEXT:    v_xor_b32_e32 v0, s1, v0
 ; GFX10-NEXT:    v_xor_b32_e32 v1, s0, v1
 ; GFX10-NEXT:    v_xor_b32_e32 v2, s5, v2
 ; GFX10-NEXT:    v_xor_b32_e32 v3, s6, v3
-; GFX10-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v0, s1, v0
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v1, s0, v1
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v2, s5, v2
@@ -2568,8 +2568,8 @@ define amdgpu_kernel void @sdivrem_v2i8(ptr addrspace(1) %out0, ptr addrspace(1)
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, s4, v2
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[8:9], 0x0
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v3, s0, v3
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s1, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s1, v2
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s1, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s3, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s3, v3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
@@ -2581,13 +2581,13 @@ define amdgpu_kernel void @sdivrem_v2i8(ptr addrspace(1) %out0, ptr addrspace(1)
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s1, v2
 ; GFX10-NEXT:    v_add_nc_u32_e32 v6, 1, v1
 ; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s3, v3
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s3, v3
+; GFX10-NEXT:    s_xor_b32 s1, s11, s2
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v4, s3, v3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc_lo
-; GFX10-NEXT:    s_xor_b32 s1, s11, s2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s0
 ; GFX10-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s0
 ; GFX10-NEXT:    v_xor_b32_e32 v2, s11, v2
 ; GFX10-NEXT:    s_xor_b32 s0, s12, s10
 ; GFX10-NEXT:    v_mov_b32_e32 v4, 0xff
@@ -2981,8 +2981,8 @@ define amdgpu_kernel void @sdivrem_v2i16(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, s4, v2
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[8:9], 0x0
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v3, s0, v3
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s2, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v2
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s2, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s1, v3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v4, s1, v3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
index e4079a89c6e0f..2673ac4fb5bae 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -5043,17 +5043,17 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
 ; GFX10-NEXT:    v_cmp_gt_i64_e64 s1, s[4:5], 0
 ; GFX10-NEXT:    s_ashr_i32 s4, s9, 31
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s9
-; GFX10-NEXT:    s_add_i32 s5, s4, 0x80000000
-; GFX10-NEXT:    s_xor_b32 s8, s1, s0
+; GFX10-NEXT:    s_add_i32 s8, s4, 0x80000000
+; GFX10-NEXT:    s_xor_b32 s5, s1, s0
 ; GFX10-NEXT:    s_sub_u32 s0, s2, s6
 ; GFX10-NEXT:    s_subb_u32 s1, s3, s7
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s2, s[0:1], s[2:3]
 ; GFX10-NEXT:    v_cmp_gt_i64_e64 s3, s[6:7], 0
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s5
 ; GFX10-NEXT:    s_ashr_i32 s4, s1, 31
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s5
 ; GFX10-NEXT:    s_add_i32 s0, s4, 0x80000000
 ; GFX10-NEXT:    s_xor_b32 s1, s3, s2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s4, s1
@@ -5072,16 +5072,16 @@ define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inre
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
 ; GFX11-NEXT:    v_cmp_gt_i64_e64 s1, s[4:5], 0
 ; GFX11-NEXT:    s_ashr_i32 s4, s9, 31
-; GFX11-NEXT:    s_add_i32 s5, s4, 0x80000000
-; GFX11-NEXT:    s_xor_b32 s8, s1, s0
+; GFX11-NEXT:    s_add_i32 s8, s4, 0x80000000
+; GFX11-NEXT:    s_xor_b32 s5, s1, s0
 ; GFX11-NEXT:    s_sub_u32 s0, s2, s6
 ; GFX11-NEXT:    s_subb_u32 s1, s3, s7
 ; GFX11-NEXT:    v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s2, s[0:1], s[2:3]
 ; GFX11-NEXT:    v_cmp_gt_i64_e64 s3, s[6:7], 0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s8
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s5
 ; GFX11-NEXT:    s_ashr_i32 s4, s1, 31
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s5, s8
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, s5
 ; GFX11-NEXT:    s_add_i32 s0, s4, 0x80000000
 ; GFX11-NEXT:    s_xor_b32 s1, s3, s2
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s4, s1
@@ -5263,20 +5263,20 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s1
 ; GFX10-NEXT:    s_add_i32 s1, s0, 0x80000000
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v3, v2, vcc_lo
-; GFX10-NEXT:    v_mov_b32_e32 v2, s9
+; GFX10-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s11
 ; GFX10-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s8
 ; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX10-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX10-NEXT:    v_mov_b32_e32 v0, s10
+; GFX10-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s0, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s0, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s1, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
@@ -5305,19 +5305,19 @@ define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc_lo
 ; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s1
 ; GFX11-NEXT:    s_add_i32 s1, s0, 0x80000000
-; GFX11-NEXT:    v_dual_cndmask_b32 v1, v3, v2 :: v_dual_mov_b32 v2, s9
+; GFX11-NEXT:    v_dual_cndmask_b32 v1, v3, v2 :: v_dual_mov_b32 v2, s10
 ; GFX11-NEXT:    v_mov_b32_e32 v3, s11
 ; GFX11-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX11-NEXT:    v_dual_mov_b32 v1, s8 :: v_dual_and_b32 v0, 1, v0
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_mov_b32_e32 v0, s10
+; GFX11-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s0, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s0, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s1, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v2
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.ssub.sat.i128(i128 %lhs, i128 %rhs)
@@ -5474,8 +5474,8 @@ define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
 ; GFX11-NEXT:    v_xor_b32_e32 v0, v0, v8
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-NEXT:    v_dual_cndmask_b32 v2, v6, v2 :: v_dual_cndmask_b32 v3, v7, v3
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.ssub.sat.i128(i128 %lhs, i128 %rhs)
@@ -5646,8 +5646,8 @@ define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
 ; GFX11-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-NEXT:    v_dual_cndmask_b32 v2, v6, v2 :: v_dual_cndmask_b32 v3, v7, v3
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call i128 @llvm.ssub.sat.i128(i128 %lhs, i128 %rhs)
@@ -6237,7 +6237,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX10-NEXT:    s_subb_u32 s3, s7, s15
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX10-NEXT:    s_cmp_eq_u64 s[2:3], s[6:7]
-; GFX10-NEXT:    v_mov_b32_e32 v6, s1
+; GFX10-NEXT:    v_mov_b32_e32 v6, s2
 ; GFX10-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s4
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s4, s[2:3], s[6:7]
@@ -6260,29 +6260,29 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
 ; GFX10-NEXT:    v_cndmask_b32_e32 v2, v4, v3, vcc_lo
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s18
-; GFX10-NEXT:    v_mov_b32_e32 v4, s19
 ; GFX10-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX10-NEXT:    v_mov_b32_e32 v0, s16
+; GFX10-NEXT:    v_mov_b32_e32 v0, s19
+; GFX10-NEXT:    v_mov_b32_e32 v4, s17
 ; GFX10-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GFX10-NEXT:    v_mov_b32_e32 v2, s17
+; GFX10-NEXT:    v_mov_b32_e32 v2, s16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s8, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s8, vcc_lo
-; GFX10-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s8, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s9, vcc_lo
+; GFX10-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s8, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v4
 ; GFX10-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX10-NEXT:    v_mov_b32_e32 v1, s2
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX10-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s0, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v3
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v5
-; GFX10-NEXT:    v_readfirstlane_b32 s5, v6
-; GFX10-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX10-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
@@ -6317,7 +6317,7 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX11-NEXT:    v_cmp_lt_u64_e64 s4, s[0:1], s[4:5]
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, v3, v2, vcc_lo
 ; GFX11-NEXT:    s_subb_u32 s3, s7, s15
-; GFX11-NEXT:    v_dual_mov_b32 v6, s1 :: v_dual_mov_b32 v7, s3
+; GFX11-NEXT:    v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s3
 ; GFX11-NEXT:    s_cmp_eq_u64 s[2:3], s[6:7]
 ; GFX11-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s4
@@ -6335,34 +6335,34 @@ define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128>
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX11-NEXT:    s_and_b32 s5, 1, s5
 ; GFX11-NEXT:    s_ashr_i32 s4, s3, 31
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s6
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc_lo
 ; GFX11-NEXT:    v_cmp_ne_u32_e64 vcc_lo, 0, s5
 ; GFX11-NEXT:    s_add_i32 s0, s4, 0x80000000
 ; GFX11-NEXT:    v_dual_cndmask_b32 v2, v4, v3 :: v_dual_mov_b32 v3, s18
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_mov_b32_e32 v0, s16
+; GFX11-NEXT:    v_mov_b32_e32 v4, s17
 ; GFX11-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GFX11-NEXT:    v_mov_b32_e32 v4, s19
-; GFX11-NEXT:    v_mov_b32_e32 v2, s17
+; GFX11-NEXT:    v_mov_b32_e32 v0, s19
+; GFX11-NEXT:    v_mov_b32_e32 v2, s16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s8, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s8, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, s9, vcc_lo
 ; GFX11-NEXT:    v_and_b32_e32 v1, 1, v1
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, s8, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s8, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s8, vcc_lo
+; GFX11-NEXT:    v_readfirstlane_b32 s3, v4
 ; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX11-NEXT:    v_mov_b32_e32 v1, s2
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v4
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX11-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX11-NEXT:    v_mov_b32_e32 v1, s1
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, s4, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s4, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s0, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v3
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v5
-; GFX11-NEXT:    v_readfirstlane_b32 s5, v6
-; GFX11-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
 ; GFX11-NEXT:    ; return to shader part epilog
   %result = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
index 018e5fb6ee3b8..1a6d26142208f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -1937,39 +1937,39 @@ define <2 x i64> @v_udiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
 ; GISEL-NEXT:    v_mul_lo_u32 v18, v10, v7
 ; GISEL-NEXT:    v_mul_hi_u32 v19, v9, v7
 ; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v15, v6
-; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v18, v13
-; GISEL-NEXT:    v_mul_lo_u32 v15, v11, v17
-; GISEL-NEXT:    v_mul_hi_u32 v18, v7, v17
-; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v13, v19
-; GISEL-NEXT:    v_mul_lo_u32 v19, v7, v13
-; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v15, v19
-; GISEL-NEXT:    v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v15, v18
 ; GISEL-NEXT:    v_mul_lo_u32 v15, v8, v14
+; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v18, v13
 ; GISEL-NEXT:    v_mul_hi_u32 v18, v12, v14
 ; GISEL-NEXT:    v_mul_hi_u32 v14, v8, v14
-; GISEL-NEXT:    v_mul_hi_u32 v17, v11, v17
-; GISEL-NEXT:    v_add_i32_e64 v16, s[4:5], v6, v16
+; GISEL-NEXT:    v_add_i32_e32 v16, vcc, v6, v16
 ; GISEL-NEXT:    v_mul_lo_u32 v6, v12, v16
-; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v15, v6
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, s[4:5]
+; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v15, v6
+; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v18
+; GISEL-NEXT:    v_mul_lo_u32 v6, v11, v17
+; GISEL-NEXT:    v_mul_hi_u32 v18, v7, v17
+; GISEL-NEXT:    v_mul_hi_u32 v17, v11, v17
+; GISEL-NEXT:    v_add_i32_e64 v13, s[4:5], v13, v19
+; GISEL-NEXT:    v_mul_lo_u32 v19, v7, v13
+; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v19
+; GISEL-NEXT:    v_cndmask_b32_e64 v19, 0, 1, s[4:5]
 ; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v18
 ; GISEL-NEXT:    v_mul_lo_u32 v6, v8, v16
-; GISEL-NEXT:    v_cndmask_b32_e64 v18, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v15, s[4:5], v15, v18
+; GISEL-NEXT:    v_mul_lo_u32 v18, v11, v13
+; GISEL-NEXT:    v_add_i32_e64 v17, s[6:7], v18, v17
+; GISEL-NEXT:    v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v15, v18
 ; GISEL-NEXT:    v_mul_hi_u32 v18, v12, v16
-; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v14
-; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v18, s[4:5], v6, v18
-; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v14, s[4:5], v14, v6
+; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v14
+; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v18, vcc, v6, v18
 ; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v14, vcc, v14, v6
+; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
 ; GISEL-NEXT:    v_add_i32_e32 v19, vcc, v19, v6
-; GISEL-NEXT:    v_mul_lo_u32 v6, v11, v13
-; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v17
-; GISEL-NEXT:    v_mul_hi_u32 v17, v7, v13
-; GISEL-NEXT:    v_cndmask_b32_e64 v20, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v17, vcc, v6, v17
+; GISEL-NEXT:    v_mul_hi_u32 v6, v7, v13
+; GISEL-NEXT:    v_cndmask_b32_e64 v20, 0, 1, s[6:7]
+; GISEL-NEXT:    v_add_i32_e32 v17, vcc, v17, v6
 ; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
 ; GISEL-NEXT:    v_add_i32_e32 v20, vcc, v20, v6
 ; GISEL-NEXT:    v_and_b32_e32 v6, 0xffffff, v0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
index ff0114cfc3ddb..1aaf3122cc00d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
@@ -506,17 +506,17 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
 ; GFX10-NEXT:    v_sub_co_u32 v10, s0, v6, s18
 ; GFX10-NEXT:    v_subrev_co_ci_u32_e64 v0, s0, 0, v0, s0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v13, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc_lo
 ; GFX10-NEXT:    v_cmp_ne_u32_e64 s0, 0, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v0, vcc_lo
-; GFX10-NEXT:    v_mov_b32_e32 v10, 0
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v4, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v6, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v9, v0, vcc_lo
+; GFX10-NEXT:    v_mov_b32_e32 v9, 0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v5, v2, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, v4, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v7, v6, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v8, v9, s0
-; GFX10-NEXT:    global_store_dwordx2 v10, v[0:1], s[12:13]
-; GFX10-NEXT:    global_store_dwordx2 v10, v[2:3], s[14:15]
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v7, v4, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v8, v6, s0
+; GFX10-NEXT:    global_store_dwordx2 v9, v[0:1], s[12:13]
+; GFX10-NEXT:    global_store_dwordx2 v9, v[2:3], s[14:15]
 ; GFX10-NEXT:    s_endpgm
   %div = udiv i64 %x, %y
   store i64 %div, ptr addrspace(1) %out0
@@ -663,24 +663,24 @@ define amdgpu_kernel void @udivrem_v2i32(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, s16, v2
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v3, s17, v3
+; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s18, v2
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v6, s18, v2
-; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s19, v3
-; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s18, v2
+; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s19, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s19, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc_lo
-; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s0
 ; GFX10-NEXT:    v_add_nc_u32_e32 v4, 1, v0
-; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s18, v2
-; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s19, v3
+; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s18, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
+; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s19, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v6, s18, v2
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s19, v3
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s0
 ; GFX10-NEXT:    global_store_dwordx2 v8, v[0:1], s[12:13]
 ; GFX10-NEXT:    global_store_dwordx2 v8, v[2:3], s[14:15]
 ; GFX10-NEXT:    s_endpgm
@@ -1532,10 +1532,12 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v1, s7
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v2, s4
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v3, s6
-; GFX10-NEXT:    s_sub_u32 s0, 0, s4
+; GFX10-NEXT:    s_sub_u32 s1, 0, s4
 ; GFX10-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GFX10-NEXT:    v_mul_f32_e32 v1, 0x4f800000, v1
-; GFX10-NEXT:    s_subb_u32 s1, 0, s5
+; GFX10-NEXT:    s_subb_u32 s2, 0, s5
+; GFX10-NEXT:    s_sub_u32 s3, 0, s6
+; GFX10-NEXT:    s_subb_u32 s10, 0, s7
 ; GFX10-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_rcp_iflag_f32_e32 v0, v0
@@ -1554,17 +1556,15 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_add_f32_e32 v1, v3, v1
 ; GFX10-NEXT:    v_cvt_u32_f32_e32 v7, v0
 ; GFX10-NEXT:    v_cvt_u32_f32_e32 v8, v1
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s2, s0, v7, 0
-; GFX10-NEXT:    s_sub_u32 s2, 0, s6
-; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s3, s2, v8, 0
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, s1, v7, 0
+; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s0, s3, v8, 0
 ; GFX10-NEXT:    v_mul_hi_u32 v11, v9, v0
-; GFX10-NEXT:    v_mad_u64_u32 v[4:5], s3, s0, v9, v[1:2]
-; GFX10-NEXT:    v_mad_u64_u32 v[5:6], s3, s2, v10, v[3:4]
+; GFX10-NEXT:    v_mad_u64_u32 v[4:5], s0, s1, v9, v[1:2]
+; GFX10-NEXT:    v_mad_u64_u32 v[5:6], s0, s3, v10, v[3:4]
 ; GFX10-NEXT:    v_mul_lo_u32 v6, v9, v0
-; GFX10-NEXT:    s_subb_u32 s3, 0, s7
-; GFX10-NEXT:    v_mad_u64_u32 v[3:4], s10, s1, v7, v[4:5]
+; GFX10-NEXT:    v_mad_u64_u32 v[3:4], s0, s2, v7, v[4:5]
 ; GFX10-NEXT:    v_mul_hi_u32 v4, v7, v0
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s10, s3, v8, v[5:6]
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, s10, v8, v[5:6]
 ; GFX10-NEXT:    v_mul_lo_u32 v1, v10, v2
 ; GFX10-NEXT:    v_mul_hi_u32 v5, v8, v2
 ; GFX10-NEXT:    v_mul_hi_u32 v2, v10, v2
@@ -1576,45 +1576,45 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_mul_hi_u32 v17, v8, v0
 ; GFX10-NEXT:    v_mul_hi_u32 v3, v9, v3
 ; GFX10-NEXT:    v_mul_hi_u32 v0, v10, v0
-; GFX10-NEXT:    v_add_co_u32 v6, s10, v6, v12
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v11, s10, v13, v11
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v1, s10, v1, v15
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v2, s10, v16, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v4, s10, v6, v4
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v6, s10, v11, v14
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v1, s10, v1, v5
+; GFX10-NEXT:    v_add_co_u32 v6, s0, v6, v12
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s0
+; GFX10-NEXT:    v_add_co_u32 v11, s0, v13, v11
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, 0, 1, s0
+; GFX10-NEXT:    v_add_co_u32 v1, s0, v1, v15
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, 0, 1, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v16, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, 0, 1, s0
+; GFX10-NEXT:    v_add_co_u32 v4, s0, v6, v4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s0
+; GFX10-NEXT:    v_add_co_u32 v6, s0, v11, v14
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, 0, 1, s0
+; GFX10-NEXT:    v_add_co_u32 v1, s0, v1, v5
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
 ; GFX10-NEXT:    v_add_nc_u32_e32 v4, v12, v4
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v2, s10, v2, v17
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s10
-; GFX10-NEXT:    v_add_co_u32 v4, s10, v6, v4
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v2, v17
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s0
 ; GFX10-NEXT:    v_add_nc_u32_e32 v1, v15, v1
+; GFX10-NEXT:    v_add_co_u32 v4, s0, v6, v4
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s0
 ; GFX10-NEXT:    v_add_nc_u32_e32 v11, v13, v11
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s10
+; GFX10-NEXT:    v_add_co_u32 v1, s0, v2, v1
 ; GFX10-NEXT:    v_add_nc_u32_e32 v5, v16, v5
-; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v4
-; GFX10-NEXT:    v_add_co_u32 v1, s10, v2, v1
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; GFX10-NEXT:    v_add3_u32 v3, v11, v6, v3
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s10
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, v9, v3, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v4
+; GFX10-NEXT:    v_add_co_u32 v8, s0, v8, v1
 ; GFX10-NEXT:    v_add3_u32 v2, v5, v2, v0
-; GFX10-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v1
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s10, s0, v7, 0
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, v10, v2, vcc_lo
-; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s10, s2, v8, 0
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, v9, v3, vcc_lo
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s11, s1, v7, 0
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v10, vcc_lo, v10, v2, s0
+; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s0, s3, v8, 0
 ; GFX10-NEXT:    v_mul_hi_u32 v11, v9, v0
-; GFX10-NEXT:    v_mad_u64_u32 v[4:5], s0, s0, v9, v[1:2]
-; GFX10-NEXT:    v_mad_u64_u32 v[5:6], s0, s2, v10, v[3:4]
+; GFX10-NEXT:    v_mad_u64_u32 v[4:5], s0, s1, v9, v[1:2]
+; GFX10-NEXT:    v_mad_u64_u32 v[5:6], s0, s3, v10, v[3:4]
 ; GFX10-NEXT:    v_mul_lo_u32 v6, v9, v0
-; GFX10-NEXT:    v_mad_u64_u32 v[3:4], s0, s1, v7, v[4:5]
+; GFX10-NEXT:    v_mad_u64_u32 v[3:4], s0, s2, v7, v[4:5]
 ; GFX10-NEXT:    v_mul_hi_u32 v4, v7, v0
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, s3, v8, v[5:6]
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, s10, v8, v[5:6]
 ; GFX10-NEXT:    v_mul_lo_u32 v1, v10, v2
 ; GFX10-NEXT:    v_mul_hi_u32 v5, v8, v2
 ; GFX10-NEXT:    v_mul_hi_u32 v2, v10, v2
@@ -1652,13 +1652,13 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; GFX10-NEXT:    v_add3_u32 v3, v11, v6, v3
 ; GFX10-NEXT:    v_add_co_u32 v4, vcc_lo, v7, v4
+; GFX10-NEXT:    v_add_co_u32 v1, s0, v8, v1
 ; GFX10-NEXT:    v_add3_u32 v0, v5, v2, v0
 ; GFX10-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, v9, v3, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v1, vcc_lo, v8, v1
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v0, vcc_lo, v10, v0, vcc_lo
 ; GFX10-NEXT:    v_mul_lo_u32 v3, s17, v4
-; GFX10-NEXT:    v_mul_lo_u32 v8, s16, v2
 ; GFX10-NEXT:    v_mul_hi_u32 v5, s16, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v0, vcc_lo, v10, v0, s0
+; GFX10-NEXT:    v_mul_lo_u32 v8, s16, v2
 ; GFX10-NEXT:    v_mul_hi_u32 v4, s17, v4
 ; GFX10-NEXT:    v_mul_lo_u32 v9, s17, v2
 ; GFX10-NEXT:    v_mul_lo_u32 v6, s19, v1
@@ -2063,8 +2063,8 @@ define amdgpu_kernel void @udivrem_v2i8(ptr addrspace(1) %out0, ptr addrspace(1)
 ; GFX10-NEXT:    v_add_nc_u32_e32 v5, 1, v1
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, s3, v2
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v3, s0, v3
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v6, s2, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v2
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v6, s2, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s1, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v7, s1, v3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
@@ -2377,8 +2377,8 @@ define amdgpu_kernel void @udivrem_v2i16(ptr addrspace(1) %out0, ptr addrspace(1
 ; GFX10-NEXT:    v_add_nc_u32_e32 v6, 1, v1
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, s3, v2
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v3, s0, v3
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s2, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v2
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v5, s2, v2
 ; GFX10-NEXT:    v_cmp_le_u32_e64 s0, s1, v3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v4, s1, v3
@@ -2508,8 +2508,8 @@ define amdgpu_kernel void @udivrem_i3(ptr addrspace(1) %out0, ptr addrspace(1) %
 ; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v0
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v1, s0, v1
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v3, s4, v1
 ; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v1
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v3, s4, v1
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
 ; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v0
@@ -2629,8 +2629,8 @@ define amdgpu_kernel void @udivrem_i27(ptr addrspace(1) %out0, ptr addrspace(1)
 ; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v0
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v1, s0, v1
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-NEXT:    v_subrev_nc_u32_e32 v3, s4, v1
 ; GFX10-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v1
+; GFX10-NEXT:    v_subrev_nc_u32_e32 v3, s4, v1
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
 ; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
index 51d5253f87920..f6a228614a27e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -2352,39 +2352,39 @@ define <2 x i64> @v_urem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
 ; GISEL-NEXT:    v_mul_lo_u32 v18, v10, v7
 ; GISEL-NEXT:    v_mul_hi_u32 v19, v9, v7
 ; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v15, v6
-; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v18, v13
-; GISEL-NEXT:    v_mul_lo_u32 v15, v11, v17
-; GISEL-NEXT:    v_mul_hi_u32 v18, v7, v17
-; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v13, v19
-; GISEL-NEXT:    v_mul_lo_u32 v19, v7, v13
-; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v15, v19
-; GISEL-NEXT:    v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v15, v18
 ; GISEL-NEXT:    v_mul_lo_u32 v15, v8, v14
+; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v18, v13
 ; GISEL-NEXT:    v_mul_hi_u32 v18, v12, v14
 ; GISEL-NEXT:    v_mul_hi_u32 v14, v8, v14
-; GISEL-NEXT:    v_mul_hi_u32 v17, v11, v17
-; GISEL-NEXT:    v_add_i32_e64 v16, s[4:5], v6, v16
+; GISEL-NEXT:    v_add_i32_e32 v16, vcc, v6, v16
 ; GISEL-NEXT:    v_mul_lo_u32 v6, v12, v16
-; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v15, v6
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, s[4:5]
+; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v15, v6
+; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v18
+; GISEL-NEXT:    v_mul_lo_u32 v6, v11, v17
+; GISEL-NEXT:    v_mul_hi_u32 v18, v7, v17
+; GISEL-NEXT:    v_mul_hi_u32 v17, v11, v17
+; GISEL-NEXT:    v_add_i32_e64 v13, s[4:5], v13, v19
+; GISEL-NEXT:    v_mul_lo_u32 v19, v7, v13
+; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v19
+; GISEL-NEXT:    v_cndmask_b32_e64 v19, 0, 1, s[4:5]
 ; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v18
 ; GISEL-NEXT:    v_mul_lo_u32 v6, v8, v16
-; GISEL-NEXT:    v_cndmask_b32_e64 v18, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v15, s[4:5], v15, v18
+; GISEL-NEXT:    v_mul_lo_u32 v18, v11, v13
+; GISEL-NEXT:    v_add_i32_e64 v17, s[6:7], v18, v17
+; GISEL-NEXT:    v_cndmask_b32_e64 v18, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v15, v18
 ; GISEL-NEXT:    v_mul_hi_u32 v18, v12, v16
-; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v14
-; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v18, s[4:5], v6, v18
-; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v14, s[4:5], v14, v6
+; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v14
+; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v18, vcc, v6, v18
 ; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; GISEL-NEXT:    v_add_i32_e32 v14, vcc, v14, v6
+; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[4:5]
 ; GISEL-NEXT:    v_add_i32_e32 v19, vcc, v19, v6
-; GISEL-NEXT:    v_mul_lo_u32 v6, v11, v13
-; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v17
-; GISEL-NEXT:    v_mul_hi_u32 v17, v7, v13
-; GISEL-NEXT:    v_cndmask_b32_e64 v20, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v17, vcc, v6, v17
+; GISEL-NEXT:    v_mul_hi_u32 v6, v7, v13
+; GISEL-NEXT:    v_cndmask_b32_e64 v20, 0, 1, s[6:7]
+; GISEL-NEXT:    v_add_i32_e32 v17, vcc, v17, v6
 ; GISEL-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
 ; GISEL-NEXT:    v_add_i32_e32 v20, vcc, v20, v6
 ; GISEL-NEXT:    v_and_b32_e32 v6, 0xffffff, v0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 8788dc2c059d6..8426419c9352c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -5741,59 +5741,60 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:112
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:96
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:92
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:104
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:96
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:92
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:88
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:84
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:88
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:84
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:80
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:76
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:76
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:72
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:68
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:388
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:64
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:388
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:60
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:56
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:52
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:48
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:44
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:40
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:36
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:32
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:28
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
@@ -5806,9 +5807,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v11
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 8, v13
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v15
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v15
 ; GCN-NEXT:    v_lshlrev_b32_e32 v54, 8, v17
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v19
@@ -5822,6 +5821,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v27
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v43, 8, v29
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v14
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20
@@ -5839,38 +5839,37 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:116
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v12
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v10
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v12
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v45, 8, v5
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 8, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v22
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v20
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v24
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v22
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v18
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v20
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v18
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v14
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v16
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v10
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v8
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v6
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:104
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v4
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v4
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:100
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:100
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:108
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -5883,17 +5882,17 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:128
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:136
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:132
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:140
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:152
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:148
@@ -5917,11 +5916,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:168
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:164
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:164
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:172
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
@@ -5931,7 +5930,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v51, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
@@ -5944,11 +5943,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:200
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:196
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:196
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:204
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
@@ -5971,11 +5970,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:228
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:236
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
@@ -5983,7 +5982,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:248
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:244
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v62, 8, v1
@@ -5998,11 +5997,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:264
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:260
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:260
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:268
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
@@ -6025,14 +6024,14 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:296
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:292
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:300
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:312
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:308
@@ -6052,11 +6051,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:328
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:324
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:324
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:332
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
@@ -6067,7 +6066,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
@@ -6091,18 +6090,20 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:376
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 8, v1
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:372
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:364
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 8, v4
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v3
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
 ; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
@@ -6135,7 +6136,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v3
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    v_or_b32_e32 v2, v2, v38
@@ -6156,9 +6157,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v4
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v4, v5, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v55, v4
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xffff, v3
 ; GCN-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
@@ -6208,7 +6207,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v8, v9, v8
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xffff, v7
@@ -6228,11 +6227,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v9
 ; GCN-NEXT:    v_or_b32_e32 v9, v9, v53
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v10
 ; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v10, v11, v10
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xffff, v9
@@ -6266,7 +6265,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v12, v40
+; GCN-NEXT:    v_or_b32_e32 v12, v12, v35
 ; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
@@ -6289,7 +6288,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v14, v15, v14
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xffff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v13, v14
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GCN-NEXT:    v_or_b32_e32 v14, v14, v50
@@ -6308,7 +6307,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v15, v15, v16
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v16
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
@@ -6323,7 +6322,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xff, v17
 ; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
@@ -6342,7 +6341,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v18
 ; GCN-NEXT:    v_lshlrev_b32_e32 v18, 16, v18
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v18, v19, v18
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xffff, v17
@@ -6363,11 +6362,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v21, v58
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v22, v22, v62
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v23, 0xff, v23
 ; GCN-NEXT:    v_or_b32_e32 v23, v23, v59
@@ -6390,7 +6389,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
-; GCN-NEXT:    v_or_b32_e32 v28, v28, v35
+; GCN-NEXT:    v_or_b32_e32 v28, v28, v40
 ; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xff, v29
@@ -6457,7 +6456,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v35, v36, v35
-; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
@@ -6482,19 +6481,21 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v39
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 16, v39
-; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v39, v48, v39
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v48
 ; GCN-NEXT:    v_lshlrev_b32_e32 v48, 16, v48
-; GCN-NEXT:    v_or_b32_e32 v48, v55, v48
+; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v48, v49, v48
 ; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v49
 ; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v49
-; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v54, v49
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xffff, v18
@@ -6661,8 +6662,6 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr47
-; GCN-NEXT:    ; implicit-def: $vgpr32
-; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr54
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
@@ -6687,7 +6686,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr41
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
-; GCN-NEXT:    ; implicit-def: $vgpr40
+; GCN-NEXT:    ; implicit-def: $vgpr35
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr63
@@ -6738,7 +6737,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr52
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
-; GCN-NEXT:    ; implicit-def: $vgpr35
+; GCN-NEXT:    ; implicit-def: $vgpr40
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
 ; GCN-NEXT:    ; implicit-def: $vgpr33
@@ -6746,6 +6745,8 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; kill: killed $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr44
 ; GCN-NEXT:    ; implicit-def: $vgpr36
+; GCN-NEXT:    ; kill: killed $vgpr36
+; GCN-NEXT:    ; implicit-def: $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr55
 ; GCN-NEXT:    ; kill: killed $vgpr55
 ; GCN-NEXT:    ; implicit-def: $vgpr55
@@ -6766,7 +6767,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v56, v1
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
@@ -6818,13 +6819,13 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v12, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v40, v12
+; GCN-NEXT:    v_or_b32_e32 v12, v35, v12
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v63, v13
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v14, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
@@ -6870,12 +6871,12 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v58, v21
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v25, v62, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
@@ -6884,7 +6885,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v37, v32, v22
+; GCN-NEXT:    v_or_b32_e32 v35, v32, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -6904,7 +6905,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v56, v35, v22
+; GCN-NEXT:    v_or_b32_e32 v56, v40, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -6947,9 +6948,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v63, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v63, v55, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -6957,7 +6956,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v36, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v37, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -6979,7 +6978,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v0, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
@@ -6990,12 +6989,12 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v51, v23, v22
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v52, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -7038,7 +7037,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v27, v26
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v27, vcc, 3, v27
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
@@ -7046,7 +7045,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v27, v28, v27
-; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v28, vcc, 3, v28
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
@@ -7059,7 +7058,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v30
 ; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
 ; GCN-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v30, v31, v30
 ; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
@@ -7091,17 +7090,17 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v34
 ; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
 ; GCN-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v34, v35, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v34, v36, v34
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v35
-; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v35
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
+; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v36
+; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
+; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v35, v48, v35
+; GCN-NEXT:    v_or_b32_e32 v36, v48, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v48, vcc, 3, v48
@@ -7115,7 +7114,6 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v53, vcc, 3, v53
 ; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v53
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
-; GCN-NEXT:    v_mov_b32_e32 v0, v55
 ; GCN-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v53, v55, v53
@@ -7127,7 +7125,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v55, v40, v55
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v40, vcc, 3, v40
 ; GCN-NEXT:    v_and_b32_e32 v40, 0xff, v40
@@ -7156,21 +7154,23 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v44, vcc, 3, v44
 ; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v44
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v44
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v44, v46, v44
+; GCN-NEXT:    v_or_b32_e32 v44, v0, v44
 ; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v46, vcc, 3, v46
 ; GCN-NEXT:    v_and_b32_e32 v46, 0xff, v46
 ; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v46
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v46, v0, v46
 ; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v47, vcc, 3, v47
 ; GCN-NEXT:    v_and_b32_e32 v47, 0xff, v47
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v47
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v47, v0, v47
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
@@ -7209,7 +7209,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, s7, v21
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, s7, v25
 ; GCN-NEXT:    v_add_i32_e32 v29, vcc, s7, v29
-; GCN-NEXT:    v_add_i32_e32 v37, vcc, s7, v37
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, s7, v35
 ; GCN-NEXT:    v_add_i32_e32 v50, vcc, s7, v50
 ; GCN-NEXT:    v_add_i32_e32 v41, vcc, s7, v41
 ; GCN-NEXT:    v_add_i32_e32 v45, vcc, s7, v45
@@ -7237,7 +7237,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xffff, v21
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xffff, v25
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xffff, v29
-; GCN-NEXT:    v_and_b32_e32 v37, 0xffff, v37
+; GCN-NEXT:    v_and_b32_e32 v35, 0xffff, v35
 ; GCN-NEXT:    v_and_b32_e32 v50, 0xffff, v50
 ; GCN-NEXT:    v_and_b32_e32 v41, 0xffff, v41
 ; GCN-NEXT:    v_and_b32_e32 v45, 0xffff, v45
@@ -7245,7 +7245,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xffff, v58
 ; GCN-NEXT:    v_and_b32_e32 v59, 0xffff, v59
 ; GCN-NEXT:    v_and_b32_e32 v57, 0xffff, v57
-; GCN-NEXT:    v_or_b32_e32 v4, v36, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v37, v4
 ; GCN-NEXT:    v_or_b32_e32 v5, v38, v5
 ; GCN-NEXT:    v_or_b32_e32 v6, v39, v6
 ; GCN-NEXT:    v_or_b32_e32 v7, v49, v7
@@ -7263,9 +7263,9 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v19, v32, v19
 ; GCN-NEXT:    v_or_b32_e32 v20, v33, v20
 ; GCN-NEXT:    v_or_b32_e32 v21, v34, v21
-; GCN-NEXT:    v_or_b32_e32 v22, v35, v25
+; GCN-NEXT:    v_or_b32_e32 v22, v36, v25
 ; GCN-NEXT:    v_or_b32_e32 v23, v48, v29
-; GCN-NEXT:    v_or_b32_e32 v24, v53, v37
+; GCN-NEXT:    v_or_b32_e32 v24, v53, v35
 ; GCN-NEXT:    v_or_b32_e32 v25, v55, v50
 ; GCN-NEXT:    v_or_b32_e32 v26, v40, v41
 ; GCN-NEXT:    v_or_b32_e32 v27, v42, v45
@@ -14967,43 +14967,42 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v37, v18, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v20, 0xffff, v20, v32
 ; GFX11-TRUE16-NEXT:    v_dual_add_f32 v37, 0x40c00000, v38 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v36, 0x400000, v18
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v38, 16, v16
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v48, 0x400000, v37
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v19, 0xffff, v19, v33
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v18, v35, v36, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v38
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v38, v17, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v35, v37, 16, 1
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v39, v36, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v38, v17, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v51, 0x400000, v36
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v35, v37, 0x7fff
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v39, v39, v36, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v17, v38, v49, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v16
-; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v18.l, v18.h
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v17.l, v17.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v36, v39, v51, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v18, 0xffff, v18, v34
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v36.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v35, v35, v48, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v17, 0xffff, v17, v35
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v16, v38, v49, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v16, 0xffff, v36, v16
 ; GFX11-TRUE16-NEXT:  .LBB9_2: ; %end
 ; GFX11-TRUE16-NEXT:    s_or_b32 exec_lo, exec_lo, s0
@@ -15164,15 +15163,15 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v34, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v37, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v33, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v34, 0x40c00000, v34 :: v_dual_add_f32 v35, 0x40c00000, v37
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v38, v34, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v32, v35, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
@@ -15521,17 +15520,16 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v39, 16, v16
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v18, v36, v37, vcc_lo
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v39
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v38, v35, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v38, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v39, v17, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v36, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v49, 0x400000, v36
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v18, v18, v34, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v37, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v38, v39, v17, 0x7fff
@@ -15539,7 +15537,7 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v36, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v38, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v37, v16, 0x7fff
@@ -24129,59 +24127,60 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:112
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:96
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:92
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:104
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:96
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:92
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:88
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:84
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:88
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:84
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:80
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:76
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:76
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:72
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:68
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:388
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:64
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:388
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:60
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:56
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:52
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:48
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:44
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:40
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:36
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:32
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:28
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
@@ -24194,9 +24193,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v11
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 8, v13
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v15
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v15
 ; GCN-NEXT:    v_lshlrev_b32_e32 v54, 8, v17
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v19
@@ -24210,6 +24207,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v27
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v43, 8, v29
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v14
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20
@@ -24227,38 +24225,37 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:116
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v12
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v10
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v12
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v45, 8, v5
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 8, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v22
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v20
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v24
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v22
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v18
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v20
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v18
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v14
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v16
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v10
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v8
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v6
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:104
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v4
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v4
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:100
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:100
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:108
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -24271,17 +24268,17 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:128
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:136
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:132
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:140
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:152
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:148
@@ -24305,11 +24302,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:168
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:164
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:164
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:172
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
@@ -24319,7 +24316,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v51, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
@@ -24332,11 +24329,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:200
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:196
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:196
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:204
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
@@ -24359,11 +24356,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:228
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:236
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
@@ -24371,7 +24368,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:248
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:244
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v62, 8, v1
@@ -24386,11 +24383,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:264
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:260
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:260
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:268
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
@@ -24413,14 +24410,14 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:296
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:292
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:300
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:312
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:308
@@ -24440,11 +24437,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:328
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:324
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:324
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:332
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
@@ -24455,7 +24452,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
@@ -24479,18 +24476,20 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:376
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 8, v1
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:372
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:364
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 8, v4
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v3
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
 ; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
@@ -24523,7 +24522,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v3
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    v_or_b32_e32 v2, v2, v38
@@ -24544,9 +24543,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v4
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v4, v5, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v55, v4
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xffff, v3
 ; GCN-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
@@ -24596,7 +24593,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v8, v9, v8
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xffff, v7
@@ -24616,11 +24613,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v9
 ; GCN-NEXT:    v_or_b32_e32 v9, v9, v53
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v10
 ; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v10, v11, v10
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xffff, v9
@@ -24654,7 +24651,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v12, v40
+; GCN-NEXT:    v_or_b32_e32 v12, v12, v35
 ; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
@@ -24677,7 +24674,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v14, v15, v14
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xffff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v13, v14
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GCN-NEXT:    v_or_b32_e32 v14, v14, v50
@@ -24696,7 +24693,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v15, v15, v16
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v16
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
@@ -24711,7 +24708,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xff, v17
 ; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
@@ -24730,7 +24727,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v18
 ; GCN-NEXT:    v_lshlrev_b32_e32 v18, 16, v18
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v18, v19, v18
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xffff, v17
@@ -24751,11 +24748,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v21, v58
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v22, v22, v62
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v23, 0xff, v23
 ; GCN-NEXT:    v_or_b32_e32 v23, v23, v59
@@ -24778,7 +24775,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
-; GCN-NEXT:    v_or_b32_e32 v28, v28, v35
+; GCN-NEXT:    v_or_b32_e32 v28, v28, v40
 ; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xff, v29
@@ -24845,7 +24842,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v35, v36, v35
-; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
@@ -24870,19 +24867,21 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v39
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 16, v39
-; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v39, v48, v39
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v48
 ; GCN-NEXT:    v_lshlrev_b32_e32 v48, 16, v48
-; GCN-NEXT:    v_or_b32_e32 v48, v55, v48
+; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v48, v49, v48
 ; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v49
 ; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v49
-; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v54, v49
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xffff, v18
@@ -25049,8 +25048,6 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr47
-; GCN-NEXT:    ; implicit-def: $vgpr32
-; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr54
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
@@ -25075,7 +25072,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr41
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
-; GCN-NEXT:    ; implicit-def: $vgpr40
+; GCN-NEXT:    ; implicit-def: $vgpr35
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr63
@@ -25126,7 +25123,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr52
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
-; GCN-NEXT:    ; implicit-def: $vgpr35
+; GCN-NEXT:    ; implicit-def: $vgpr40
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
 ; GCN-NEXT:    ; implicit-def: $vgpr33
@@ -25134,6 +25131,8 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; kill: killed $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr44
 ; GCN-NEXT:    ; implicit-def: $vgpr36
+; GCN-NEXT:    ; kill: killed $vgpr36
+; GCN-NEXT:    ; implicit-def: $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr55
 ; GCN-NEXT:    ; kill: killed $vgpr55
 ; GCN-NEXT:    ; implicit-def: $vgpr55
@@ -25154,7 +25153,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v56, v1
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
@@ -25206,13 +25205,13 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v12, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v40, v12
+; GCN-NEXT:    v_or_b32_e32 v12, v35, v12
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v63, v13
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v14, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
@@ -25258,12 +25257,12 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v58, v21
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v25, v62, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
@@ -25272,7 +25271,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v37, v32, v22
+; GCN-NEXT:    v_or_b32_e32 v35, v32, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -25292,7 +25291,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v56, v35, v22
+; GCN-NEXT:    v_or_b32_e32 v56, v40, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -25335,9 +25334,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v63, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v63, v55, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -25345,7 +25342,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v36, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v37, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -25367,7 +25364,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v0, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
@@ -25378,12 +25375,12 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v51, v23, v22
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v52, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -25426,7 +25423,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v27, v26
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v27, vcc, 3, v27
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
@@ -25434,7 +25431,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v27, v28, v27
-; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v28, vcc, 3, v28
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
@@ -25447,7 +25444,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v30
 ; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
 ; GCN-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v30, v31, v30
 ; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
@@ -25479,17 +25476,17 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v34
 ; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
 ; GCN-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v34, v35, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v34, v36, v34
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v35
-; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v35
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
+; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v36
+; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
+; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v35, v48, v35
+; GCN-NEXT:    v_or_b32_e32 v36, v48, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v48, vcc, 3, v48
@@ -25503,7 +25500,6 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v53, vcc, 3, v53
 ; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v53
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
-; GCN-NEXT:    v_mov_b32_e32 v0, v55
 ; GCN-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v53, v55, v53
@@ -25515,7 +25511,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v55, v40, v55
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v40, vcc, 3, v40
 ; GCN-NEXT:    v_and_b32_e32 v40, 0xff, v40
@@ -25544,21 +25540,23 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v44, vcc, 3, v44
 ; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v44
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v44
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v44, v46, v44
+; GCN-NEXT:    v_or_b32_e32 v44, v0, v44
 ; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v46, vcc, 3, v46
 ; GCN-NEXT:    v_and_b32_e32 v46, 0xff, v46
 ; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v46
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v46, v0, v46
 ; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v47, vcc, 3, v47
 ; GCN-NEXT:    v_and_b32_e32 v47, 0xff, v47
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v47
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v47, v0, v47
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
@@ -25597,7 +25595,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, s7, v21
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, s7, v25
 ; GCN-NEXT:    v_add_i32_e32 v29, vcc, s7, v29
-; GCN-NEXT:    v_add_i32_e32 v37, vcc, s7, v37
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, s7, v35
 ; GCN-NEXT:    v_add_i32_e32 v50, vcc, s7, v50
 ; GCN-NEXT:    v_add_i32_e32 v41, vcc, s7, v41
 ; GCN-NEXT:    v_add_i32_e32 v45, vcc, s7, v45
@@ -25625,7 +25623,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xffff, v21
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xffff, v25
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xffff, v29
-; GCN-NEXT:    v_and_b32_e32 v37, 0xffff, v37
+; GCN-NEXT:    v_and_b32_e32 v35, 0xffff, v35
 ; GCN-NEXT:    v_and_b32_e32 v50, 0xffff, v50
 ; GCN-NEXT:    v_and_b32_e32 v41, 0xffff, v41
 ; GCN-NEXT:    v_and_b32_e32 v45, 0xffff, v45
@@ -25633,7 +25631,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xffff, v58
 ; GCN-NEXT:    v_and_b32_e32 v59, 0xffff, v59
 ; GCN-NEXT:    v_and_b32_e32 v57, 0xffff, v57
-; GCN-NEXT:    v_or_b32_e32 v4, v36, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v37, v4
 ; GCN-NEXT:    v_or_b32_e32 v5, v38, v5
 ; GCN-NEXT:    v_or_b32_e32 v6, v39, v6
 ; GCN-NEXT:    v_or_b32_e32 v7, v49, v7
@@ -25651,9 +25649,9 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v19, v32, v19
 ; GCN-NEXT:    v_or_b32_e32 v20, v33, v20
 ; GCN-NEXT:    v_or_b32_e32 v21, v34, v21
-; GCN-NEXT:    v_or_b32_e32 v22, v35, v25
+; GCN-NEXT:    v_or_b32_e32 v22, v36, v25
 ; GCN-NEXT:    v_or_b32_e32 v23, v48, v29
-; GCN-NEXT:    v_or_b32_e32 v24, v53, v37
+; GCN-NEXT:    v_or_b32_e32 v24, v53, v35
 ; GCN-NEXT:    v_or_b32_e32 v25, v55, v50
 ; GCN-NEXT:    v_or_b32_e32 v26, v40, v41
 ; GCN-NEXT:    v_or_b32_e32 v27, v42, v45
@@ -33339,43 +33337,42 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v37, v18, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v20, 0xffff, v20, v32
 ; GFX11-TRUE16-NEXT:    v_dual_add_f32 v37, 0x40c00000, v38 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v36, 0x400000, v18
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v38, 16, v16
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v48, 0x400000, v37
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v19, 0xffff, v19, v33
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v18, v35, v36, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v38
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v38, v17, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v35, v37, 16, 1
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v39, v36, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v38, v17, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v51, 0x400000, v36
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v35, v37, 0x7fff
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v39, v39, v36, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v17, v38, v49, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v16
-; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v18.l, v18.h
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v17.l, v17.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v36, v39, v51, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v18, 0xffff, v18, v34
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v36.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v35, v35, v48, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v17, 0xffff, v17, v35
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v16, v38, v49, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v16, 0xffff, v36, v16
 ; GFX11-TRUE16-NEXT:  .LBB21_2: ; %end
 ; GFX11-TRUE16-NEXT:    s_or_b32 exec_lo, exec_lo, s0
@@ -33536,15 +33533,15 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v34, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v37, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v33, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v34, 0x40c00000, v34 :: v_dual_add_f32 v35, 0x40c00000, v37
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v38, v34, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v32, v35, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
@@ -33893,17 +33890,16 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v39, 16, v16
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v18, v36, v37, vcc_lo
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v39
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v38, v35, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v38, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v39, v17, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v36, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v49, 0x400000, v36
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v18, v18, v34, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v37, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v38, v39, v17, 0x7fff
@@ -33911,7 +33907,7 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v36, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v38, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v37, v16, 0x7fff
@@ -42111,59 +42107,60 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:112
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:96
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:92
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:104
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:96
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:92
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:88
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:84
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:88
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:84
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:80
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:76
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:76
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:72
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:68
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:388
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:64
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:388
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:60
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:56
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:52
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:48
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:44
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:40
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:36
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:32
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:28
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
@@ -42176,9 +42173,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v11
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 8, v13
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v15
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v15
 ; GCN-NEXT:    v_lshlrev_b32_e32 v54, 8, v17
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v19
@@ -42192,6 +42187,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v27
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v43, 8, v29
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v14
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20
@@ -42209,38 +42205,37 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:116
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v12
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v10
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v12
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v45, 8, v5
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 8, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v22
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v20
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v24
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v22
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v18
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v20
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v18
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v14
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v16
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v10
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v8
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v6
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:104
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v4
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v4
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:100
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:100
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:108
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -42253,17 +42248,17 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:128
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:136
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:132
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:140
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:152
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:148
@@ -42287,11 +42282,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:168
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:164
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:164
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:172
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
@@ -42301,7 +42296,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v51, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
@@ -42314,11 +42309,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:200
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:196
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:196
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:204
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
@@ -42341,11 +42336,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:228
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:236
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
@@ -42353,7 +42348,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:248
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:244
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v62, 8, v1
@@ -42368,11 +42363,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:264
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:260
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:260
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:268
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
@@ -42395,14 +42390,14 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:296
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:292
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:300
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:312
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:308
@@ -42422,11 +42417,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:328
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:324
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:324
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:332
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
@@ -42437,7 +42432,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
@@ -42461,18 +42456,20 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:376
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 8, v1
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:372
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:364
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 8, v4
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v3
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
 ; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
@@ -42505,7 +42502,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v3
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    v_or_b32_e32 v2, v2, v38
@@ -42526,9 +42523,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v4
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v4, v5, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v55, v4
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xffff, v3
 ; GCN-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
@@ -42578,7 +42573,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v8, v9, v8
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xffff, v7
@@ -42598,11 +42593,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v9
 ; GCN-NEXT:    v_or_b32_e32 v9, v9, v53
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v10
 ; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v10, v11, v10
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xffff, v9
@@ -42636,7 +42631,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v12, v40
+; GCN-NEXT:    v_or_b32_e32 v12, v12, v35
 ; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
@@ -42659,7 +42654,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v14, v15, v14
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xffff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v13, v14
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GCN-NEXT:    v_or_b32_e32 v14, v14, v50
@@ -42678,7 +42673,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v15, v15, v16
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v16
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
@@ -42693,7 +42688,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xff, v17
 ; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
@@ -42712,7 +42707,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v18
 ; GCN-NEXT:    v_lshlrev_b32_e32 v18, 16, v18
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v18, v19, v18
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xffff, v17
@@ -42733,11 +42728,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v21, v58
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v22, v22, v62
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v23, 0xff, v23
 ; GCN-NEXT:    v_or_b32_e32 v23, v23, v59
@@ -42760,7 +42755,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
-; GCN-NEXT:    v_or_b32_e32 v28, v28, v35
+; GCN-NEXT:    v_or_b32_e32 v28, v28, v40
 ; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xff, v29
@@ -42827,7 +42822,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v35, v36, v35
-; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
@@ -42852,19 +42847,21 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v39
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 16, v39
-; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v39, v48, v39
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v48
 ; GCN-NEXT:    v_lshlrev_b32_e32 v48, 16, v48
-; GCN-NEXT:    v_or_b32_e32 v48, v55, v48
+; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v48, v49, v48
 ; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v49
 ; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v49
-; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v54, v49
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xffff, v18
@@ -43031,8 +43028,6 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr47
-; GCN-NEXT:    ; implicit-def: $vgpr32
-; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr54
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
@@ -43057,7 +43052,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr41
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
-; GCN-NEXT:    ; implicit-def: $vgpr40
+; GCN-NEXT:    ; implicit-def: $vgpr35
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr63
@@ -43108,7 +43103,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr52
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
-; GCN-NEXT:    ; implicit-def: $vgpr35
+; GCN-NEXT:    ; implicit-def: $vgpr40
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
 ; GCN-NEXT:    ; implicit-def: $vgpr33
@@ -43116,6 +43111,8 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; kill: killed $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr44
 ; GCN-NEXT:    ; implicit-def: $vgpr36
+; GCN-NEXT:    ; kill: killed $vgpr36
+; GCN-NEXT:    ; implicit-def: $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr55
 ; GCN-NEXT:    ; kill: killed $vgpr55
 ; GCN-NEXT:    ; implicit-def: $vgpr55
@@ -43136,7 +43133,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v56, v1
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
@@ -43188,13 +43185,13 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v12, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v40, v12
+; GCN-NEXT:    v_or_b32_e32 v12, v35, v12
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v63, v13
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v14, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
@@ -43240,12 +43237,12 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v58, v21
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v25, v62, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
@@ -43254,7 +43251,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v37, v32, v22
+; GCN-NEXT:    v_or_b32_e32 v35, v32, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -43274,7 +43271,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v56, v35, v22
+; GCN-NEXT:    v_or_b32_e32 v56, v40, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -43317,9 +43314,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v63, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v63, v55, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -43327,7 +43322,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v36, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v37, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -43349,7 +43344,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v0, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
@@ -43360,12 +43355,12 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v51, v23, v22
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v52, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -43408,7 +43403,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v27, v26
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v27, vcc, 3, v27
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
@@ -43416,7 +43411,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v27, v28, v27
-; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v28, vcc, 3, v28
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
@@ -43429,7 +43424,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v30
 ; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
 ; GCN-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v30, v31, v30
 ; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
@@ -43461,17 +43456,17 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v34
 ; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
 ; GCN-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v34, v35, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v34, v36, v34
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v35
-; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v35
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
+; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v36
+; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
+; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v35, v48, v35
+; GCN-NEXT:    v_or_b32_e32 v36, v48, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v48, vcc, 3, v48
@@ -43485,7 +43480,6 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v53, vcc, 3, v53
 ; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v53
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
-; GCN-NEXT:    v_mov_b32_e32 v0, v55
 ; GCN-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v53, v55, v53
@@ -43497,7 +43491,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v55, v40, v55
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v40, vcc, 3, v40
 ; GCN-NEXT:    v_and_b32_e32 v40, 0xff, v40
@@ -43526,21 +43520,23 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v44, vcc, 3, v44
 ; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v44
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v44
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v44, v46, v44
+; GCN-NEXT:    v_or_b32_e32 v44, v0, v44
 ; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v46, vcc, 3, v46
 ; GCN-NEXT:    v_and_b32_e32 v46, 0xff, v46
 ; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v46
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v46, v0, v46
 ; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v47, vcc, 3, v47
 ; GCN-NEXT:    v_and_b32_e32 v47, 0xff, v47
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v47
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v47, v0, v47
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
@@ -43579,7 +43575,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, s7, v21
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, s7, v25
 ; GCN-NEXT:    v_add_i32_e32 v29, vcc, s7, v29
-; GCN-NEXT:    v_add_i32_e32 v37, vcc, s7, v37
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, s7, v35
 ; GCN-NEXT:    v_add_i32_e32 v50, vcc, s7, v50
 ; GCN-NEXT:    v_add_i32_e32 v41, vcc, s7, v41
 ; GCN-NEXT:    v_add_i32_e32 v45, vcc, s7, v45
@@ -43607,7 +43603,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xffff, v21
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xffff, v25
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xffff, v29
-; GCN-NEXT:    v_and_b32_e32 v37, 0xffff, v37
+; GCN-NEXT:    v_and_b32_e32 v35, 0xffff, v35
 ; GCN-NEXT:    v_and_b32_e32 v50, 0xffff, v50
 ; GCN-NEXT:    v_and_b32_e32 v41, 0xffff, v41
 ; GCN-NEXT:    v_and_b32_e32 v45, 0xffff, v45
@@ -43615,7 +43611,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xffff, v58
 ; GCN-NEXT:    v_and_b32_e32 v59, 0xffff, v59
 ; GCN-NEXT:    v_and_b32_e32 v57, 0xffff, v57
-; GCN-NEXT:    v_or_b32_e32 v4, v36, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v37, v4
 ; GCN-NEXT:    v_or_b32_e32 v5, v38, v5
 ; GCN-NEXT:    v_or_b32_e32 v6, v39, v6
 ; GCN-NEXT:    v_or_b32_e32 v7, v49, v7
@@ -43633,9 +43629,9 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v19, v32, v19
 ; GCN-NEXT:    v_or_b32_e32 v20, v33, v20
 ; GCN-NEXT:    v_or_b32_e32 v21, v34, v21
-; GCN-NEXT:    v_or_b32_e32 v22, v35, v25
+; GCN-NEXT:    v_or_b32_e32 v22, v36, v25
 ; GCN-NEXT:    v_or_b32_e32 v23, v48, v29
-; GCN-NEXT:    v_or_b32_e32 v24, v53, v37
+; GCN-NEXT:    v_or_b32_e32 v24, v53, v35
 ; GCN-NEXT:    v_or_b32_e32 v25, v55, v50
 ; GCN-NEXT:    v_or_b32_e32 v26, v40, v41
 ; GCN-NEXT:    v_or_b32_e32 v27, v42, v45
@@ -51345,43 +51341,42 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v37, v18, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v20, 0xffff, v20, v32
 ; GFX11-TRUE16-NEXT:    v_dual_add_f32 v37, 0x40c00000, v38 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v36, 0x400000, v18
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v38, 16, v16
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v48, 0x400000, v37
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v19, 0xffff, v19, v33
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v18, v35, v36, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v38
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v38, v17, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v35, v37, 16, 1
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v39, v36, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v38, v17, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v51, 0x400000, v36
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v35, v37, 0x7fff
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v39, v39, v36, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v17, v38, v49, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v16
-; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v18.l, v18.h
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v17.l, v17.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v36, v39, v51, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v18, 0xffff, v18, v34
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v36.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v35, v35, v48, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v17, 0xffff, v17, v35
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v16, v38, v49, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v16, 0xffff, v36, v16
 ; GFX11-TRUE16-NEXT:  .LBB31_2: ; %end
 ; GFX11-TRUE16-NEXT:    s_or_b32 exec_lo, exec_lo, s0
@@ -51542,15 +51537,15 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v34, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v37, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v33, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v34, 0x40c00000, v34 :: v_dual_add_f32 v35, 0x40c00000, v37
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v38, v34, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v32, v35, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
@@ -51899,17 +51894,16 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v39, 16, v16
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v18, v36, v37, vcc_lo
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v39
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v38, v35, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v38, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v39, v17, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v36, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v49, 0x400000, v36
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v18, v18, v34, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v37, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v38, v39, v17, 0x7fff
@@ -51917,7 +51911,7 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v36, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v38, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v37, v16, 0x7fff
@@ -59732,59 +59726,60 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:112
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:96
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:92
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:104
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:96
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:92
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:88
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:84
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:88
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:84
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:80
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:76
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:76
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:72
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:68
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:388
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:64
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:388
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:60
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:56
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:52
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:48
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:44
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:40
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:36
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:32
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:28
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
@@ -59797,9 +59792,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v11
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 8, v13
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v15
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v15
 ; GCN-NEXT:    v_lshlrev_b32_e32 v54, 8, v17
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v19
@@ -59813,6 +59806,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v27
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v43, 8, v29
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v14
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20
@@ -59830,38 +59824,37 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:116
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v12
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v10
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v12
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v45, 8, v5
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 8, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v22
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v20
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v24
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v42, 8, v22
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v18
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v20
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v18
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v14
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v16
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v10
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v8
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v6
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:104
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v4
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v4
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:100
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:100
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v50, 8, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:108
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -59874,17 +59867,17 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:128
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:136
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:132
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:144
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:140
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:152
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:148
@@ -59908,11 +59901,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:168
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:164
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:164
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:176
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:172
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
@@ -59922,7 +59915,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v51, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
@@ -59935,11 +59928,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:200
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:196
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:196
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:204
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
@@ -59962,11 +59955,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:228
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:236
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
@@ -59974,7 +59967,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:248
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:244
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v62, 8, v1
@@ -59989,11 +59982,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:264
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:260
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:260
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:268
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
@@ -60016,14 +60009,14 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:296
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:292
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:300
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:312
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:308
@@ -60043,11 +60036,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:328
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:324
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:324
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:332
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
@@ -60058,7 +60051,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 8, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 8, v1
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
@@ -60082,18 +60075,20 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:376
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 8, v1
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:372
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:364
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 24, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 8, v4
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 24, v3
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
 ; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
@@ -60126,7 +60121,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v3
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    v_or_b32_e32 v2, v2, v38
@@ -60147,9 +60142,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v4
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v4, v5, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v55, v4
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xffff, v3
 ; GCN-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
@@ -60199,7 +60192,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v8, v9, v8
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xffff, v7
@@ -60219,11 +60212,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v9
 ; GCN-NEXT:    v_or_b32_e32 v9, v9, v53
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v10
 ; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v10, v11, v10
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xffff, v9
@@ -60257,7 +60250,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v12, v40
+; GCN-NEXT:    v_or_b32_e32 v12, v12, v35
 ; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
@@ -60280,7 +60273,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v14, v15, v14
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xffff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v13, v14
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GCN-NEXT:    v_or_b32_e32 v14, v14, v50
@@ -60299,7 +60292,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v15, v15, v16
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v16
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
@@ -60314,7 +60307,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xff, v17
 ; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
@@ -60333,7 +60326,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v18
 ; GCN-NEXT:    v_lshlrev_b32_e32 v18, 16, v18
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v18, v19, v18
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xffff, v17
@@ -60354,11 +60347,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v21, v58
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v22, v22, v62
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v23, 0xff, v23
 ; GCN-NEXT:    v_or_b32_e32 v23, v23, v59
@@ -60381,7 +60374,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
-; GCN-NEXT:    v_or_b32_e32 v28, v28, v35
+; GCN-NEXT:    v_or_b32_e32 v28, v28, v40
 ; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xff, v29
@@ -60448,7 +60441,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v35, v36, v35
-; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
@@ -60473,19 +60466,21 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v39
 ; GCN-NEXT:    v_lshlrev_b32_e32 v39, 16, v39
-; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v39, v48, v39
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v48
 ; GCN-NEXT:    v_lshlrev_b32_e32 v48, 16, v48
-; GCN-NEXT:    v_or_b32_e32 v48, v55, v48
+; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v48, v49, v48
 ; GCN-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v49
 ; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v49
-; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v54, v49
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xffff, v18
@@ -60652,8 +60647,6 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr47
-; GCN-NEXT:    ; implicit-def: $vgpr32
-; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr54
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
@@ -60678,7 +60671,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr41
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
-; GCN-NEXT:    ; implicit-def: $vgpr40
+; GCN-NEXT:    ; implicit-def: $vgpr35
 ; GCN-NEXT:    ; implicit-def: $vgpr32
 ; GCN-NEXT:    ; kill: killed $vgpr32
 ; GCN-NEXT:    ; implicit-def: $vgpr63
@@ -60729,7 +60722,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr52
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
-; GCN-NEXT:    ; implicit-def: $vgpr35
+; GCN-NEXT:    ; implicit-def: $vgpr40
 ; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; kill: killed $vgpr33
 ; GCN-NEXT:    ; implicit-def: $vgpr33
@@ -60737,6 +60730,8 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; kill: killed $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr44
 ; GCN-NEXT:    ; implicit-def: $vgpr36
+; GCN-NEXT:    ; kill: killed $vgpr36
+; GCN-NEXT:    ; implicit-def: $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr55
 ; GCN-NEXT:    ; kill: killed $vgpr55
 ; GCN-NEXT:    ; implicit-def: $vgpr55
@@ -60757,7 +60752,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v56, v1
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
@@ -60809,13 +60804,13 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v12, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v40, v12
+; GCN-NEXT:    v_or_b32_e32 v12, v35, v12
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
 ; GCN-NEXT:    v_or_b32_e32 v13, v63, v13
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v14, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
@@ -60861,12 +60856,12 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
 ; GCN-NEXT:    v_or_b32_e32 v21, v58, v21
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v25, v62, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
@@ -60875,7 +60870,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v37, v32, v22
+; GCN-NEXT:    v_or_b32_e32 v35, v32, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -60895,7 +60890,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
-; GCN-NEXT:    v_or_b32_e32 v56, v35, v22
+; GCN-NEXT:    v_or_b32_e32 v56, v40, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v0
@@ -60938,9 +60933,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v63, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v63, v55, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -60948,7 +60941,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v36, v23, v22
+; GCN-NEXT:    v_or_b32_e32 v37, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
@@ -60970,7 +60963,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v0, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
@@ -60981,12 +60974,12 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v51, v23, v22
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v52, v23, v22
 ; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -61029,7 +61022,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v27, v26
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v27, vcc, 3, v27
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
@@ -61037,7 +61030,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v27, v28, v27
-; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v28, vcc, 3, v28
 ; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
@@ -61050,7 +61043,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v30
 ; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
 ; GCN-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v30, v31, v30
 ; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
@@ -61082,17 +61075,17 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v34
 ; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
 ; GCN-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v34, v35, v34
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v34, v36, v34
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v35
-; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v35
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
+; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v36
+; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
+; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v35, v48, v35
+; GCN-NEXT:    v_or_b32_e32 v36, v48, v36
 ; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v48, vcc, 3, v48
@@ -61106,7 +61099,6 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v53, vcc, 3, v53
 ; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v53
 ; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
-; GCN-NEXT:    v_mov_b32_e32 v0, v55
 ; GCN-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v53, v55, v53
@@ -61118,7 +61110,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v55, v40, v55
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v40, vcc, 3, v40
 ; GCN-NEXT:    v_and_b32_e32 v40, 0xff, v40
@@ -61147,21 +61139,23 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v44, vcc, 3, v44
 ; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v44
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v44
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v44, v46, v44
+; GCN-NEXT:    v_or_b32_e32 v44, v0, v44
 ; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v46, vcc, 3, v46
 ; GCN-NEXT:    v_and_b32_e32 v46, 0xff, v46
 ; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v46
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v46, v0, v46
 ; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v47, vcc, 3, v47
 ; GCN-NEXT:    v_and_b32_e32 v47, 0xff, v47
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v47
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v47, v0, v47
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
@@ -61200,7 +61194,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, s7, v21
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, s7, v25
 ; GCN-NEXT:    v_add_i32_e32 v29, vcc, s7, v29
-; GCN-NEXT:    v_add_i32_e32 v37, vcc, s7, v37
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, s7, v35
 ; GCN-NEXT:    v_add_i32_e32 v50, vcc, s7, v50
 ; GCN-NEXT:    v_add_i32_e32 v41, vcc, s7, v41
 ; GCN-NEXT:    v_add_i32_e32 v45, vcc, s7, v45
@@ -61228,7 +61222,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xffff, v21
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xffff, v25
 ; GCN-NEXT:    v_and_b32_e32 v29, 0xffff, v29
-; GCN-NEXT:    v_and_b32_e32 v37, 0xffff, v37
+; GCN-NEXT:    v_and_b32_e32 v35, 0xffff, v35
 ; GCN-NEXT:    v_and_b32_e32 v50, 0xffff, v50
 ; GCN-NEXT:    v_and_b32_e32 v41, 0xffff, v41
 ; GCN-NEXT:    v_and_b32_e32 v45, 0xffff, v45
@@ -61236,7 +61230,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xffff, v58
 ; GCN-NEXT:    v_and_b32_e32 v59, 0xffff, v59
 ; GCN-NEXT:    v_and_b32_e32 v57, 0xffff, v57
-; GCN-NEXT:    v_or_b32_e32 v4, v36, v4
+; GCN-NEXT:    v_or_b32_e32 v4, v37, v4
 ; GCN-NEXT:    v_or_b32_e32 v5, v38, v5
 ; GCN-NEXT:    v_or_b32_e32 v6, v39, v6
 ; GCN-NEXT:    v_or_b32_e32 v7, v49, v7
@@ -61254,9 +61248,9 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v19, v32, v19
 ; GCN-NEXT:    v_or_b32_e32 v20, v33, v20
 ; GCN-NEXT:    v_or_b32_e32 v21, v34, v21
-; GCN-NEXT:    v_or_b32_e32 v22, v35, v25
+; GCN-NEXT:    v_or_b32_e32 v22, v36, v25
 ; GCN-NEXT:    v_or_b32_e32 v23, v48, v29
-; GCN-NEXT:    v_or_b32_e32 v24, v53, v37
+; GCN-NEXT:    v_or_b32_e32 v24, v53, v35
 ; GCN-NEXT:    v_or_b32_e32 v25, v55, v50
 ; GCN-NEXT:    v_or_b32_e32 v26, v40, v41
 ; GCN-NEXT:    v_or_b32_e32 v27, v42, v45
@@ -68875,43 +68869,42 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v37, v18, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v20, 0xffff, v20, v32
 ; GFX11-TRUE16-NEXT:    v_dual_add_f32 v37, 0x40c00000, v38 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v36, 0x400000, v18
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v38, 16, v16
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v48, 0x400000, v37
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v19, 0xffff, v19, v33
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v18, v35, v36, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v38
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v38, v17, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v35, v37, 16, 1
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v39, v36, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v38, v17, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v51, 0x400000, v36
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v35, v37, 0x7fff
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v39, v39, v36, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v17, v38, v49, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v16, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v49, 0x400000, v16
-; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v18.l, v18.h
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v17.l, v17.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v36, v39, v51, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v16, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v18, 0xffff, v18, v34
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v36.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v35, v35, v48, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v17, 0xffff, v17, v35
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v16, v38, v49, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v16, 0xffff, v36, v16
 ; GFX11-TRUE16-NEXT:  .LBB39_2: ; %end
 ; GFX11-TRUE16-NEXT:    s_or_b32 exec_lo, exec_lo, s0
@@ -69072,15 +69065,15 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v34, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v37, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v33, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v34, 0x40c00000, v34 :: v_dual_add_f32 v35, 0x40c00000, v37
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v38, v34, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v32, v35, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
@@ -69429,17 +69422,16 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v39, 16, v16
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v18, v36, v37, vcc_lo
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v39
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v38, v35, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v38, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v39, v17, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v36, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v49, 0x400000, v36
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v18, v18, v34, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v37, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v38, v39, v17, 0x7fff
@@ -69447,7 +69439,7 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v36, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v50, 0x400000, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v38, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v37, v37, v16, 0x7fff
@@ -72798,90 +72790,85 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:100
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:84
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:100
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:96
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:76
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:92
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:88
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:84
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:68
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:80
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:60
-; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:76
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:52
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:64
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:56
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:48
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:36
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:44
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:28
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:24
+; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:32
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:20
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:16
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:24
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:12
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:20
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:16
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:8
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:12
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v4
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v6
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v19, 8, v6
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v8
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
@@ -72897,91 +72884,81 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v20
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v15, 8, v22
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v22
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v24
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v28
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v30
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:4
 ; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:392
 ; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:116
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:112
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:108
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:104
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:124
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v4
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v11
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v10
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v9
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v7
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v15
 ; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v5
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v13
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:92
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v11
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:88
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v10
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v4
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v9
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:96
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v7
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v8
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 24, v18
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v12
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v8
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:156
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:120
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:132
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:120
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:128
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:128
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:156
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:148
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:144
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -72989,59 +72966,56 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:140
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:136
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:188
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:164
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:152
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v2
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:164
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:152
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:160
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:160
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:188
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:180
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:176
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:172
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:172
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:168
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v32, 8, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:220
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:196
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:184
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:196
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:184
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:192
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:192
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:220
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:212
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:208
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -73049,150 +73023,168 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:204
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:200
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v22, 8, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:252
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v30, 8, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:228
 ; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:216
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:224
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:224
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:252
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:244
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:240
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:236
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:232
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v52, 8, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:284
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v46, 8, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:260
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:248
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:256
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:260
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:248
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:256
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:284
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:276
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:272
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:268
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:264
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v5, 8, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:316
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v13, 8, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292
-; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:280
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:288
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:292
+; GCN-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:280
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:288
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:316
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:308
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:304
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:300
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:296
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v13, 8, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:348
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v31, 8, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:324
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:312
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:320
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:324
+; GCN-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:312
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:320
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:348
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:340
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:336
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:332
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:328
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:332
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:328
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v23, 8, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v21, 8, v1
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:356
 ; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:344
-; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:352
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:388
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:384
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:380
-; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:376
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:376
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:372
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:72
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:40
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:8
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:368
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:364
 ; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:360
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v4
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v23, 8, v3
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v29, 8, v3
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 24, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
 ; GCN-NEXT:    ; implicit-def: $vgpr1
 ; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr1
 ; GCN-NEXT:    ; kill: killed $vgpr1
-; GCN-NEXT:    ; implicit-def: $vgpr63
+; GCN-NEXT:    ; implicit-def: $vgpr33
 ; GCN-NEXT:    ; implicit-def: $vgpr1
 ; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr1
@@ -73230,39 +73222,40 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr58
 ; GCN-NEXT:    ; implicit-def: $vgpr1
 ; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr11
 ; GCN-NEXT:    ; implicit-def: $vgpr3
-; GCN-NEXT:    ; implicit-def: $vgpr7
 ; GCN-NEXT:    ; implicit-def: $vgpr57
-; GCN-NEXT:    ; implicit-def: $vgpr32
-; GCN-NEXT:    ; implicit-def: $vgpr33
+; GCN-NEXT:    ; implicit-def: $vgpr7
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr34
-; GCN-NEXT:    ; implicit-def: $vgpr47
+; GCN-NEXT:    ; implicit-def: $vgpr56
+; GCN-NEXT:    ; implicit-def: $vgpr36
 ; GCN-NEXT:    ; implicit-def: $vgpr4
 ; GCN-NEXT:    ; implicit-def: $vgpr6
+; GCN-NEXT:    ; implicit-def: $vgpr47
 ; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr46
 ; GCN-NEXT:    ; implicit-def: $vgpr1
-; GCN-NEXT:    ; implicit-def: $vgpr10
 ; GCN-NEXT:    ; implicit-def: $vgpr9
 ; GCN-NEXT:    ; implicit-def: $vgpr45
-; GCN-NEXT:    ; implicit-def: $vgpr18
+; GCN-NEXT:    ; implicit-def: $vgpr10
 ; GCN-NEXT:    ; implicit-def: $vgpr17
-; GCN-NEXT:    ; implicit-def: $vgpr36
+; GCN-NEXT:    ; implicit-def: $vgpr18
 ; GCN-NEXT:    ; implicit-def: $vgpr44
+; GCN-NEXT:    ; implicit-def: $vgpr37
 ; GCN-NEXT:    ; implicit-def: $vgpr26
 ; GCN-NEXT:    ; implicit-def: $vgpr25
-; GCN-NEXT:    ; implicit-def: $vgpr8
 ; GCN-NEXT:    ; implicit-def: $vgpr43
-; GCN-NEXT:    ; implicit-def: $vgpr37
-; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr8
+; GCN-NEXT:    ; implicit-def: $vgpr38
 ; GCN-NEXT:    ; implicit-def: $vgpr14
 ; GCN-NEXT:    ; implicit-def: $vgpr41
+; GCN-NEXT:    ; implicit-def: $vgpr16
 ; GCN-NEXT:    ; implicit-def: $vgpr20
+; GCN-NEXT:    ; implicit-def: $vgpr22
+; GCN-NEXT:    ; implicit-def: $vgpr40
 ; GCN-NEXT:    ; implicit-def: $vgpr24
 ; GCN-NEXT:    ; implicit-def: $vgpr28
-; GCN-NEXT:    ; implicit-def: $vgpr40
-; GCN-NEXT:    ; implicit-def: $vgpr30
-; GCN-NEXT:    ; implicit-def: $vgpr38
 ; GCN-NEXT:    ; implicit-def: $vgpr39
 ; GCN-NEXT:    ; implicit-def: $vgpr55
 ; GCN-NEXT:    ; implicit-def: $vgpr48
@@ -73275,94 +73268,91 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_cbranch_execz .LBB44_2
 ; GCN-NEXT:  ; %bb.1: ; %cmp.false
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v5
+; GCN-NEXT:    v_or_b32_e32 v5, v1, v19
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mov_b32_e32 v8, v15
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v47, v1, v2
+; GCN-NEXT:    v_or_b32_e32 v15, v1, v2
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v53, v1, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v19, v1, v2
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    v_or_b32_e32 v55, v1, v15
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v53, v1, v2
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v55, v1, v2
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v40, v1, v2
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v41, v1, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v43, v1, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v44, v1, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v45, v1, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v46, v1, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v15, v1, v2
+; GCN-NEXT:    v_or_b32_e32 v32, v1, v32
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v35
-; GCN-NEXT:    v_or_b32_e32 v22, v1, v22
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v16
-; GCN-NEXT:    v_or_b32_e32 v16, v1, v52
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v56
-; GCN-NEXT:    v_or_b32_e32 v5, v1, v5
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v31
+; GCN-NEXT:    v_or_b32_e32 v30, v1, v30
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v12
+; GCN-NEXT:    v_or_b32_e32 v12, v1, v46
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v52
 ; GCN-NEXT:    v_or_b32_e32 v13, v1, v13
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v63
+; GCN-NEXT:    v_or_b32_e32 v31, v1, v31
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v27
-; GCN-NEXT:    v_or_b32_e32 v21, v1, v21
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v29
 ; GCN-NEXT:    v_or_b32_e32 v23, v1, v23
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v21
+; GCN-NEXT:    v_or_b32_e32 v21, v1, v29
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -73371,17 +73361,17 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -73390,7 +73380,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -73399,17 +73389,17 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -73418,176 +73408,176 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    v_or_b32_e32 v3, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v11, v2, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v7, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v3, v2, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v32, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v7, v2, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    v_or_b32_e32 v33, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v34, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v4, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v36, v2, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    v_or_b32_e32 v6, v2, v1
+; GCN-NEXT:    v_or_b32_e32 v4, v2, v1
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v6, v2, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v8, v1
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v9
-; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    v_or_b32_e32 v10, v9, v8
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v8, 24, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v8, v1
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
@@ -73598,691 +73588,694 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v18, v12, v8
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v10, v10, v8
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v12, 24, v12
+; GCN-NEXT:    v_lshlrev_b32_e32 v14, 24, v14
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    v_or_b32_e32 v17, v12, v8
+; GCN-NEXT:    v_or_b32_e32 v17, v14, v8
 ; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v36, v12, v8
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v18, v14, v8
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v26, v12, v8
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v37, v14, v8
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v12, 24, v12
+; GCN-NEXT:    v_lshlrev_b32_e32 v14, 24, v14
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    v_or_b32_e32 v25, v12, v8
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v26, v14, v8
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v8, v12, v8
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v25, v14, v8
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    v_lshlrev_b32_e32 v12, 16, v12
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v37, v14, v12
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v8, v14, v8
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v14, 24, v14
-; GCN-NEXT:    v_lshlrev_b32_e32 v12, 16, v12
-; GCN-NEXT:    v_or_b32_e32 v12, v14, v12
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GCN-NEXT:    v_lshlrev_b32_e32 v16, 24, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v14, 16, v14
+; GCN-NEXT:    v_or_b32_e32 v38, v16, v14
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GCN-NEXT:    v_lshlrev_b32_e32 v14, 16, v14
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v14, v16, v14
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v14, v20, v14
-; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v16, v20, v16
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v20, 0xff, v20
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v22, 24, v22
 ; GCN-NEXT:    v_lshlrev_b32_e32 v20, 16, v20
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v20, v22, v20
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
+; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v20, v24, v20
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v22, v24, v22
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v27, 24, v27
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v24, v27, v24
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
-; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
-; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v28, 24, v28
+; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
 ; GCN-NEXT:    v_or_b32_e32 v28, v28, v27
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
 ; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
-; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v30, v29, v27
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v19, 24, v19
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    v_or_b32_e32 v38, v19, v11
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v39, v19, v11
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v39, v29, v27
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
+; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v48, v19, v11
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v42
-; GCN-NEXT:    v_lshlrev_b32_e32 v19, 24, v54
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    v_or_b32_e32 v49, v19, v11
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v48, v29, v27
+; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v42
+; GCN-NEXT:    v_lshlrev_b32_e32 v29, 24, v54
+; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
+; GCN-NEXT:    v_or_b32_e32 v49, v29, v27
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
+; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v50, v19, v11
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v50, v29, v27
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v51, v19, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 16, v47
-; GCN-NEXT:    v_lshlrev_b32_e32 v62, 16, v53
-; GCN-NEXT:    v_lshlrev_b32_e32 v61, 16, v55
-; GCN-NEXT:    v_lshlrev_b32_e32 v60, 16, v40
-; GCN-NEXT:    v_lshlrev_b32_e32 v59, 16, v41
-; GCN-NEXT:    v_lshlrev_b32_e32 v58, 16, v43
-; GCN-NEXT:    v_lshlrev_b32_e32 v57, 16, v44
-; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v45
-; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v46
-; GCN-NEXT:    v_lshlrev_b32_e32 v45, 16, v15
-; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v22
-; GCN-NEXT:    v_lshlrev_b32_e32 v43, 16, v16
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 16, v5
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 16, v13
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 16, v21
-; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v23
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
+; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
+; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v51, v29, v27
+; GCN-NEXT:    v_lshlrev_b32_e32 v33, 16, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v62, 16, v15
+; GCN-NEXT:    v_lshlrev_b32_e32 v61, 16, v19
+; GCN-NEXT:    v_lshlrev_b32_e32 v60, 16, v53
+; GCN-NEXT:    v_lshlrev_b32_e32 v59, 16, v55
+; GCN-NEXT:    v_lshlrev_b32_e32 v58, 16, v40
+; GCN-NEXT:    v_lshlrev_b32_e32 v57, 16, v41
+; GCN-NEXT:    v_lshlrev_b32_e32 v56, 16, v43
+; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v44
+; GCN-NEXT:    v_lshlrev_b32_e32 v45, 16, v32
+; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v30
+; GCN-NEXT:    v_lshlrev_b32_e32 v43, 16, v12
+; GCN-NEXT:    v_lshlrev_b32_e32 v41, 16, v13
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 16, v31
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 16, v23
+; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v21
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
 ; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
 ; GCN-NEXT:    ; implicit-def: $vgpr35
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr16
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr56
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr31
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr11
-; GCN-NEXT:    ; implicit-def: $vgpr19
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; kill: killed $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr52
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr63
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
 ; GCN-NEXT:    ; implicit-def: $vgpr27
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
 ; GCN-NEXT:    ; implicit-def: $vgpr42
 ; GCN-NEXT:    ; implicit-def: $vgpr54
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr29
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr22
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr52
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
-; GCN-NEXT:    ; kill: killed $vgpr5
-; GCN-NEXT:    ; implicit-def: $vgpr5
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr21
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr32
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr30
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr13
+; GCN-NEXT:    ; kill: killed $vgpr13
+; GCN-NEXT:    ; implicit-def: $vgpr46
 ; GCN-NEXT:    ; implicit-def: $vgpr13
 ; GCN-NEXT:    ; kill: killed $vgpr13
 ; GCN-NEXT:    ; implicit-def: $vgpr13
 ; GCN-NEXT:    ; kill: killed $vgpr13
 ; GCN-NEXT:    ; implicit-def: $vgpr13
-; GCN-NEXT:    ; implicit-def: $vgpr21
-; GCN-NEXT:    ; kill: killed $vgpr21
-; GCN-NEXT:    ; implicit-def: $vgpr21
-; GCN-NEXT:    ; kill: killed $vgpr21
-; GCN-NEXT:    ; implicit-def: $vgpr21
-; GCN-NEXT:    ; implicit-def: $vgpr23
-; GCN-NEXT:    ; kill: killed $vgpr23
-; GCN-NEXT:    ; implicit-def: $vgpr23
-; GCN-NEXT:    ; kill: killed $vgpr23
-; GCN-NEXT:    ; implicit-def: $vgpr23
 ; GCN-NEXT:    ; implicit-def: $vgpr15
 ; GCN-NEXT:    ; kill: killed $vgpr15
 ; GCN-NEXT:    ; implicit-def: $vgpr15
+; GCN-NEXT:    ; kill: killed $vgpr15
+; GCN-NEXT:    ; implicit-def: $vgpr31
+; GCN-NEXT:    ; implicit-def: $vgpr15
+; GCN-NEXT:    ; kill: killed $vgpr15
+; GCN-NEXT:    ; implicit-def: $vgpr15
+; GCN-NEXT:    ; kill: killed $vgpr15
+; GCN-NEXT:    ; implicit-def: $vgpr15
+; GCN-NEXT:    ; implicit-def: $vgpr23
+; GCN-NEXT:    ; implicit-def: $vgpr29
+; GCN-NEXT:    ; kill: killed $vgpr29
+; GCN-NEXT:    ; implicit-def: $vgpr29
+; GCN-NEXT:    ; kill: killed $vgpr29
+; GCN-NEXT:    ; implicit-def: $vgpr29
+; GCN-NEXT:    ; implicit-def: $vgpr19
+; GCN-NEXT:    ; kill: killed $vgpr19
+; GCN-NEXT:    ; implicit-def: $vgpr19
 ; GCN-NEXT:  .LBB44_2: ; %Flow
 ; GCN-NEXT:    s_andn2_saveexec_b64 s[4:5], s[4:5]
 ; GCN-NEXT:    s_cbranch_execz .LBB44_4
 ; GCN-NEXT:  ; %bb.3: ; %cmp.true
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v29
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v21
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    v_or_b32_e32 v1, v23, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v29, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt vmcnt(3)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v42
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 8, v54
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v3, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, 3, v27
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xff, v3
-; GCN-NEXT:    v_or_b32_e32 v3, v21, v3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, 3, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v6, 8, v19
+; GCN-NEXT:    v_or_b32_e32 v3, v23, v3
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, 3, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v4
-; GCN-NEXT:    v_or_b32_e32 v4, v6, v4
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, 3, v31
-; GCN-NEXT:    v_and_b32_e32 v6, 0xff, v6
-; GCN-NEXT:    v_or_b32_e32 v6, v13, v6
+; GCN-NEXT:    v_or_b32_e32 v4, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v63
+; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v31, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v7, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, 3, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v8, 8, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v7, 8, v1
+; GCN-NEXT:    v_and_b32_e32 v6, 0xff, v6
+; GCN-NEXT:    v_or_b32_e32 v6, v7, v6
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 3, v52
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xff, v7
-; GCN-NEXT:    v_or_b32_e32 v7, v8, v7
-; GCN-NEXT:    v_add_i32_e32 v8, vcc, 3, v56
-; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
-; GCN-NEXT:    v_or_b32_e32 v5, v5, v8
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v7, v13, v7
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v9, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v8
 ; GCN-NEXT:    v_or_b32_e32 v8, v9, v8
-; GCN-NEXT:    v_add_i32_e32 v9, vcc, 3, v16
+; GCN-NEXT:    v_add_i32_e32 v9, vcc, 3, v12
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v9
-; GCN-NEXT:    v_or_b32_e32 v9, v52, v9
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v9, v46, v9
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v11, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v10
 ; GCN-NEXT:    v_or_b32_e32 v10, v11, v10
 ; GCN-NEXT:    v_add_i32_e32 v11, vcc, 3, v35
 ; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v11
-; GCN-NEXT:    v_or_b32_e32 v11, v22, v11
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v11, v30, v11
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v12, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v13, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v12
 ; GCN-NEXT:    v_or_b32_e32 v12, v13, v12
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v13, v1, v13
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v13, v32, v13
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v14, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GCN-NEXT:    v_mov_b32_e32 v2, v15
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v15, 8, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v15, 8, v15
 ; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GCN-NEXT:    v_or_b32_e32 v14, v15, v14
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v15, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v15, 0xff, v15
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v15, v1, v15
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v16, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v17, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v16
 ; GCN-NEXT:    v_or_b32_e32 v16, v17, v16
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v17, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v17, 0xff, v17
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v17, v1, v17
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v18, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mov_b32_e32 v2, v19
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v19, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v18
 ; GCN-NEXT:    v_or_b32_e32 v18, v19, v18
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v19, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v19, 0xff, v19
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v19, v1, v19
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v20, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v21, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v20, 0xff, v20
 ; GCN-NEXT:    v_or_b32_e32 v20, v21, v20
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v21, 0xff, v21
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v21, v1, v21
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v23, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v22
 ; GCN-NEXT:    v_or_b32_e32 v22, v23, v22
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v23, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v23, 0xff, v23
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v23, v1, v23
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_or_b32_e32 v28, v25, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v32, v1, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_or_b32_e32 v37, v25, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
-; GCN-NEXT:    v_or_b32_e32 v40, v2, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v40, v1, v24
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_or_b32_e32 v45, v25, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v57, v1, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_or_b32_e32 v58, v25, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v5
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v59, v1, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v59, v2, v24
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 8, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_or_b32_e32 v60, v25, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v61, v1, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v62, v1, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v63, v1, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v39, v2, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v48, v2, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v49, v2, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v51, v2, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v52, v2, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v53, v2, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v55, v2, v24
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
@@ -74290,7 +74283,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v24, v2, v24
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
@@ -74298,10 +74291,10 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xff, v25
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 16, v25
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v25, v2, v25
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v26, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v26, 0xff, v26
@@ -74309,12 +74302,12 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v2, v26
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v27, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
 ; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v27, v2, v27
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
@@ -74325,7 +74318,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v29, v2, v29
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
@@ -74333,7 +74326,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v30, v2, v30
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v31, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v31, 0xff, v31
@@ -74341,7 +74334,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v31, v2, v31
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v33, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v33, 0xff, v33
@@ -74349,52 +74342,52 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v33, v2, v33
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
 ; GCN-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v34, v2, v34
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v35
 ; GCN-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v35, v2, v35
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
 ; GCN-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v36, v2, v36
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v38, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v38, 0xff, v38
 ; GCN-NEXT:    v_lshlrev_b32_e32 v38, 16, v38
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v38, v2, v38
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v50, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v50, 0xff, v50
 ; GCN-NEXT:    v_lshlrev_b32_e32 v50, 16, v50
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v50, v2, v50
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v54, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v54, 0xff, v54
 ; GCN-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v54, v2, v54
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
@@ -74402,7 +74395,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v41, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v41, 0xff, v41
 ; GCN-NEXT:    v_lshlrev_b32_e32 v41, 16, v41
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v41, v2, v41
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
@@ -74410,7 +74403,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v42, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v42, 0xff, v42
 ; GCN-NEXT:    v_lshlrev_b32_e32 v42, 16, v42
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v42, v2, v42
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
@@ -74450,16 +74443,16 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v56, vcc, 3, v2
 ; GCN-NEXT:    v_and_b32_e32 v56, 0xff, v56
 ; GCN-NEXT:    v_lshlrev_b32_e32 v56, 16, v56
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v56, v2, v56
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0x300, v2
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GCN-NEXT:    v_or_b32_e32 v61, v61, v2
 ; GCN-NEXT:    s_movk_i32 s7, 0x300
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, s7, v2
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xffff, v2
@@ -74471,9 +74464,11 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xffff, v4
 ; GCN-NEXT:    v_or_b32_e32 v4, v1, v4
 ; GCN-NEXT:    s_mov_b32 s6, 0x3000000
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, s7, v6
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, s7, v1
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, s7, v6
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, s7, v7
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, s7, v5
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, s7, v8
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, s7, v9
 ; GCN-NEXT:    v_add_i32_e32 v9, vcc, s7, v10
@@ -74500,8 +74495,8 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_add_i32_e32 v58, vcc, s7, v59
 ; GCN-NEXT:    v_add_i32_e32 v59, vcc, s7, v60
 ; GCN-NEXT:    v_and_b32_e32 v1, 0xffff, v1
-; GCN-NEXT:    v_and_b32_e32 v6, 0xffff, v6
 ; GCN-NEXT:    v_and_b32_e32 v5, 0xffff, v5
+; GCN-NEXT:    v_and_b32_e32 v6, 0xffff, v6
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xffff, v7
 ; GCN-NEXT:    v_and_b32_e32 v8, 0xffff, v8
 ; GCN-NEXT:    v_and_b32_e32 v9, 0xffff, v9
@@ -74528,8 +74523,8 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xffff, v58
 ; GCN-NEXT:    v_and_b32_e32 v59, 0xffff, v59
 ; GCN-NEXT:    v_or_b32_e32 v1, v39, v1
-; GCN-NEXT:    v_or_b32_e32 v6, v48, v6
-; GCN-NEXT:    v_or_b32_e32 v5, v49, v5
+; GCN-NEXT:    v_or_b32_e32 v5, v48, v5
+; GCN-NEXT:    v_or_b32_e32 v6, v49, v6
 ; GCN-NEXT:    v_or_b32_e32 v7, v51, v7
 ; GCN-NEXT:    v_or_b32_e32 v8, v52, v8
 ; GCN-NEXT:    v_or_b32_e32 v9, v53, v9
@@ -74555,311 +74550,326 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v29, v46, v57
 ; GCN-NEXT:    v_or_b32_e32 v30, v47, v58
 ; GCN-NEXT:    v_or_b32_e32 v31, v56, v59
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, s6, v61
-; GCN-NEXT:    v_add_i32_e32 v49, vcc, s6, v2
-; GCN-NEXT:    v_add_i32_e32 v50, vcc, s6, v3
-; GCN-NEXT:    v_add_i32_e32 v38, vcc, s6, v4
+; GCN-NEXT:    v_add_i32_e32 v32, vcc, s6, v61
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, s6, v2
+; GCN-NEXT:    v_add_i32_e32 v49, vcc, s6, v3
+; GCN-NEXT:    v_add_i32_e32 v48, vcc, s6, v4
 ; GCN-NEXT:    v_add_i32_e32 v39, vcc, s6, v1
-; GCN-NEXT:    v_add_i32_e32 v48, vcc, s6, v6
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, s6, v5
-; GCN-NEXT:    v_add_i32_e32 v51, vcc, s6, v7
+; GCN-NEXT:    v_add_i32_e32 v50, vcc, s6, v5
+; GCN-NEXT:    v_add_i32_e32 v51, vcc, s6, v6
+; GCN-NEXT:    v_add_i32_e32 v38, vcc, s6, v7
 ; GCN-NEXT:    v_add_i32_e32 v52, vcc, s6, v8
-; GCN-NEXT:    v_add_i32_e32 v37, vcc, s6, v9
-; GCN-NEXT:    v_add_i32_e32 v8, vcc, s6, v10
-; GCN-NEXT:    v_add_i32_e32 v11, vcc, s6, v11
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, s6, v9
+; GCN-NEXT:    v_add_i32_e32 v53, vcc, s6, v10
+; GCN-NEXT:    v_add_i32_e32 v37, vcc, s6, v11
 ; GCN-NEXT:    v_add_i32_e32 v12, vcc, s6, v12
-; GCN-NEXT:    v_add_i32_e32 v10, vcc, s6, v13
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, s6, v13
 ; GCN-NEXT:    v_add_i32_e32 v9, vcc, s6, v14
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, s6, v15
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s6, v16
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, s6, v17
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, s6, v18
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, s6, v19
-; GCN-NEXT:    v_add_i32_e32 v7, vcc, s6, v20
-; GCN-NEXT:    v_add_i32_e32 v13, vcc, s6, v21
-; GCN-NEXT:    v_add_i32_e32 v14, vcc, s6, v22
-; GCN-NEXT:    v_add_i32_e32 v15, vcc, s6, v23
-; GCN-NEXT:    v_add_i32_e32 v16, vcc, s6, v24
-; GCN-NEXT:    v_add_i32_e32 v17, vcc, s6, v25
-; GCN-NEXT:    v_add_i32_e32 v18, vcc, s6, v26
-; GCN-NEXT:    v_add_i32_e32 v19, vcc, s6, v27
-; GCN-NEXT:    v_add_i32_e32 v20, vcc, s6, v28
-; GCN-NEXT:    v_add_i32_e32 v21, vcc, s6, v29
-; GCN-NEXT:    v_add_i32_e32 v22, vcc, s6, v30
-; GCN-NEXT:    v_add_i32_e32 v23, vcc, s6, v31
-; GCN-NEXT:    v_and_b32_e32 v24, 0xffff0000, v23
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v23, 16, v23
-; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v23, 0xffff0000, v22
-; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 16, v22
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s6, v15
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, s6, v16
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, s6, v17
+; GCN-NEXT:    v_add_i32_e32 v10, vcc, s6, v18
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, s6, v19
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s6, v20
+; GCN-NEXT:    v_add_i32_e32 v11, vcc, s6, v21
+; GCN-NEXT:    v_add_i32_e32 v13, vcc, s6, v22
+; GCN-NEXT:    v_add_i32_e32 v14, vcc, s6, v23
+; GCN-NEXT:    v_add_i32_e32 v15, vcc, s6, v24
+; GCN-NEXT:    v_add_i32_e32 v16, vcc, s6, v25
+; GCN-NEXT:    v_add_i32_e32 v17, vcc, s6, v26
+; GCN-NEXT:    v_add_i32_e32 v18, vcc, s6, v27
+; GCN-NEXT:    v_add_i32_e32 v19, vcc, s6, v28
+; GCN-NEXT:    v_add_i32_e32 v20, vcc, s6, v29
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, s6, v30
+; GCN-NEXT:    v_add_i32_e32 v21, vcc, s6, v31
 ; GCN-NEXT:    v_and_b32_e32 v22, 0xffff0000, v21
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v21, 16, v21
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v21, 0xffff0000, v20
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v62, 16, v20
-; GCN-NEXT:    v_and_b32_e32 v20, 0xffff0000, v19
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v19, 16, v19
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v21, 0xffff0000, v5
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v33, 16, v5
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v20
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v19, 0xffff0000, v18
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v61, 16, v18
-; GCN-NEXT:    v_and_b32_e32 v18, 0xffff0000, v17
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
-; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, 16, v20
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v17, 0xffff0000, v16
-; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v60, 16, v16
-; GCN-NEXT:    v_and_b32_e32 v16, 0xffff0000, v15
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v15, 16, v15
-; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v19
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v62, 16, v19
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v15, 0xffff0000, v14
-; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v59, 16, v14
-; GCN-NEXT:    v_and_b32_e32 v14, 0xffff0000, v13
-; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v18
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v13, 0xffff0000, v7
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v58, 16, v7
-; GCN-NEXT:    v_and_b32_e32 v7, 0xffff0000, v3
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; GCN-NEXT:    v_and_b32_e32 v32, 0xffff0000, v6
-; GCN-NEXT:    v_lshlrev_b32_e32 v57, 16, v6
-; GCN-NEXT:    v_and_b32_e32 v34, 0xffff0000, v4
-; GCN-NEXT:    v_lshlrev_b32_e32 v33, 16, v4
-; GCN-NEXT:    v_and_b32_e32 v4, 0xffff0000, v2
-; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v2
-; GCN-NEXT:    v_and_b32_e32 v2, 0xffff0000, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v6, 16, v1
-; GCN-NEXT:    v_and_b32_e32 v1, 0xffff0000, v9
-; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v9
-; GCN-NEXT:    v_and_b32_e32 v9, 0xffff0000, v10
-; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; GCN-NEXT:    v_and_b32_e32 v18, 0xffff0000, v12
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, 16, v18
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v17
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v61, 16, v17
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v16
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, 16, v16
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v15
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v60, 16, v15
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v14
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, 16, v14
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v13
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v59, 16, v13
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v11
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, 16, v11
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v5, 0xffff0000, v3
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v58, 16, v3
+; GCN-NEXT:    v_and_b32_e32 v3, 0xffff0000, v7
+; GCN-NEXT:    v_lshlrev_b32_e32 v11, 16, v7
+; GCN-NEXT:    v_and_b32_e32 v7, 0xffff0000, v10
+; GCN-NEXT:    v_lshlrev_b32_e32 v57, 16, v10
+; GCN-NEXT:    v_and_b32_e32 v34, 0xffff0000, v6
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, 16, v6
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v36, 0xffff0000, v4
+; GCN-NEXT:    v_lshlrev_b32_e32 v56, 16, v4
+; GCN-NEXT:    v_and_b32_e32 v6, 0xffff0000, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, 16, v2
+; GCN-NEXT:    v_and_b32_e32 v2, 0xffff0000, v9
+; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v9
+; GCN-NEXT:    v_and_b32_e32 v9, 0xffff0000, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_and_b32_e32 v10, 0xffff0000, v12
 ; GCN-NEXT:    v_lshlrev_b32_e32 v45, 16, v12
-; GCN-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
-; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v11
-; GCN-NEXT:    v_and_b32_e32 v26, 0xffff0000, v8
-; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v8
-; GCN-NEXT:    v_and_b32_e32 v8, 0xffff0000, v37
-; GCN-NEXT:    v_lshlrev_b32_e32 v25, 16, v37
-; GCN-NEXT:    v_and_b32_e32 v37, 0xffff0000, v52
+; GCN-NEXT:    v_and_b32_e32 v18, 0xffff0000, v37
+; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v37
+; GCN-NEXT:    v_and_b32_e32 v37, 0xffff0000, v53
+; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v53
+; GCN-NEXT:    v_and_b32_e32 v25, 0xffff0000, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v26, 16, v8
+; GCN-NEXT:    v_and_b32_e32 v8, 0xffff0000, v52
 ; GCN-NEXT:    v_lshlrev_b32_e32 v43, 16, v52
-; GCN-NEXT:    v_and_b32_e32 v14, 0xffff0000, v51
-; GCN-NEXT:    v_lshlrev_b32_e32 v12, 16, v51
-; GCN-NEXT:    v_and_b32_e32 v20, 0xffff0000, v5
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 16, v5
-; GCN-NEXT:    v_and_b32_e32 v28, 0xffff0000, v48
-; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v48
-; GCN-NEXT:    v_and_b32_e32 v30, 0xffff0000, v39
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 16, v39
-; GCN-NEXT:    v_and_b32_e32 v39, 0xffff0000, v38
+; GCN-NEXT:    v_and_b32_e32 v14, 0xffff0000, v38
 ; GCN-NEXT:    v_lshlrev_b32_e32 v38, 16, v38
-; GCN-NEXT:    v_and_b32_e32 v48, 0xffff0000, v50
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 16, v50
-; GCN-NEXT:    v_and_b32_e32 v50, 0xffff0000, v49
-; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v49
-; GCN-NEXT:    v_and_b32_e32 v51, 0xffff0000, v35
-; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v35
+; GCN-NEXT:    v_and_b32_e32 v16, 0xffff0000, v51
+; GCN-NEXT:    v_lshlrev_b32_e32 v41, 16, v51
+; GCN-NEXT:    v_and_b32_e32 v22, 0xffff0000, v50
+; GCN-NEXT:    v_lshlrev_b32_e32 v20, 16, v50
+; GCN-NEXT:    v_and_b32_e32 v24, 0xffff0000, v39
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 16, v39
+; GCN-NEXT:    v_and_b32_e32 v39, 0xffff0000, v48
+; GCN-NEXT:    v_lshlrev_b32_e32 v28, 16, v48
+; GCN-NEXT:    v_and_b32_e32 v48, 0xffff0000, v49
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 16, v49
+; GCN-NEXT:    v_and_b32_e32 v50, 0xffff0000, v35
+; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v35
+; GCN-NEXT:    v_and_b32_e32 v51, 0xffff0000, v32
+; GCN-NEXT:    v_lshlrev_b32_e32 v53, 16, v32
 ; GCN-NEXT:  .LBB44_4: ; %end
 ; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v5
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v11
-; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT:    v_alignbit_b32 v5, v5, v11, 16
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v13, 1.0, v5
+; GCN-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; GCN-NEXT:    v_alignbit_b32 v12, v12, v13, 16
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT:    v_mul_f32_e32 v13, 1.0, v5
+; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v33
+; GCN-NEXT:    v_lshrrev_b32_e32 v13, 16, v13
+; GCN-NEXT:    v_alignbit_b32 v5, v13, v5, 16
+; GCN-NEXT:    buffer_store_dword v12, v0, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v13, 1.0, v63
-; GCN-NEXT:    v_lshrrev_b32_e32 v11, 16, v11
-; GCN-NEXT:    v_alignbit_b32 v11, v11, v13, 16
-; GCN-NEXT:    buffer_store_dword v5, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, 4, v0
+; GCN-NEXT:    buffer_store_dword v5, v12, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, 4, v0
-; GCN-NEXT:    buffer_store_dword v11, v5, s[0:3], 0 offen
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v12
 ; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT:    v_alignbit_b32 v63, v5, v11, 16
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload
+; GCN-NEXT:    v_alignbit_b32 v63, v5, v12, 16
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v62
+; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v62
 ; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT:    v_alignbit_b32 v56, v5, v11, 16
-; GCN-NEXT:    v_add_i32_e32 v31, vcc, 8, v0
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload
+; GCN-NEXT:    v_alignbit_b32 v46, v5, v12, 16
+; GCN-NEXT:    v_add_i32_e32 v19, vcc, 8, v0
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v12
 ; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT:    v_alignbit_b32 v62, v5, v11, 16
+; GCN-NEXT:    v_alignbit_b32 v62, v5, v12, 16
 ; GCN-NEXT:    v_add_i32_e32 v15, vcc, 12, v0
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v61
+; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v61
 ; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT:    v_alignbit_b32 v61, v5, v11, 16
-; GCN-NEXT:    v_add_i32_e32 v21, vcc, 16, v0
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload
+; GCN-NEXT:    v_alignbit_b32 v61, v5, v12, 16
+; GCN-NEXT:    v_add_i32_e32 v27, vcc, 16, v0
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v12
 ; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT:    v_alignbit_b32 v5, v5, v11, 16
-; GCN-NEXT:    v_add_i32_e32 v11, vcc, 20, v0
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload
+; GCN-NEXT:    v_alignbit_b32 v5, v5, v12, 16
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, 20, v0
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v13, 1.0, v13
-; GCN-NEXT:    v_mul_f32_e32 v16, 1.0, v60
-; GCN-NEXT:    v_lshrrev_b32_e32 v13, 16, v13
-; GCN-NEXT:    v_alignbit_b32 v60, v13, v16, 16
+; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT:    v_mul_f32_e32 v13, 1.0, v60
+; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GCN-NEXT:    v_alignbit_b32 v60, v5, v13, 16
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 24, v0
-; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v16, 1.0, v16
-; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v19, 1.0, v19
-; GCN-NEXT:    v_lshrrev_b32_e32 v16, 16, v16
-; GCN-NEXT:    v_alignbit_b32 v16, v16, v19, 16
-; GCN-NEXT:    v_add_i32_e32 v19, vcc, 28, v0
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v21, 1.0, v21
+; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GCN-NEXT:    v_alignbit_b32 v21, v5, v21, 16
+; GCN-NEXT:    v_add_i32_e32 v23, vcc, 28, v0
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v22, 1.0, v22
-; GCN-NEXT:    v_mul_f32_e32 v23, 1.0, v59
-; GCN-NEXT:    v_lshrrev_b32_e32 v22, 16, v22
-; GCN-NEXT:    v_alignbit_b32 v59, v22, v23, 16
-; GCN-NEXT:    v_add_i32_e32 v22, vcc, 32, v0
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT:    v_mul_f32_e32 v29, 1.0, v59
+; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GCN-NEXT:    v_alignbit_b32 v59, v5, v29, 16
+; GCN-NEXT:    v_add_i32_e32 v29, vcc, 32, v0
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v23, 1.0, v23
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v27, 1.0, v27
-; GCN-NEXT:    v_lshrrev_b32_e32 v23, 16, v23
-; GCN-NEXT:    v_alignbit_b32 v23, v23, v27, 16
-; GCN-NEXT:    v_add_i32_e32 v27, vcc, 36, v0
-; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mul_f32_e32 v30, 1.0, v30
+; GCN-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GCN-NEXT:    v_alignbit_b32 v5, v5, v30, 16
+; GCN-NEXT:    v_add_i32_e32 v30, vcc, 36, v0
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v29, 1.0, v29
-; GCN-NEXT:    v_mul_f32_e32 v35, 1.0, v58
-; GCN-NEXT:    v_lshrrev_b32_e32 v29, 16, v29
-; GCN-NEXT:    v_alignbit_b32 v58, v29, v35, 16
-; GCN-NEXT:    v_add_i32_e32 v29, vcc, 40, v0
-; GCN-NEXT:    v_mul_f32_e32 v7, 1.0, v7
+; GCN-NEXT:    v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT:    v_mul_f32_e32 v32, 1.0, v58
+; GCN-NEXT:    v_lshrrev_b32_e32 v31, 16, v31
+; GCN-NEXT:    v_alignbit_b32 v58, v31, v32, 16
+; GCN-NEXT:    v_add_i32_e32 v31, vcc, 40, v0
 ; GCN-NEXT:    v_mul_f32_e32 v3, 1.0, v3
+; GCN-NEXT:    v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GCN-NEXT:    v_alignbit_b32 v3, v3, v11, 16
+; GCN-NEXT:    v_add_i32_e32 v11, vcc, 44, v0
+; GCN-NEXT:    v_mul_f32_e32 v7, 1.0, v7
+; GCN-NEXT:    v_mul_f32_e32 v32, 1.0, v57
 ; GCN-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GCN-NEXT:    v_alignbit_b32 v3, v7, v3, 16
-; GCN-NEXT:    v_add_i32_e32 v7, vcc, 44, v0
-; GCN-NEXT:    v_mul_f32_e32 v32, 1.0, v32
-; GCN-NEXT:    v_mul_f32_e32 v35, 1.0, v57
-; GCN-NEXT:    v_lshrrev_b32_e32 v32, 16, v32
-; GCN-NEXT:    v_alignbit_b32 v57, v32, v35, 16
+; GCN-NEXT:    v_alignbit_b32 v7, v7, v32, 16
 ; GCN-NEXT:    v_add_i32_e32 v32, vcc, 48, v0
 ; GCN-NEXT:    v_mul_f32_e32 v34, 1.0, v34
+; GCN-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v33, 1.0, v33
 ; GCN-NEXT:    v_lshrrev_b32_e32 v34, 16, v34
 ; GCN-NEXT:    v_alignbit_b32 v33, v34, v33, 16
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, 52, v0
+; GCN-NEXT:    v_mul_f32_e32 v35, 1.0, v36
+; GCN-NEXT:    v_mul_f32_e32 v36, 1.0, v56
+; GCN-NEXT:    v_lshrrev_b32_e32 v35, 16, v35
+; GCN-NEXT:    v_alignbit_b32 v35, v35, v36, 16
+; GCN-NEXT:    v_add_i32_e32 v36, vcc, 56, v0
+; GCN-NEXT:    v_mul_f32_e32 v6, 1.0, v6
 ; GCN-NEXT:    v_mul_f32_e32 v4, 1.0, v4
-; GCN-NEXT:    v_mul_f32_e32 v35, 1.0, v47
-; GCN-NEXT:    v_lshrrev_b32_e32 v4, 16, v4
-; GCN-NEXT:    v_alignbit_b32 v4, v4, v35, 16
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, 56, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
+; GCN-NEXT:    v_alignbit_b32 v4, v6, v4, 16
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, 60, v0
 ; GCN-NEXT:    v_mul_f32_e32 v2, 1.0, v2
-; GCN-NEXT:    v_mul_f32_e32 v6, 1.0, v6
+; GCN-NEXT:    v_mul_f32_e32 v52, 1.0, v47
 ; GCN-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
-; GCN-NEXT:    v_alignbit_b32 v2, v2, v6, 16
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, 60, v0
-; GCN-NEXT:    v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT:    v_mul_f32_e32 v52, 1.0, v46
-; GCN-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    v_alignbit_b32 v1, v1, v52, 16
+; GCN-NEXT:    v_alignbit_b32 v2, v2, v52, 16
 ; GCN-NEXT:    v_add_i32_e32 v52, vcc, 64, v0
 ; GCN-NEXT:    v_mul_f32_e32 v9, 1.0, v9
-; GCN-NEXT:    v_mul_f32_e32 v10, 1.0, v10
+; GCN-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; GCN-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
-; GCN-NEXT:    v_alignbit_b32 v9, v9, v10, 16
-; GCN-NEXT:    v_add_i32_e32 v10, vcc, 0x44, v0
-; GCN-NEXT:    v_mul_f32_e32 v18, 1.0, v18
+; GCN-NEXT:    v_alignbit_b32 v1, v9, v1, 16
+; GCN-NEXT:    v_add_i32_e32 v9, vcc, 0x44, v0
+; GCN-NEXT:    v_mul_f32_e32 v10, 1.0, v10
 ; GCN-NEXT:    v_mul_f32_e32 v54, 1.0, v45
-; GCN-NEXT:    v_lshrrev_b32_e32 v18, 16, v18
-; GCN-NEXT:    v_alignbit_b32 v18, v18, v54, 16
+; GCN-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
+; GCN-NEXT:    v_alignbit_b32 v10, v10, v54, 16
 ; GCN-NEXT:    v_add_i32_e32 v54, vcc, 0x48, v0
-; GCN-NEXT:    v_mul_f32_e32 v36, 1.0, v36
+; GCN-NEXT:    v_mul_f32_e32 v18, 1.0, v18
 ; GCN-NEXT:    v_mul_f32_e32 v17, 1.0, v17
-; GCN-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GCN-NEXT:    v_alignbit_b32 v17, v36, v17, 16
-; GCN-NEXT:    v_add_i32_e32 v36, vcc, 0x4c, v0
-; GCN-NEXT:    v_mul_f32_e32 v26, 1.0, v26
+; GCN-NEXT:    v_lshrrev_b32_e32 v18, 16, v18
+; GCN-NEXT:    v_alignbit_b32 v17, v18, v17, 16
+; GCN-NEXT:    v_add_i32_e32 v18, vcc, 0x4c, v0
+; GCN-NEXT:    v_mul_f32_e32 v37, 1.0, v37
 ; GCN-NEXT:    v_mul_f32_e32 v42, 1.0, v44
-; GCN-NEXT:    v_lshrrev_b32_e32 v26, 16, v26
-; GCN-NEXT:    v_alignbit_b32 v26, v26, v42, 16
+; GCN-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
+; GCN-NEXT:    v_alignbit_b32 v37, v37, v42, 16
 ; GCN-NEXT:    v_add_i32_e32 v42, vcc, 0x50, v0
-; GCN-NEXT:    v_mul_f32_e32 v8, 1.0, v8
 ; GCN-NEXT:    v_mul_f32_e32 v25, 1.0, v25
-; GCN-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
-; GCN-NEXT:    v_alignbit_b32 v8, v8, v25, 16
-; GCN-NEXT:    v_add_i32_e32 v25, vcc, 0x54, v0
-; GCN-NEXT:    v_mul_f32_e32 v37, 1.0, v37
+; GCN-NEXT:    v_mul_f32_e32 v26, 1.0, v26
+; GCN-NEXT:    v_lshrrev_b32_e32 v25, 16, v25
+; GCN-NEXT:    v_alignbit_b32 v25, v25, v26, 16
+; GCN-NEXT:    v_add_i32_e32 v26, vcc, 0x54, v0
+; GCN-NEXT:    v_mul_f32_e32 v8, 1.0, v8
 ; GCN-NEXT:    v_mul_f32_e32 v43, 1.0, v43
-; GCN-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GCN-NEXT:    v_alignbit_b32 v37, v37, v43, 16
+; GCN-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
+; GCN-NEXT:    v_alignbit_b32 v8, v8, v43, 16
 ; GCN-NEXT:    v_add_i32_e32 v43, vcc, 0x58, v0
 ; GCN-NEXT:    v_mul_f32_e32 v14, 1.0, v14
-; GCN-NEXT:    v_mul_f32_e32 v12, 1.0, v12
+; GCN-NEXT:    v_mul_f32_e32 v38, 1.0, v38
 ; GCN-NEXT:    v_lshrrev_b32_e32 v14, 16, v14
-; GCN-NEXT:    v_alignbit_b32 v12, v14, v12, 16
-; GCN-NEXT:    v_add_i32_e32 v14, vcc, 0x5c, v0
-; GCN-NEXT:    v_mul_f32_e32 v20, 1.0, v20
+; GCN-NEXT:    v_alignbit_b32 v14, v14, v38, 16
+; GCN-NEXT:    v_add_i32_e32 v38, vcc, 0x5c, v0
+; GCN-NEXT:    v_mul_f32_e32 v16, 1.0, v16
 ; GCN-NEXT:    v_mul_f32_e32 v41, 1.0, v41
-; GCN-NEXT:    v_lshrrev_b32_e32 v20, 16, v20
-; GCN-NEXT:    v_alignbit_b32 v20, v20, v41, 16
+; GCN-NEXT:    v_lshrrev_b32_e32 v16, 16, v16
+; GCN-NEXT:    v_alignbit_b32 v16, v16, v41, 16
 ; GCN-NEXT:    v_add_i32_e32 v41, vcc, 0x60, v0
-; GCN-NEXT:    v_mul_f32_e32 v28, 1.0, v28
+; GCN-NEXT:    v_mul_f32_e32 v22, 1.0, v22
+; GCN-NEXT:    v_mul_f32_e32 v20, 1.0, v20
+; GCN-NEXT:    v_lshrrev_b32_e32 v22, 16, v22
+; GCN-NEXT:    v_alignbit_b32 v20, v22, v20, 16
+; GCN-NEXT:    v_add_i32_e32 v22, vcc, 0x64, v0
 ; GCN-NEXT:    v_mul_f32_e32 v24, 1.0, v24
-; GCN-NEXT:    v_lshrrev_b32_e32 v28, 16, v28
-; GCN-NEXT:    v_alignbit_b32 v24, v28, v24, 16
-; GCN-NEXT:    v_add_i32_e32 v28, vcc, 0x64, v0
-; GCN-NEXT:    v_mul_f32_e32 v30, 1.0, v30
 ; GCN-NEXT:    v_mul_f32_e32 v40, 1.0, v40
-; GCN-NEXT:    v_lshrrev_b32_e32 v30, 16, v30
-; GCN-NEXT:    v_alignbit_b32 v30, v30, v40, 16
+; GCN-NEXT:    v_lshrrev_b32_e32 v24, 16, v24
+; GCN-NEXT:    v_alignbit_b32 v24, v24, v40, 16
 ; GCN-NEXT:    v_add_i32_e32 v40, vcc, 0x68, v0
 ; GCN-NEXT:    v_mul_f32_e32 v39, 1.0, v39
-; GCN-NEXT:    v_mul_f32_e32 v38, 1.0, v38
+; GCN-NEXT:    v_mul_f32_e32 v28, 1.0, v28
 ; GCN-NEXT:    v_lshrrev_b32_e32 v39, 16, v39
-; GCN-NEXT:    v_alignbit_b32 v38, v39, v38, 16
+; GCN-NEXT:    v_alignbit_b32 v28, v39, v28, 16
 ; GCN-NEXT:    v_add_i32_e32 v39, vcc, 0x6c, v0
 ; GCN-NEXT:    v_mul_f32_e32 v48, 1.0, v48
 ; GCN-NEXT:    v_mul_f32_e32 v55, 1.0, v55
@@ -74877,33 +74887,35 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_alignbit_b32 v51, v51, v53, 16
 ; GCN-NEXT:    v_add_i32_e32 v53, vcc, 0x78, v0
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, 0x7c, v0
-; GCN-NEXT:    buffer_store_dword v63, v31, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v56, v15, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v62, v21, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v61, v11, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v5, v13, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v60, v19, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v16, v22, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v59, v27, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v23, v29, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v58, v7, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v63, v19, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v46, v15, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v62, v27, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v61, v12, s[0:3], 0 offen
+; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v12, v13, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v60, v23, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v21, v29, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v59, v30, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v5, v31, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v58, v11, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v3, v32, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v57, v34, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v33, v35, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v4, v6, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v2, v52, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v9, v54, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v18, v36, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v7, v34, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v33, v36, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v35, v6, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v4, v52, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v2, v9, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v1, v54, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v10, v18, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v17, v42, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v26, v25, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v8, v43, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v37, v14, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v12, v41, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v20, v28, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v24, v40, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v30, v39, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v38, v55, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v37, v26, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v25, v43, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v8, v38, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v14, v41, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v16, v22, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v20, v40, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v24, v39, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v28, v55, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v48, v50, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v49, v53, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v51, v0, s[0:3], 0 offen
@@ -75912,11 +75924,12 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; VI-NEXT:    v_add_u16_e32 v22, 3, v22
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_sdwa v36, v23, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; VI-NEXT:    v_add_u16_sdwa v36, v36, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_add_u16_e32 v22, 3, v22
+; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_or_b32_sdwa v24, v23, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_add_u16_e32 v7, 3, v7
@@ -77138,15 +77151,16 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
 ; GFX9-NEXT:    v_add_u16_e32 v27, 3, v27
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_or_b32_sdwa v39, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_add_u16_e32 v7, 0x300, v7
 ; GFX9-NEXT:    v_add_u16_e32 v25, 0x300, v39
-; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_add_u16_e32 v27, 3, v27
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_or_b32_sdwa v48, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
@@ -84982,24 +84996,22 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v3, 0xffff, v36, v3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v5
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v33.l, v147.h
-; GFX11-TRUE16-NEXT:    v_dual_add_f32 v34, 0x40c00000, v38 :: v_dual_add_f32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT:    v_dual_add_f32 v34, 0x40c00000, v38 :: v_dual_lshlrev_b32 v5, 16, v5
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v7, 0x40c00000, v7
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v105, 8, v3
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v4, 0xffff, v33, v149
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v37, v34, 16, 1
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v33, v6, 16, 1
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v37, v34, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v38, 0x400000, v34
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v93, 24, v4
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v37, v34, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v33, v33, v6, 0x7fff
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v37, 0x400000, v6
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v95, 8, v4
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_add3_u32 v35, v37, v34, 0x7fff
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v37, 0x400000, v6
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v164, v33, v37, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v36, 0x40c00000, v36
@@ -86311,20 +86323,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v113, v115, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v113, 0x400000, v13
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v115, 16, v15
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v163, v12, v10, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v20, 16, v18
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v13, v112, v113 :: v_dual_add_f32 v112, 0x40c00000, v115
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v113, v114, v67, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v114, 0x400000, v67
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v115, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v116, v112, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v112
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v118, 0x400000, v15
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v148, v13, v66, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v67, v113, v114, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v114, v115, v16, 0x7fff
@@ -86332,20 +86343,20 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v113, v15, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v116, v116, v112, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v118, 0x400000, v15
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v17, 16, v33
-; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[33:34], 24, v[96:97]
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v16, v114, v115, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v112, v112
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v113, v113, v15, 0x7fff
+; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[33:34], 24, v[96:97]
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[34:35], 24, v[86:87]
-; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[35:36], 24, v[84:85]
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v135, v16, v67, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v112, v116, v117, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v14
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v14, 16, v52
+; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[35:36], 24, v[84:85]
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v5
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 16, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v15, v113, v118, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v118, 16, v12
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v12, 16, v10
@@ -86360,6 +86371,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[49:50], 24, v[148:149]
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[50:51], 24, v[162:163]
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[51:52], 24, v[176:177]
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 16, v53
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v117, 16, v25
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v37
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[52:53], 24, v[182:183]
@@ -86796,314 +86808,313 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; GCN-NEXT:    v_mov_b32_e32 v50, v27
-; GCN-NEXT:    v_mov_b32_e32 v49, v25
+; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GCN-NEXT:    v_mov_b32_e32 v49, v23
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v27, v22
 ; GCN-NEXT:    v_mov_b32_e32 v39, v21
-; GCN-NEXT:    v_mov_b32_e32 v48, v3
-; GCN-NEXT:    v_mov_b32_e32 v37, v1
+; GCN-NEXT:    v_mov_b32_e32 v22, v20
+; GCN-NEXT:    v_mov_b32_e32 v38, v19
+; GCN-NEXT:    v_mov_b32_e32 v20, v18
+; GCN-NEXT:    v_mov_b32_e32 v37, v17
+; GCN-NEXT:    v_mov_b32_e32 v18, v16
+; GCN-NEXT:    v_mov_b32_e32 v16, v14
+; GCN-NEXT:    v_mov_b32_e32 v14, v12
+; GCN-NEXT:    v_mov_b32_e32 v12, v3
+; GCN-NEXT:    v_mov_b32_e32 v34, v1
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:108
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:92
-; GCN-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:88
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:116
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:108
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:84
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:100
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:96
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:76
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:92
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:88
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:84
+; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:76
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:72
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:4
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:392
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:68
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:64
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:60
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:56
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:48
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:24
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:44
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:40
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:20
-; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:16
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:32
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v4
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v6
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v4
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v10
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v6
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v12
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v8
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v14
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v10
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v16
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v14
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v18, 8, v18
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v20
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v16
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v22, 8, v22
-; GCN-NEXT:    v_lshlrev_b32_e32 v24, 8, v24
+; GCN-NEXT:    v_lshlrev_b32_e32 v18, 8, v18
+; GCN-NEXT:    v_lshlrev_b32_e32 v20, 8, v20
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v26
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v22
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v28, 8, v28
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v30
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v27
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:12
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:8
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:4
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:392
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:116
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:112
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v24
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:124
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v26
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v28
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v30, 8, v30
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v35
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:24
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:16
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:12
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:8
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:112
+; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:124
+; GCN-NEXT:    v_lshlrev_b32_e32 v22, 8, v33
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt vmcnt(4)
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 8, v6
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v21
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v34
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v23
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v33
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v21
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v25
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v19
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v32
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v17
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v31
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v27
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v36
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v35
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v32
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v31
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:100
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v29
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:104
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:96
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:104
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v1
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    s_waitcnt vmcnt(14) expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v8
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v10
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:132
-; GCN-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:128
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:128
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:140
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:136
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:148
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:136
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:144
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:156
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v1
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v2
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v4
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:164
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:160
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:172
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:180
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:168
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:180
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:176
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:188
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v1
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v2
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v4
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:196
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:192
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:204
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:212
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:200
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:212
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:208
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:220
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(4) expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v3
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v4
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v1
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v4
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:228
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:224
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:236
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:232
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:224
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:236
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:244
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:240
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:232
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:240
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:252
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v1
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v3
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v4
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v4
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:260
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:256
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:268
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:264
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:256
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:268
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:276
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:272
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:264
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:272
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:284
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v1
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v3
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v4
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v3
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt vmcnt(3) expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 8, v4
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:292
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:288
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:300
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:296
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:288
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:300
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:308
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:304
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:296
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:304
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:316
 ; GCN-NEXT:    v_lshlrev_b32_e32 v52, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v54, 8, v2
@@ -87111,814 +87122,811 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v41, 8, v4
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:324
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:320
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:332
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:328
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:320
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:332
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:340
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:336
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:328
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:336
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:348
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v45, 8, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v47, 8, v3
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v57, 8, v4
+; GCN-NEXT:    v_lshlrev_b32_e32 v56, 8, v4
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:356
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:364
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:352
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:360
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:352
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:360
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:372
 ; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:388
 ; GCN-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:384
 ; GCN-NEXT:    v_lshlrev_b32_e32 v62, 8, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v31, 8, v2
-; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:120
+; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v2
+; GCN-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:120
 ; GCN-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:152
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:184
 ; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:216
 ; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s32 offset:248
 ; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:280
-; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:312
+; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:312
 ; GCN-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:344
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:380
 ; GCN-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:376
 ; GCN-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:368
-; GCN-NEXT:    s_waitcnt vmcnt(13)
-; GCN-NEXT:    v_lshlrev_b32_e32 v63, 8, v3
+; GCN-NEXT:    s_waitcnt vmcnt(14)
+; GCN-NEXT:    v_lshlrev_b32_e32 v31, 8, v3
 ; GCN-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 8, v4
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr17
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
 ; GCN-NEXT:    ; implicit-def: $vgpr32
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr50
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr29
+; GCN-NEXT:    ; implicit-def: $vgpr24
 ; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr20
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr26
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr6
-; GCN-NEXT:    ; implicit-def: $vgpr27
-; GCN-NEXT:    ; implicit-def: $vgpr38
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr33
+; GCN-NEXT:    ; implicit-def: $vgpr48
 ; GCN-NEXT:    ; implicit-def: $vgpr4
-; GCN-NEXT:    ; implicit-def: $vgpr16
-; GCN-NEXT:    ; implicit-def: $vgpr10
-; GCN-NEXT:    ; implicit-def: $vgpr21
-; GCN-NEXT:    ; implicit-def: $vgpr34
-; GCN-NEXT:    ; implicit-def: $vgpr25
-; GCN-NEXT:    ; implicit-def: $vgpr35
+; GCN-NEXT:    ; implicit-def: $vgpr22
+; GCN-NEXT:    ; implicit-def: $vgpr8
+; GCN-NEXT:    ; implicit-def: $vgpr19
+; GCN-NEXT:    ; implicit-def: $vgpr33
+; GCN-NEXT:    ; implicit-def: $vgpr27
 ; GCN-NEXT:    ; implicit-def: $vgpr36
+; GCN-NEXT:    ; implicit-def: $vgpr35
 ; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
 ; GCN-NEXT:    s_cbranch_execz .LBB46_2
 ; GCN-NEXT:  ; %bb.1: ; %cmp.false
-; GCN-NEXT:    v_mov_b32_e32 v26, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v37
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v48
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v5
+; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v34
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v12
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v7
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v5
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v9
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v7
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v11
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v9
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v13
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v11
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v15
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v21, v0, v2
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v17
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v18
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v13
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v19
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v19, v0, v2
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v39
-; GCN-NEXT:    v_or_b32_e32 v25, v0, v22
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v23
-; GCN-NEXT:    v_or_b32_e32 v23, v0, v24
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v49
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v32, v0, v2
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v50
-; GCN-NEXT:    v_or_b32_e32 v33, v0, v28
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v29
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v15
+; GCN-NEXT:    v_mov_b32_e32 v9, v23
+; GCN-NEXT:    v_or_b32_e32 v23, v2, v18
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v37
+; GCN-NEXT:    v_mov_b32_e32 v7, v21
+; GCN-NEXT:    v_or_b32_e32 v21, v2, v20
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v38
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v34, v0, v2
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v12
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v19, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v39
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v36, v0, v2
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v14
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v17, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v49
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v27, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v25
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v29, v2, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v35, v2, v4
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v32, v2, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v37, v2, v4
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v0
+; GCN-NEXT:    v_or_b32_e32 v33, v2, v30
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v38, v2, v4
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v35, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v10
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v34, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v14
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v39, v2, v4
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v36, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v16
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v37, v2, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v48, v2, v4
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v38, v2, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v4
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v4
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v4, v4, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v6, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v39, v4, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v4, v4, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    v_mov_b32_e32 v5, v6
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v49, v6, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v6, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v6, v6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v6, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v48, v8, v0
+; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v51
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v50, v6, v5
-; GCN-NEXT:    v_and_b32_e32 v6, 0xff, v51
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v51, v8, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v6, v6, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GCN-NEXT:    v_mov_b32_e32 v7, v8
+; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v8, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v8, v8, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v8, v8, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v49, v10, v0
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v10, v10, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v50, v10, v0
+; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v26
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v12, v10, v0
+; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v28
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v14, v12, v5
-; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v30
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v28, v10, v0
+; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v7
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v30, v12, v5
-; GCN-NEXT:    v_and_b32_e32 v12, 0xff, v7
+; GCN-NEXT:    v_or_b32_e32 v10, v10, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v9
 ; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v12, v12, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v5
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v7, 0xff, v5
+; GCN-NEXT:    v_and_b32_e32 v14, 0xff, v5
 ; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v7, v7, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v16, v16, v5
+; GCN-NEXT:    v_or_b32_e32 v14, v14, v5
 ; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v53
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v53, v53, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v5, 0xff, v5
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v9, v9, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v5, v5, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v16, 0xff, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v18, v18, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v16, v16, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v20, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v18, 0xff, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v20, v20, v5
+; GCN-NEXT:    v_or_b32_e32 v18, v18, v7
 ; GCN-NEXT:    v_and_b32_e32 v40, 0xff, v40
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v40, v40, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v40, v40, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v20, 0xff, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v22, v22, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v20, v20, v7
+; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v5
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v7, 0xff, v7
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v7, v7, v9
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v11, v11, v5
-; GCN-NEXT:    v_and_b32_e32 v5, 0xff, v26
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v22, 0xff, v9
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v5, v5, v13
+; GCN-NEXT:    v_or_b32_e32 v22, v22, v9
 ; GCN-NEXT:    v_and_b32_e32 v42, 0xff, v42
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v42, v42, v9
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v11, 0xff, v9
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v42, v42, v13
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v11, v11, v9
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v9, 0xff, v9
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v9, v9, v13
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v13
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v24, v24, v13
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v43, 0xff, v43
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v43, v43, v13
+; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v13, 0xff, v13
-; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v13, v13, v15
-; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v26, 0xff, v15
-; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v26, v15
-; GCN-NEXT:    v_and_b32_e32 v43, 0xff, v43
-; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v43, v43, v15
-; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v15, 0xff, v15
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v15, v15, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v28, v28, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v17, 0xff, v17
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v17, v17, v27
+; GCN-NEXT:    v_or_b32_e32 v15, v15, v25
 ; GCN-NEXT:    v_and_b32_e32 v46, 0xff, v46
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v46, v46, v27
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v46, v46, v25
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v51, 0xff, v27
-; GCN-NEXT:    v_or_b32_e32 v51, v51, v52
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v25
+; GCN-NEXT:    v_or_b32_e32 v30, v30, v52
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v27
+; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v52, v52, v54
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v54, 0xff, v27
+; GCN-NEXT:    v_and_b32_e32 v54, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v54, v54, v55
-; GCN-NEXT:    v_and_b32_e32 v55, 0xff, v56
+; GCN-NEXT:    v_and_b32_e32 v55, 0xff, v57
 ; GCN-NEXT:    v_or_b32_e32 v55, v55, v41
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v41, 0xff, v27
+; GCN-NEXT:    v_and_b32_e32 v41, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v41, v41, v44
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v27
+; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v44, v44, v45
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v45, 0xff, v27
+; GCN-NEXT:    v_and_b32_e32 v45, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v45, v45, v47
 ; GCN-NEXT:    v_and_b32_e32 v47, 0xff, v58
-; GCN-NEXT:    v_or_b32_e32 v47, v47, v57
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v47, v47, v56
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v56, 0xff, v27
+; GCN-NEXT:    v_and_b32_e32 v56, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v56, v56, v62
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_and_b32_e32 v57, 0xff, v27
-; GCN-NEXT:    v_or_b32_e32 v31, v57, v31
-; GCN-NEXT:    v_and_b32_e32 v57, 0xff, v60
+; GCN-NEXT:    v_and_b32_e32 v57, 0xff, v25
 ; GCN-NEXT:    v_or_b32_e32 v57, v57, v63
+; GCN-NEXT:    v_and_b32_e32 v58, 0xff, v60
+; GCN-NEXT:    v_or_b32_e32 v31, v58, v31
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xff, v61
 ; GCN-NEXT:    v_or_b32_e32 v1, v58, v1
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xff, v59
 ; GCN-NEXT:    v_or_b32_e32 v3, v58, v3
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v21, v21
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v23, v23
+; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v21, v21
 ; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v19
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v25
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v23
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v32
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v27
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v33
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v29
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v34
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v32
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v36
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v33
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v35
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v35
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v37
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v34
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v38
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v36
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v39
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v37
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v48
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v38
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v2
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v39
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v4
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v4
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v49
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v6
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v50
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v48
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v6
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v51
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v8
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v8
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v10
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v49
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v14
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v50
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v30
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v12
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v12
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v28
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v7
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v10
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v16
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v14
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v53
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v9
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v5
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v18
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v16
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v20
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v18
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v40
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v22
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v20
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v11
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v7
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v5
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v22
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v42
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v24
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v11
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v13
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v9
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v32, v26
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v24
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v43
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v20, v15
+; GCN-NEXT:    v_cvt_f32_f16_e32 v32, v13
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v28
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v26
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v26, v17
+; GCN-NEXT:    v_cvt_f32_f16_e32 v50, v15
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v46
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v6, v51
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v52
-; GCN-NEXT:    v_cvt_f32_f16_e32 v38, v54
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v30
+; GCN-NEXT:    v_cvt_f32_f16_e32 v29, v52
+; GCN-NEXT:    v_cvt_f32_f16_e32 v24, v54
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v55
-; GCN-NEXT:    v_cvt_f32_f16_e32 v33, v41
+; GCN-NEXT:    v_cvt_f32_f16_e32 v48, v41
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v4, v44
-; GCN-NEXT:    v_cvt_f32_f16_e32 v16, v45
-; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v47
-; GCN-NEXT:    v_cvt_f32_f16_e32 v21, v56
-; GCN-NEXT:    v_cvt_f32_f16_e32 v34, v31
-; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v57
-; GCN-NEXT:    v_cvt_f32_f16_e32 v35, v1
-; GCN-NEXT:    v_cvt_f32_f16_e32 v36, v3
-; GCN-NEXT:    ; implicit-def: $vgpr37
-; GCN-NEXT:    ; implicit-def: $vgpr48
+; GCN-NEXT:    v_cvt_f32_f16_e32 v22, v45
+; GCN-NEXT:    v_cvt_f32_f16_e32 v8, v47
+; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v56
+; GCN-NEXT:    v_cvt_f32_f16_e32 v33, v57
+; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v31
+; GCN-NEXT:    v_cvt_f32_f16_e32 v36, v1
+; GCN-NEXT:    v_cvt_f32_f16_e32 v35, v3
+; GCN-NEXT:    ; implicit-def: $vgpr34
+; GCN-NEXT:    ; implicit-def: $vgpr12
 ; GCN-NEXT:    ; implicit-def: $vgpr5
 ; GCN-NEXT:    ; implicit-def: $vgpr7
 ; GCN-NEXT:    ; implicit-def: $vgpr9
 ; GCN-NEXT:    ; implicit-def: $vgpr11
 ; GCN-NEXT:    ; implicit-def: $vgpr13
 ; GCN-NEXT:    ; implicit-def: $vgpr15
-; GCN-NEXT:    ; implicit-def: $vgpr17
-; GCN-NEXT:    ; implicit-def: $vgpr19
+; GCN-NEXT:    ; implicit-def: $vgpr37
+; GCN-NEXT:    ; implicit-def: $vgpr38
 ; GCN-NEXT:    ; implicit-def: $vgpr39
-; GCN-NEXT:    ; implicit-def: $vgpr23
 ; GCN-NEXT:    ; implicit-def: $vgpr49
-; GCN-NEXT:    ; implicit-def: $vgpr50
-; GCN-NEXT:    ; implicit-def: $vgpr29
-; GCN-NEXT:    ; implicit-def: $vgpr12
+; GCN-NEXT:    ; implicit-def: $vgpr25
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr10
 ; GCN-NEXT:    ; implicit-def: $vgpr14
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr16
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr51
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr30
-; GCN-NEXT:    ; implicit-def: $vgpr8
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr6
+; GCN-NEXT:    ; implicit-def: $vgpr26
+; GCN-NEXT:    ; implicit-def: $vgpr28
+; GCN-NEXT:    ; implicit-def: $vgpr21
+; GCN-NEXT:    ; implicit-def: $vgpr23
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr53
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr40
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr42
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr43
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr46
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr56
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr57
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr58
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr60
 ; GCN-NEXT:    ; implicit-def: $vgpr61
 ; GCN-NEXT:    ; implicit-def: $vgpr59
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr18
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr22
-; GCN-NEXT:    ; implicit-def: $vgpr24
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr28
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    ; kill: killed $vgpr0
-; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    ; implicit-def: $vgpr20
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr30
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr1
 ; GCN-NEXT:    ; kill: killed $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr1
@@ -87958,10 +87966,10 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr44
 ; GCN-NEXT:    ; implicit-def: $vgpr45
 ; GCN-NEXT:    ; implicit-def: $vgpr47
-; GCN-NEXT:    ; implicit-def: $vgpr57
+; GCN-NEXT:    ; implicit-def: $vgpr56
 ; GCN-NEXT:    ; implicit-def: $vgpr62
-; GCN-NEXT:    ; implicit-def: $vgpr31
 ; GCN-NEXT:    ; implicit-def: $vgpr63
+; GCN-NEXT:    ; implicit-def: $vgpr31
 ; GCN-NEXT:    ; implicit-def: $vgpr1
 ; GCN-NEXT:    ; implicit-def: $vgpr3
 ; GCN-NEXT:  .LBB46_2: ; %Flow
@@ -87970,659 +87978,665 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:  ; %bb.3: ; %cmp.true
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v59
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v6, v3, v2
+; GCN-NEXT:    v_or_b32_e32 v4, v3, v2
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v61
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v10, v1, v2
+; GCN-NEXT:    v_or_b32_e32 v8, v1, v2
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v60
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v16, v63, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v22, v31, v2
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v20, v31, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v0, v63, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v62, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v62, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v58
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v57, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v56, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v47, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v47, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v45, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v45, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v44, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v56
+; GCN-NEXT:    v_or_b32_e32 v0, v44, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v57
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v41, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v41, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v55, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v55, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v54, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v54, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    v_or_b32_e32 v1, v52, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v52, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v46
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v43
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v42
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, 3, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GCN-NEXT:    v_mov_b32_e32 v3, v24
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 3, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v24, v0, v24
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xff, v0
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GCN-NEXT:    v_mov_b32_e32 v1, v26
 ; GCN-NEXT:    v_add_i32_e32 v26, vcc, 3, v40
 ; GCN-NEXT:    v_and_b32_e32 v26, 0xff, v26
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v0, v26
-; GCN-NEXT:    v_mov_b32_e32 v0, v37
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v27, vcc, 3, v0
+; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GCN-NEXT:    v_mov_b32_e32 v4, v28
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v27, v0, v27
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v28, vcc, 3, v1
-; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v29, vcc, 3, v0
+; GCN-NEXT:    v_and_b32_e32 v29, 0xff, v29
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v28
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v29, v0, v29
+; GCN-NEXT:    v_mov_b32_e32 v2, v30
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
-; GCN-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v0
+; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v2, v1
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v30
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v31, vcc, 3, v53
 ; GCN-NEXT:    v_and_b32_e32 v31, 0xff, v31
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v31
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v31
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v32, vcc, 3, v1
+; GCN-NEXT:    v_add_i32_e32 v32, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v32, 0xff, v32
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v32
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v33, vcc, 3, v1
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v32
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v33, vcc, 3, v23
 ; GCN-NEXT:    v_and_b32_e32 v33, 0xff, v33
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v33
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v8
-; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v8, v1, v34
-; GCN-NEXT:    v_add_i32_e32 v30, vcc, 3, v30
-; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v33
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v34
+; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v21
+; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v34
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v30
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v34
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v28, vcc, 3, v28
+; GCN-NEXT:    v_and_b32_e32 v28, 0xff, v28
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v28
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v35
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v35
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v6
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v36
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v36
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v6, v1, v36
+; GCN-NEXT:    v_mov_b32_e32 v17, v37
+; GCN-NEXT:    v_mov_b32_e32 v19, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v37, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v37, 0xff, v37
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v37
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_mov_b32_e32 v21, v39
-; GCN-NEXT:    v_add_i32_e32 v38, vcc, 3, v51
-; GCN-NEXT:    v_and_b32_e32 v38, 0xff, v38
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v38
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v38, vcc, 3, v1
+; GCN-NEXT:    v_and_b32_e32 v38, 0xff, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v39, vcc, 3, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v38
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GCN-NEXT:    v_mov_b32_e32 v23, v49
+; GCN-NEXT:    v_add_i32_e32 v39, vcc, 3, v51
 ; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v39
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v39
-; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_mov_b32_e32 v1, v48
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v48, vcc, 3, v2
+; GCN-NEXT:    v_add_i32_e32 v48, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v48
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v48
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GCN-NEXT:    v_mov_b32_e32 v25, v49
-; GCN-NEXT:    v_mov_b32_e32 v27, v50
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v48
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v49, vcc, 3, v2
+; GCN-NEXT:    v_add_i32_e32 v49, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v49
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v49
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v49
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v50, vcc, 3, v2
+; GCN-NEXT:    v_add_i32_e32 v50, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v50, 0xff, v50
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v50
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v50
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v51, vcc, 3, v2
+; GCN-NEXT:    v_add_i32_e32 v51, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v51, 0xff, v51
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v51
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v51
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v52, vcc, 3, v2
+; GCN-NEXT:    v_add_i32_e32 v52, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v52
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v52
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v52
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v53, vcc, 3, v2
+; GCN-NEXT:    v_add_i32_e32 v53, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v53
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v53
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v54, vcc, 3, v2
-; GCN-NEXT:    v_and_b32_e32 v54, 0xff, v54
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v54
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v53
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v54, vcc, 3, v16
+; GCN-NEXT:    v_and_b32_e32 v54, 0xff, v54
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v55, vcc, 3, v2
+; GCN-NEXT:    v_or_b32_e32 v16, v1, v54
+; GCN-NEXT:    v_add_i32_e32 v55, vcc, 3, v14
 ; GCN-NEXT:    v_and_b32_e32 v55, 0xff, v55
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v55
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v40, vcc, 3, v14
+; GCN-NEXT:    v_or_b32_e32 v14, v1, v55
+; GCN-NEXT:    v_add_i32_e32 v40, vcc, 3, v10
 ; GCN-NEXT:    v_and_b32_e32 v40, 0xff, v40
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v10, v1, v40
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v14, v2, v40
-; GCN-NEXT:    v_add_i32_e32 v41, vcc, 3, v12
+; GCN-NEXT:    v_add_i32_e32 v41, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v41, 0xff, v41
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v41
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v12, v2, v41
-; GCN-NEXT:    v_add_i32_e32 v42, vcc, 3, v29
+; GCN-NEXT:    v_add_i32_e32 v42, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v42, 0xff, v42
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v1, v2, v42
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v42
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v43, vcc, 3, v27
+; GCN-NEXT:    v_add_i32_e32 v43, vcc, 3, v1
 ; GCN-NEXT:    v_and_b32_e32 v43, 0xff, v43
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v4, v43
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v43
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v44, vcc, 3, v25
 ; GCN-NEXT:    v_and_b32_e32 v44, 0xff, v44
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v2, v44
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v44
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v45, vcc, 3, v23
 ; GCN-NEXT:    v_and_b32_e32 v45, 0xff, v45
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v3, v45
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v45
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v46, vcc, 3, v21
 ; GCN-NEXT:    v_and_b32_e32 v46, 0xff, v46
-; GCN-NEXT:    v_or_b32_e32 v46, v22, v46
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v46
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v47, vcc, 3, v19
 ; GCN-NEXT:    v_and_b32_e32 v47, 0xff, v47
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v47, v2, v47
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v47
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v56, vcc, 3, v17
 ; GCN-NEXT:    v_and_b32_e32 v56, 0xff, v56
-; GCN-NEXT:    v_or_b32_e32 v56, v18, v56
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v20, v56
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v57, vcc, 3, v15
 ; GCN-NEXT:    v_and_b32_e32 v57, 0xff, v57
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v57, v2, v57
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v18, v57
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_add_i32_e32 v58, vcc, 3, v13
 ; GCN-NEXT:    v_and_b32_e32 v58, 0xff, v58
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v58, v2, v58
+; GCN-NEXT:    v_or_b32_e32 v58, v1, v58
 ; GCN-NEXT:    v_add_i32_e32 v59, vcc, 3, v11
 ; GCN-NEXT:    v_and_b32_e32 v59, 0xff, v59
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v59, v2, v59
+; GCN-NEXT:    v_or_b32_e32 v59, v1, v59
 ; GCN-NEXT:    v_add_i32_e32 v60, vcc, 3, v9
 ; GCN-NEXT:    v_and_b32_e32 v60, 0xff, v60
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v60, v2, v60
+; GCN-NEXT:    v_or_b32_e32 v60, v1, v60
 ; GCN-NEXT:    v_add_i32_e32 v61, vcc, 3, v7
 ; GCN-NEXT:    v_and_b32_e32 v61, 0xff, v61
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v61, v2, v61
+; GCN-NEXT:    v_or_b32_e32 v61, v1, v61
 ; GCN-NEXT:    v_add_i32_e32 v62, vcc, 3, v5
 ; GCN-NEXT:    v_and_b32_e32 v62, 0xff, v62
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v62, v2, v62
-; GCN-NEXT:    v_add_i32_e32 v63, vcc, 3, v1
+; GCN-NEXT:    v_or_b32_e32 v62, v1, v62
+; GCN-NEXT:    v_add_i32_e32 v63, vcc, 3, v12
 ; GCN-NEXT:    v_and_b32_e32 v63, 0xff, v63
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v63, v1, v63
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, 3, v0
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xff, v3
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v45, v0, v3
+; GCN-NEXT:    v_or_b32_e32 v57, v0, v3
 ; GCN-NEXT:    s_movk_i32 s6, 0x300
-; GCN-NEXT:    v_add_i32_e32 v44, vcc, 0x300, v6
-; GCN-NEXT:    v_add_i32_e32 v43, vcc, s6, v10
-; GCN-NEXT:    v_add_i32_e32 v42, vcc, s6, v16
-; GCN-NEXT:    v_add_i32_e32 v41, vcc, s6, v20
+; GCN-NEXT:    v_add_i32_e32 v56, vcc, 0x300, v4
+; GCN-NEXT:    v_add_i32_e32 v47, vcc, s6, v8
+; GCN-NEXT:    v_add_i32_e32 v46, vcc, s6, v22
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v40, vcc, s6, v0
+; GCN-NEXT:    v_add_i32_e32 v45, vcc, s6, v0
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v44, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v43, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v42, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v41, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v40, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v55, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v54, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v53, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v52, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v51, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v50, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v49, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v48, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v39, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v38, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v37, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v36, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v35, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v34, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v33, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v33, vcc, s6, v24
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v32, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v31, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v30, vcc, s6, v0
-; GCN-NEXT:    v_add_i32_e32 v29, vcc, s6, v24
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v31, vcc, s6, v26
+; GCN-NEXT:    v_add_i32_e32 v30, vcc, s6, v27
+; GCN-NEXT:    v_add_i32_e32 v29, vcc, s6, v29
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v28, vcc, s6, v0
-; GCN-NEXT:    v_add_i32_e32 v27, vcc, s6, v26
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v27, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v26, vcc, s6, v0
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v24, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v23, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v21, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v19, vcc, s6, v6
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v19, vcc, s6, v0
-; GCN-NEXT:    v_add_i32_e32 v17, vcc, s6, v8
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v17, vcc, s6, v0
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v15, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v11, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v10, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v9, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v7, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s6, v16
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, s6, v14
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v9, vcc, s6, v9
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, s6, v5
-; GCN-NEXT:    v_add_i32_e32 v22, vcc, s6, v14
-; GCN-NEXT:    v_add_i32_e32 v20, vcc, s6, v12
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v11, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v18, vcc, s6, v12
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v22, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v16, vcc, s6, v12
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v20, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v14, vcc, s6, v12
-; GCN-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v18, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v12, vcc, s6, v12
-; GCN-NEXT:    v_add_i32_e32 v46, vcc, s6, v46
-; GCN-NEXT:    v_add_i32_e32 v47, vcc, s6, v47
-; GCN-NEXT:    v_add_i32_e32 v56, vcc, s6, v56
-; GCN-NEXT:    v_add_i32_e32 v57, vcc, s6, v57
+; GCN-NEXT:    v_add_i32_e32 v16, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v14, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, s6, v10
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v10, vcc, s6, v10
 ; GCN-NEXT:    v_add_i32_e32 v58, vcc, s6, v58
 ; GCN-NEXT:    v_add_i32_e32 v59, vcc, s6, v59
 ; GCN-NEXT:    v_add_i32_e32 v60, vcc, s6, v60
 ; GCN-NEXT:    v_add_i32_e32 v61, vcc, s6, v61
 ; GCN-NEXT:    v_add_i32_e32 v62, vcc, s6, v62
 ; GCN-NEXT:    v_add_i32_e32 v63, vcc, s6, v63
-; GCN-NEXT:    v_add_i32_e32 v45, vcc, s6, v45
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v45
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v63
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v62
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v57, vcc, s6, v57
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v57
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v61
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v63
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v60
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v62
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v59
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v61
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v58
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v60
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v57
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v59
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v56
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v57, v58
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v10
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v47
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v12
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v45, v46
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v12, v12
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v14
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v12, v14
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v16
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v12, v16
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v18
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v12, v18
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v20
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v12, v20
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v22
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v12, v22
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v11
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v9, v9
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v5, v5
 ; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v1
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
@@ -88639,421 +88653,410 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v6
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v7
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v8
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v8
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v13
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v9
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v15
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v10
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v17
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v11
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v19
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v13
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v21
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v15
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v23
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v17
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v24
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v19
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v25
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v21
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v26
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v23
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v27
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v24
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v28
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v25
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v29
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v26
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v30
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v27
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v31
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v28
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v32
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v29
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v33
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v30
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v34
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v31
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v35
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v32
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v36
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v33
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v37
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v32, v34
+; GCN-NEXT:    v_cvt_f32_f16_e32 v17, v38
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v35
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v39
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v20, v36
+; GCN-NEXT:    v_cvt_f32_f16_e32 v32, v48
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v37
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v49
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v26, v38
+; GCN-NEXT:    v_cvt_f32_f16_e32 v50, v50
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v39
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
-; GCN-NEXT:    v_cvt_f32_f16_e32 v6, v48
-; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v49
-; GCN-NEXT:    v_cvt_f32_f16_e32 v38, v50
-; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v51
-; GCN-NEXT:    v_cvt_f32_f16_e32 v33, v52
-; GCN-NEXT:    v_cvt_f32_f16_e32 v4, v53
-; GCN-NEXT:    v_cvt_f32_f16_e32 v16, v54
-; GCN-NEXT:    v_cvt_f32_f16_e32 v10, v55
-; GCN-NEXT:    v_cvt_f32_f16_e32 v21, v40
-; GCN-NEXT:    v_cvt_f32_f16_e32 v34, v41
-; GCN-NEXT:    v_cvt_f32_f16_e32 v25, v42
-; GCN-NEXT:    v_cvt_f32_f16_e32 v35, v43
-; GCN-NEXT:    v_cvt_f32_f16_e32 v36, v44
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v51
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v52
+; GCN-NEXT:    v_cvt_f32_f16_e32 v29, v53
+; GCN-NEXT:    v_cvt_f32_f16_e32 v24, v54
+; GCN-NEXT:    v_cvt_f32_f16_e32 v2, v55
+; GCN-NEXT:    v_cvt_f32_f16_e32 v48, v40
+; GCN-NEXT:    v_cvt_f32_f16_e32 v4, v41
+; GCN-NEXT:    v_cvt_f32_f16_e32 v22, v42
+; GCN-NEXT:    v_cvt_f32_f16_e32 v8, v43
+; GCN-NEXT:    v_cvt_f32_f16_e32 v19, v44
+; GCN-NEXT:    v_cvt_f32_f16_e32 v33, v45
+; GCN-NEXT:    v_cvt_f32_f16_e32 v27, v46
+; GCN-NEXT:    v_cvt_f32_f16_e32 v36, v47
+; GCN-NEXT:    v_cvt_f32_f16_e32 v35, v56
 ; GCN-NEXT:  .LBB46_4: ; %end
 ; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v0, v50, s[0:3], 0 offen
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, 4, v50
-; GCN-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GCN-NEXT:    v_cvt_f16_f32_e32 v6, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GCN-NEXT:    v_or_b32_e32 v3, v6, v3
+; GCN-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, v38, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 4, v38
+; GCN-NEXT:    buffer_store_dword v3, v1, s[0:3], 0 offen
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v45, v1, v0
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v45, v3, v1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v44, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v59, vcc, 8, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v44, v3, v1
+; GCN-NEXT:    v_add_i32_e32 v59, vcc, 8, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v47, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v58, vcc, 12, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v47, v3, v1
+; GCN-NEXT:    v_add_i32_e32 v58, vcc, 12, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v46, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v57, vcc, 16, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v46, v3, v1
+; GCN-NEXT:    v_add_i32_e32 v57, vcc, 16, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v56, vcc, 20, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v56, vcc, 20, v38
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, 24, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, 24, v38
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v48, vcc, 28, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v6, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v6, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, 28, v38
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v8, vcc, 32, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v10, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, 32, v38
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v14, vcc, 36, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v10, v1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GCN-NEXT:    v_add_i32_e32 v16, vcc, 36, v38
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v18, vcc, 40, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v63, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v20, vcc, 40, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GCN-NEXT:    v_add_i32_e32 v22, vcc, 44, v50
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v60, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v11, vcc, 44, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v63, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v24, vcc, 48, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v9, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v26, vcc, 48, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v60, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v28, vcc, 52, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v28, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v30, vcc, 52, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v30, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v31, vcc, 56, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v31, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v34, vcc, 56, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v61, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v37, vcc, 60, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v61, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v37, vcc, 60, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v7, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v39, vcc, 64, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v18, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v39, vcc, 64, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v62, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v49, vcc, 0x44, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v62, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v49, vcc, 0x44, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v5, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v51, vcc, 0x48, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v14, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v51, vcc, 0x48, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v12, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v52, v12, v0
-; GCN-NEXT:    v_add_i32_e32 v53, vcc, 0x4c, v50
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v52, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v53, vcc, 0x4c, v38
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v12, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v54, v12, v0
-; GCN-NEXT:    v_add_i32_e32 v55, vcc, 0x50, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v32
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v11, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v1, v11, v0
-; GCN-NEXT:    v_add_i32_e32 v40, vcc, 0x54, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v20
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v54, v10, v1
+; GCN-NEXT:    v_add_i32_e32 v55, vcc, 0x50, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v1, v17
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v11, v9
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v11, v11, v0
-; GCN-NEXT:    v_add_i32_e32 v41, vcc, 0x58, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v26
-; GCN-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v7, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT:    v_or_b32_e32 v1, v7, v1
+; GCN-NEXT:    v_add_i32_e32 v40, vcc, 0x54, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v7, v32
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v12, v9
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v15, v12, v0
-; GCN-NEXT:    v_add_i32_e32 v42, vcc, 0x5c, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v6
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v7, 16, v7
+; GCN-NEXT:    v_or_b32_e32 v7, v10, v7
+; GCN-NEXT:    v_add_i32_e32 v41, vcc, 0x58, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v50
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cvt_f16_f32_e32 v6, v6
+; GCN-NEXT:    v_cvt_f16_f32_e32 v13, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
+; GCN-NEXT:    v_or_b32_e32 v13, v13, v10
+; GCN-NEXT:    v_add_i32_e32 v42, vcc, 0x5c, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cvt_f16_f32_e32 v10, v5
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v19, v6, v0
-; GCN-NEXT:    v_add_i32_e32 v43, vcc, 0x60, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v38
-; GCN-NEXT:    v_cvt_f16_f32_e32 v6, v27
+; GCN-NEXT:    v_or_b32_e32 v17, v10, v0
+; GCN-NEXT:    v_add_i32_e32 v43, vcc, 0x60, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v24
+; GCN-NEXT:    v_cvt_f16_f32_e32 v5, v29
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v9, v6, v0
-; GCN-NEXT:    v_add_i32_e32 v23, vcc, 0x64, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v33
+; GCN-NEXT:    v_or_b32_e32 v5, v5, v0
+; GCN-NEXT:    v_add_i32_e32 v21, vcc, 0x64, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v48
 ; GCN-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v13, v2, v0
-; GCN-NEXT:    v_add_i32_e32 v27, vcc, 0x68, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v16
-; GCN-NEXT:    v_cvt_f16_f32_e32 v2, v4
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v0
+; GCN-NEXT:    v_add_i32_e32 v25, vcc, 0x68, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v22
+; GCN-NEXT:    v_cvt_f16_f32_e32 v4, v4
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v17, v2, v0
-; GCN-NEXT:    v_add_i32_e32 v32, vcc, 0x6c, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v21
-; GCN-NEXT:    v_cvt_f16_f32_e32 v2, v10
+; GCN-NEXT:    v_or_b32_e32 v15, v4, v0
+; GCN-NEXT:    v_add_i32_e32 v29, vcc, 0x6c, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v19
+; GCN-NEXT:    v_cvt_f16_f32_e32 v4, v8
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v21, v2, v0
-; GCN-NEXT:    v_add_i32_e32 v33, vcc, 0x70, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v25
-; GCN-NEXT:    v_cvt_f16_f32_e32 v2, v34
+; GCN-NEXT:    v_or_b32_e32 v19, v4, v0
+; GCN-NEXT:    v_add_i32_e32 v32, vcc, 0x70, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v27
+; GCN-NEXT:    v_cvt_f16_f32_e32 v4, v33
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v25, v2, v0
-; GCN-NEXT:    v_add_i32_e32 v34, vcc, 0x74, v50
-; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v36
-; GCN-NEXT:    v_cvt_f16_f32_e32 v2, v35
+; GCN-NEXT:    v_or_b32_e32 v23, v4, v0
+; GCN-NEXT:    v_add_i32_e32 v33, vcc, 0x74, v38
+; GCN-NEXT:    v_cvt_f16_f32_e32 v0, v35
+; GCN-NEXT:    v_cvt_f16_f32_e32 v4, v36
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT:    v_or_b32_e32 v29, v2, v0
-; GCN-NEXT:    v_add_i32_e32 v36, vcc, 0x78, v50
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, 0x7c, v50
+; GCN-NEXT:    v_or_b32_e32 v27, v4, v0
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, 0x78, v38
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, 0x7c, v38
 ; GCN-NEXT:    buffer_store_dword v45, v59, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v44, v58, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v47, v57, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v46, v56, s[0:3], 0 offen
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v3, s[0:3], 0 offen
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v48, s[0:3], 0 offen
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v8, s[0:3], 0 offen
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_store_dword v4, v3, s[0:3], 0 offen
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v14, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v3, v6, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v18, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v3, v12, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v22, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v3, v16, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v2, v24, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v63, v28, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v60, v31, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v30, v37, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v3, v20, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v63, v11, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v60, v26, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v9, v30, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v28, v34, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v31, v37, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v61, v39, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v7, v49, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v18, v49, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v62, v51, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v5, v53, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v14, v53, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v52, v55, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v54, v40, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_store_dword v1, v41, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v11, v42, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v15, v43, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v19, v23, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v9, v27, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v13, v32, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v17, v33, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v21, v34, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v25, v36, s[0:3], 0 offen
-; GCN-NEXT:    buffer_store_dword v29, v0, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v7, v42, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v13, v43, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v17, v21, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v5, v25, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v2, v29, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v15, v32, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v19, v33, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v23, v35, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v27, v0, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
@@ -90059,11 +90062,12 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; VI-NEXT:    v_add_u16_e32 v22, 3, v22
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_sdwa v36, v23, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; VI-NEXT:    v_add_u16_sdwa v36, v36, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_add_u16_e32 v22, 3, v22
+; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_or_b32_sdwa v24, v23, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_add_u16_e32 v7, 3, v7
@@ -91285,15 +91289,16 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
 ; GFX9-NEXT:    v_add_u16_e32 v27, 3, v27
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_or_b32_sdwa v39, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_add_u16_e32 v7, 0x300, v7
 ; GFX9-NEXT:    v_add_u16_e32 v25, 0x300, v39
-; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_add_u16_e32 v27, 3, v27
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_or_b32_sdwa v48, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
@@ -101926,11 +101931,12 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
 ; VI-NEXT:    v_add_u16_e32 v22, 3, v22
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_sdwa v36, v23, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
 ; VI-NEXT:    v_add_u16_sdwa v36, v36, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_add_u16_e32 v22, 3, v22
+; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_or_b32_sdwa v24, v23, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_add_u16_e32 v7, 3, v7
@@ -103152,15 +103158,16 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
 ; GFX9-NEXT:    v_add_u16_e32 v27, 3, v27
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_or_b32_sdwa v39, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_add_u16_e32 v7, 0x300, v7
 ; GFX9-NEXT:    v_add_u16_e32 v25, 0x300, v39
-; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_add_u16_e32 v27, 3, v27
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_or_b32_sdwa v48, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
@@ -107786,21 +107793,19 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v86, 0x40c00000, v86
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v36, v36, v37, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v37, v38, v39, 0x7fff
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v38, 0x400000, v39
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v36.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_add_f32 v87, 0x40c00000, v87 :: v_dual_lshlrev_b32 v12, 16, v12
-; GFX11-TRUE16-NEXT:    v_dual_add_f32 v96, 0x40c00000, v96 :: v_dual_cndmask_b32 v21, v37, v38
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v21, v37, v38, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v37, v48, v49, 0x7fff
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v38, 0x400000, v49
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v49, v49
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v48, 0x40c00000, v22
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v96, 0x40c00000, v96
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v14, 16, v14
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v15
 ; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v22, v37, v38 :: v_dual_add_f32 v49, 0x40c00000, v51
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v39, v50, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v38, 0x400000, v50
@@ -107810,7 +107815,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v37, v39, v50, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v39, v48, 16, 1
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v50, v49, 16, 1
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v98, 0x40c00000, v98
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v15
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v101, v14, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v37, v37, v38, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v39, v48, 0x7fff
@@ -107818,16 +107823,15 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v48, v48
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v101, v101, v14, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v112, 0x400000, v14
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v102, v98, 16, 1
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v114, 0x400000, v98
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v23, v38, v39, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_dual_add_f32 v98, 0x40c00000, v98 :: v_dual_cndmask_b32 v23, v38, v39
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v38, v50, v49, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v39, 0x400000, v49
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v50, 0x40c00000, v52
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v48, v51, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT:    v_add3_u32 v102, v102, v98, 0x7fff
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v102, v98, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v114, 0x400000, v98
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v49, v50, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v38, v38, v39, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v39, v48, v51, 0x7fff
@@ -107835,7 +107839,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v51, v51
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v25
 ; GFX11-TRUE16-NEXT:    v_dual_add_f32 v52, 0x40c00000, v24 :: v_dual_lshlrev_b32 v25, 16, v25
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_add3_u32 v102, v102, v98, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v24, v39, v48, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v39, v49, v50, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v48, 0x400000, v50
@@ -107965,8 +107969,8 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v64, v65, v66, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v65, 0x400000, v66
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-TRUE16-NEXT:    v_add_f32_e32 v70, 0x40c00000, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v66, 0x400000, v67
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v70, 0x40c00000, v1
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v64, v64, v65, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v65, v68, v67, 0x7fff
@@ -107977,8 +107981,8 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v64.l, v64.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v1, v65, v66, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v65, v68, v69, 0x7fff
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v66, 0x400000, v69
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v3
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v69, v2, 16, 1
@@ -108263,15 +108267,15 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v33, v34, v36, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v34, 0x400000, v36
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v37, v17, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v35, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v39, 0x40c00000, v18
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v34, v37, v35, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v39, 0x40c00000, v18
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v17, v33, v17, 0x7060302
@@ -108478,14 +108482,12 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v0, 0x40c00000, v0 :: v_dual_lshlrev_b32 v67, 16, v1
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v30, v54, v30, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v55, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v67, 0x40c00000, v67
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v55, v65, v66, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v66
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v65, v68, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v67, 0x40c00000, v67
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v66, v0, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v64, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v65, v68, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v68
@@ -113248,15 +113250,15 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v33, v34, v36, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v34, 0x400000, v36
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v37, v17, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v35, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v35
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v39, 0x40c00000, v18
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v34, v37, v35, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v39, 0x40c00000, v18
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v17, v33, v17, 0x7060302
@@ -113463,14 +113465,12 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v0, 0x40c00000, v0 :: v_dual_lshlrev_b32 v67, 16, v1
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v30, v54, v30, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v55, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v67, 0x40c00000, v67
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v55, v65, v66, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v66
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v65, v68, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v67, 0x40c00000, v67
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v66, v0, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v64, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v65, v68, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v68
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
index c0577b1c1a2b5..970027bc617e4 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
@@ -9533,22 +9533,23 @@ define <8 x i16> @bitcast_v8bf16_to_v8i16(<8 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v5, v11, v12 :: v_dual_add_f32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v7, v13, v1, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v3
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v11, v6, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v9, v9, v0, 0x7fff
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v1, v7, v8 :: v_dual_add_f32 v2, 0x40c00000, v2
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v7, 0x40c00000, v12
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v8, v11, v6, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, 0x400000, v6
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v12, v2, 16, 1
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v13, v7, 16, 1
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v14, v3, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, 0x400000, v2
+; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v5, v1, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v8, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v8, v12, v2, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v11, v13, v7, 0x7fff
@@ -9556,18 +9557,18 @@ define <8 x i16> @bitcast_v8bf16_to_v8i16(<8 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v13, v14, v3, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, 0x400000, v3
-; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v5, v1, 0x7060302
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v11, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v13, v14, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v3, v7, v3, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v8, v15, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v2, v6, v2, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v10, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v0, v4, v0, 0x7060302
 ; GFX11-FAKE16-NEXT:  .LBB47_2: ; %end
 ; GFX11-FAKE16-NEXT:    s_or_b32 exec_lo, exec_lo, s0
@@ -11092,22 +11093,23 @@ define <8 x half> @bitcast_v8bf16_to_v8f16(<8 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v5, v11, v12 :: v_dual_add_f32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v7, v13, v1, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v3
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v11, v6, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v9, v9, v0, 0x7fff
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v1, v7, v8 :: v_dual_add_f32 v2, 0x40c00000, v2
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v7, 0x40c00000, v12
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v8, v11, v6, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, 0x400000, v6
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v12, v2, 16, 1
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v13, v7, 16, 1
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v14, v3, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, 0x400000, v2
+; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v5, v1, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v8, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v8, v12, v2, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v11, v13, v7, 0x7fff
@@ -11115,18 +11117,18 @@ define <8 x half> @bitcast_v8bf16_to_v8f16(<8 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v13, v14, v3, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, 0x400000, v3
-; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v5, v1, 0x7060302
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v11, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v13, v14, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v3, v7, v3, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v8, v15, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v2, v6, v2, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v10, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v0, v4, v0, 0x7060302
 ; GFX11-FAKE16-NEXT:  .LBB51_2: ; %end
 ; GFX11-FAKE16-NEXT:    s_or_b32 exec_lo, exec_lo, s0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index cc32c19b267bf..9137900b1b53b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -20552,19 +20552,19 @@ define <32 x i8> @bitcast_v16bf16_to_v32i8(<16 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v9, v15, v10, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, 0x400000, v10
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v10, v19, v14, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v20, v9, v15 :: v_dual_add_f32 v9, 0x40c00000, v17
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v17, 0xffff0000, v33
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v18, 0x400000, v12
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v12, v12
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, 0x400000, v14
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v16, v18, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v12, v16, v18 :: v_dual_and_b32 v17, 0xffff0000, v33
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v16, v9, 16, 1
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v18, 16, v32
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v14, v10, v15 :: v_dual_add_f32 v15, 0x40c00000, v17
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v17, 0x40c00000, v18
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v16, v16, v9, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v18, 0x400000, v9
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index 5f21bdc09a15d..735f3eeb8afbb 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -3506,15 +3506,15 @@ define <16 x i32> @bitcast_v32bf16_to_v16i32(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v18, v22, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v17, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_add_f32 v19, 0x40c00000, v21
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v19, 0x40c00000, v21
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v22, v18, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v16, v19, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v20, 0x400000, v19
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
@@ -11012,15 +11012,15 @@ define <16 x float> @bitcast_v32bf16_to_v16f32(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v18, v22, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v17, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_add_f32 v19, 0x40c00000, v21
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v19, 0x40c00000, v21
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v22, v18, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v16, v19, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v20, 0x400000, v19
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
@@ -18286,15 +18286,15 @@ define <8 x i64> @bitcast_v32bf16_to_v8i64(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v18, v22, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v17, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_add_f32 v19, 0x40c00000, v21
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v19, 0x40c00000, v21
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v22, v18, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v16, v19, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v20, 0x400000, v19
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
@@ -25220,15 +25220,15 @@ define <8 x double> @bitcast_v32bf16_to_v8f64(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v18, v22, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v8, v17, 0x7060302
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_add_f32 v19, 0x40c00000, v21
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v19, 0x40c00000, v21
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v22, v18, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v16, v19, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v20, 0x400000, v19
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
@@ -31733,44 +31733,44 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v11
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v24, 0x40c00000, v24 :: v_dual_lshlrev_b32 v25, 16, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v21, v17, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v19, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v22, 0x400000, v16
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v25, 0x40c00000, v25
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v21, v21, v17, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v19, v16, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_lshlrev_b32 v27, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v16, v19, v22, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v25, 0x40c00000, v25 :: v_dual_add_f32 v6, 0x40c00000, v6
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v16, v19, v22 :: v_dual_lshlrev_b32 v27, 16, v8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v19, 0x400000, v17
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v22, 16, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_add_f32 v27, 0x40c00000, v27
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v0, 0x40c00000, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v27, 0x40c00000, v27
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_lshlrev_b32 v29, 16, v10
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v20, v0, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v23, 0x400000, v0
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_add_f32 v29, 0x40c00000, v29
-; GFX11-FAKE16-NEXT:    v_add3_u32 v20, v20, v0, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-FAKE16-NEXT:    v_add3_u32 v20, v20, v0, 0x7fff
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v29, 0x40c00000, v29
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v30, 0x40c00000, v30 :: v_dual_lshlrev_b32 v31, 16, v12
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v20, v23 :: v_dual_lshlrev_b32 v23, 16, v4
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v20, v1, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v10, 0x40c00000, v10 :: v_dual_add_f32 v23, 0x40c00000, v23
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v23, 0x40c00000, v23
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v0, v0, v16, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v21, v19, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v20, v1, 0x7fff
@@ -31788,12 +31788,12 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v21, v18, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v20, 0x400000, v18
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v33, 0x400000, v31
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v34, v12, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v21, v18, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v21, v2, 16, 1
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v34, v12, 16, 1
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v1, v17, 0x7060302
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v18, v19, v20 :: v_dual_and_b32 v7, 0xffff0000, v7
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v21, v2, 0x7fff
@@ -38354,44 +38354,44 @@ define <32 x half> @bitcast_v32bf16_to_v32f16(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v11
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v24, 0x40c00000, v24 :: v_dual_lshlrev_b32 v25, 16, v6
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v21, v17, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v19, v16, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v22, 0x400000, v16
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v25, 0x40c00000, v25
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v21, v21, v17, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v19, v16, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_lshlrev_b32 v27, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v16, v19, v22, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v25, 0x40c00000, v25 :: v_dual_add_f32 v6, 0x40c00000, v6
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v16, v19, v22 :: v_dual_lshlrev_b32 v27, 16, v8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v19, 0x400000, v17
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v22, 16, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_add_f32 v27, 0x40c00000, v27
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v0, 0x40c00000, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v27, 0x40c00000, v27
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT:    v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_lshlrev_b32 v29, 16, v10
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v20, v0, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v23, 0x400000, v0
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-FAKE16-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_add_f32 v29, 0x40c00000, v29
-; GFX11-FAKE16-NEXT:    v_add3_u32 v20, v20, v0, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-FAKE16-NEXT:    v_add3_u32 v20, v20, v0, 0x7fff
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v29, 0x40c00000, v29
 ; GFX11-FAKE16-NEXT:    v_dual_add_f32 v30, 0x40c00000, v30 :: v_dual_lshlrev_b32 v31, 16, v12
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v20, v23 :: v_dual_lshlrev_b32 v23, 16, v4
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v20, v1, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-FAKE16-NEXT:    v_dual_add_f32 v10, 0x40c00000, v10 :: v_dual_add_f32 v23, 0x40c00000, v23
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v23, 0x40c00000, v23
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v0, v0, v16, 0x7060302
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v17, v21, v19, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v20, v1, 0x7fff
@@ -38409,12 +38409,12 @@ define <32 x half> @bitcast_v32bf16_to_v32f16(<32 x bfloat> %a, i32 %b) {
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v21, v18, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v20, 0x400000, v18
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-FAKE16-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v33, 0x400000, v31
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v34, v12, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v21, v18, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v21, v2, 16, 1
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v34, v12, 16, 1
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v1, v17, 0x7060302
+; GFX11-FAKE16-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v18, v19, v20 :: v_dual_and_b32 v7, 0xffff0000, v7
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v19, v21, v2, 0x7fff
@@ -45369,27 +45369,28 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v20, v9, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v23, 0x400000, v9
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v22, 0xffff0000, v14
-; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v65, v19, v25 :: v_dual_lshlrev_b32 v14, 16, v14
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v7.l, v52.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v65, v19, v25, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v12, v12
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v20, v20, v9, 0x7fff
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v7.l, v52.h
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v14, 16, v14
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v19, v21, 16, 1
-; GFX11-TRUE16-NEXT:    v_dual_add_f32 v14, 0x40c00000, v14 :: v_dual_lshlrev_b32 v11, 16, v11
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v53, v24, v50, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT:    v_add_f32_e32 v14, 0x40c00000, v14
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v71, 24, v10
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v80, 8, v10
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v25, v14, 16, 1
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v12, 0xffff, v7, v53
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v9, v20, v23, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v7, 0x40c00000, v11
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v11, v19, v21, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v19, 0x400000, v21
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v20.l, v65.h
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v25, v14, 16, 1
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v23, v7, 16, 1
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v20.l, v65.h
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v64, 24, v12
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v66, 8, v12
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v11, v11, v19, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v19, 0x40c00000, v22
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v21, v23, v7, 0x7fff
@@ -45417,7 +45418,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v68, v23, v24, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v23, v13, 16, 1
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v83, 8, v9
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v66, 8, v12
 ; GFX11-TRUE16-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v22.l, v68.h
 ; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v7, v14, v19 :: v_dual_add_f32 v14, 0x40c00000, v21
@@ -45459,7 +45460,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v16, 0xffff, v19, v82
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v54, 8, v14
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v69, 8, v11
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v83, 8, v9
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v15, 0xffff, v15, v13
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v13, 0xffff, v21, v7
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v7, 0xffff, v18, v17
@@ -46265,41 +46266,53 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(4)
 ; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:96
-; GCN-NEXT:    s_waitcnt expcnt(2)
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:80
-; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:76
-; GCN-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:72
-; GCN-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:68
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:64
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:88
+; GCN-NEXT:    s_waitcnt expcnt(3)
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:80
+; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:76
+; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:72
+; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:68
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:64
 ; GCN-NEXT:    s_waitcnt expcnt(1)
 ; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:60
-; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:56
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:52
-; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:48
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:56
+; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:52
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:48
 ; GCN-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:44
-; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:40
-; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:36
-; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:32
-; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:28
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:24
-; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:20
-; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:16
-; GCN-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:12
-; GCN-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:8
-; GCN-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:4
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(1)
+; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:40
+; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:36
+; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:132
+; GCN-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:128
+; GCN-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:124
+; GCN-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:120
+; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:116
+; GCN-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:112
+; GCN-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:108
+; GCN-NEXT:    s_waitcnt vmcnt(6)
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v18
+; GCN-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:32
+; GCN-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:28
+; GCN-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:24
+; GCN-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:20
+; GCN-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:16
+; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:12
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:8
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:4
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v5
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(1)
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v7
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v7
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v11
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(1)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v13
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(1)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v15
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
@@ -46317,53 +46330,42 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v29
 ; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(1)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:132
-; GCN-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:128
-; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:124
-; GCN-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:120
-; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:116
-; GCN-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:112
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:108
-; GCN-NEXT:    s_waitcnt vmcnt(6)
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v32
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt vmcnt(14) expcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v38
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v37
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GCN-NEXT:    s_waitcnt expcnt(1)
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v36
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v32
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v31
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v31
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v26
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(1)
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v30
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v25
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v26
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v14
 ; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 8, v25
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(1)
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:88
-; GCN-NEXT:    v_lshlrev_b32_e32 v5, 24, v24
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v5, 24, v10
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:84
-; GCN-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NEXT:    v_lshlrev_b32_e32 v26, 8, v3
+; GCN-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:84
+; GCN-NEXT:    v_lshlrev_b32_e32 v58, 8, v24
 ; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:92
+; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v17
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:104
 ; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:100
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v13
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GCN-NEXT:    v_lshlrev_b32_e32 v30, 8, v11
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v35
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v26, 8, v34
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v7
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 24, v33
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
 ; GCN-NEXT:    ; implicit-def: $vgpr43
 ; GCN-NEXT:    ; implicit-def: $vgpr35
 ; GCN-NEXT:    ; implicit-def: $vgpr49
@@ -46406,12 +46408,10 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v3, v3, v2
 ; GCN-NEXT:    v_and_b32_e32 v4, 0xff, v12
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v4, v2
-; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GCN-NEXT:    v_or_b32_e32 v41, v4, v2
 ; GCN-NEXT:    v_and_b32_e32 v5, 0xff, v20
-; GCN-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v11, v5, v2
@@ -46457,29 +46457,29 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v32, 0xff, v2
-; GCN-NEXT:    v_and_b32_e32 v33, 0xff, v62
-; GCN-NEXT:    v_lshlrev_b32_e32 v8, 24, v61
+; GCN-NEXT:    v_and_b32_e32 v33, 0xff, v22
+; GCN-NEXT:    v_lshlrev_b32_e32 v8, 24, v18
+; GCN-NEXT:    v_and_b32_e32 v30, 0xff, v30
 ; GCN-NEXT:    v_and_b32_e32 v34, 0xff, v60
-; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v56
-; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v44
-; GCN-NEXT:    v_and_b32_e32 v37, 0xff, v47
-; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v45
-; GCN-NEXT:    v_and_b32_e32 v38, 0xff, v63
-; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v46
-; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v6
-; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v59
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v58
-; GCN-NEXT:    v_and_b32_e32 v50, 0xff, v57
-; GCN-NEXT:    v_and_b32_e32 v51, 0xff, v10
+; GCN-NEXT:    v_and_b32_e32 v35, 0xff, v59
+; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v46
+; GCN-NEXT:    v_lshlrev_b32_e32 v9, 24, v44
+; GCN-NEXT:    v_and_b32_e32 v37, 0xff, v63
+; GCN-NEXT:    v_and_b32_e32 v38, 0xff, v45
+; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v6
+; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v57
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 24, v56
+; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v47
+; GCN-NEXT:    v_and_b32_e32 v50, 0xff, v14
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xff, v25
 ; GCN-NEXT:    v_and_b32_e32 v24, 0xff, v24
-; GCN-NEXT:    v_lshlrev_b32_e32 v10, 24, v17
-; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v22
-; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v14
-; GCN-NEXT:    v_and_b32_e32 v54, 0xff, v18
-; GCN-NEXT:    v_lshlrev_b32_e32 v55, 16, v7
-; GCN-NEXT:    v_lshlrev_b32_e32 v40, 16, v13
-; GCN-NEXT:    v_lshlrev_b32_e32 v41, 16, v15
+; GCN-NEXT:    v_lshlrev_b32_e32 v14, 24, v17
+; GCN-NEXT:    v_and_b32_e32 v51, 0xff, v62
+; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v10
+; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v61
+; GCN-NEXT:    v_lshlrev_b32_e32 v54, 16, v7
+; GCN-NEXT:    v_lshlrev_b32_e32 v55, 16, v13
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 16, v15
 ; GCN-NEXT:    v_lshlrev_b32_e32 v21, 16, v19
 ; GCN-NEXT:    v_lshlrev_b32_e32 v22, 16, v20
 ; GCN-NEXT:    v_lshlrev_b32_e32 v7, 16, v23
@@ -46488,42 +46488,40 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v13, 16, v28
 ; GCN-NEXT:    v_lshlrev_b32_e32 v15, 16, v29
 ; GCN-NEXT:    v_lshlrev_b32_e32 v18, 16, v31
-; GCN-NEXT:    v_lshlrev_b32_e32 v14, 16, v32
+; GCN-NEXT:    v_lshlrev_b32_e32 v10, 16, v32
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v33
-; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v34
+; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v30
 ; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v23, v35, v6
-; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v36
-; GCN-NEXT:    v_lshlrev_b32_e32 v28, 16, v37
-; GCN-NEXT:    v_lshlrev_b32_e32 v29, 16, v38
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v23, v34, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v27, 16, v35
+; GCN-NEXT:    v_lshlrev_b32_e32 v28, 16, v36
+; GCN-NEXT:    v_lshlrev_b32_e32 v29, 16, v37
+; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v31, v39, v6
+; GCN-NEXT:    v_or_b32_e32 v30, v38, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v31, 16, v39
 ; GCN-NEXT:    v_lshlrev_b32_e32 v42, 16, v48
 ; GCN-NEXT:    v_lshlrev_b32_e32 v44, 16, v49
-; GCN-NEXT:    v_lshlrev_b32_e32 v45, 16, v50
-; GCN-NEXT:    v_or_b32_e32 v26, v51, v26
+; GCN-NEXT:    v_or_b32_e32 v45, v50, v58
 ; GCN-NEXT:    v_lshlrev_b32_e32 v25, 16, v25
 ; GCN-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v52
-; GCN-NEXT:    v_or_b32_e32 v30, v53, v30
-; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v54
-; GCN-NEXT:    v_or_b32_e32 v43, v5, v55
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GCN-NEXT:    v_lshlrev_b32_e32 v46, 16, v51
+; GCN-NEXT:    v_or_b32_e32 v26, v52, v26
+; GCN-NEXT:    v_lshlrev_b32_e32 v47, 16, v53
+; GCN-NEXT:    v_or_b32_e32 v43, v5, v54
+; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v35, v5, v40
+; GCN-NEXT:    v_or_b32_e32 v35, v5, v55
 ; GCN-NEXT:    v_lshlrev_b32_e32 v49, 16, v3
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v3, v3, v41
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v40
 ; GCN-NEXT:    v_or_b32_e32 v33, v0, v21
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v5, v0, v22
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_lshlrev_b32_e32 v39, 16, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v39, 16, v41
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v7, v0, v7
@@ -46540,39 +46538,39 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v13, v0, v18
 ; GCN-NEXT:    v_lshlrev_b32_e32 v34, 16, v12
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v15, v0, v14
+; GCN-NEXT:    v_or_b32_e32 v15, v0, v10
 ; GCN-NEXT:    v_or_b32_e32 v36, v8, v16
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v37, v0, v17
 ; GCN-NEXT:    v_lshlrev_b32_e32 v38, 16, v23
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v19, v0, v27
 ; GCN-NEXT:    v_or_b32_e32 v48, v9, v28
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v21, v0, v29
-; GCN-NEXT:    v_lshlrev_b32_e32 v50, 16, v31
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GCN-NEXT:    v_lshlrev_b32_e32 v50, 16, v30
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v23, v0, v42
-; GCN-NEXT:    v_or_b32_e32 v52, v2, v44
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v23, v0, v31
+; GCN-NEXT:    v_or_b32_e32 v52, v2, v42
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v53, v0, v45
-; GCN-NEXT:    v_lshlrev_b32_e32 v54, 16, v26
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v53, v0, v44
+; GCN-NEXT:    v_lshlrev_b32_e32 v54, 16, v45
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v27, v0, v25
-; GCN-NEXT:    v_or_b32_e32 v40, v10, v24
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GCN-NEXT:    v_or_b32_e32 v40, v14, v24
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v29, v0, v46
-; GCN-NEXT:    v_lshlrev_b32_e32 v42, 16, v30
-; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GCN-NEXT:    v_lshlrev_b32_e32 v42, 16, v26
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v31, v0, v47
 ; GCN-NEXT:    ; implicit-def: $vgpr0
@@ -46606,26 +46604,32 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; implicit-def: $vgpr28
 ; GCN-NEXT:    ; implicit-def: $vgpr2
 ; GCN-NEXT:    ; kill: killed $vgpr2
-; GCN-NEXT:    ; implicit-def: $vgpr62
-; GCN-NEXT:    ; implicit-def: $vgpr61
+; GCN-NEXT:    ; implicit-def: $vgpr22
+; GCN-NEXT:    ; implicit-def: $vgpr18
+; GCN-NEXT:    ; implicit-def: $vgpr30
 ; GCN-NEXT:    ; implicit-def: $vgpr60
-; GCN-NEXT:    ; implicit-def: $vgpr56
+; GCN-NEXT:    ; implicit-def: $vgpr59
+; GCN-NEXT:    ; implicit-def: $vgpr46
 ; GCN-NEXT:    ; implicit-def: $vgpr44
-; GCN-NEXT:    ; implicit-def: $vgpr47
-; GCN-NEXT:    ; implicit-def: $vgpr45
 ; GCN-NEXT:    ; implicit-def: $vgpr63
-; GCN-NEXT:    ; implicit-def: $vgpr46
+; GCN-NEXT:    ; implicit-def: $vgpr45
 ; GCN-NEXT:    ; implicit-def: $vgpr6
-; GCN-NEXT:    ; implicit-def: $vgpr59
-; GCN-NEXT:    ; implicit-def: $vgpr58
 ; GCN-NEXT:    ; implicit-def: $vgpr57
-; GCN-NEXT:    ; implicit-def: $vgpr10
+; GCN-NEXT:    ; implicit-def: $vgpr56
+; GCN-NEXT:    ; implicit-def: $vgpr47
+; GCN-NEXT:    ; implicit-def: $vgpr14
 ; GCN-NEXT:    ; implicit-def: $vgpr25
 ; GCN-NEXT:    ; implicit-def: $vgpr24
 ; GCN-NEXT:    ; implicit-def: $vgpr17
-; GCN-NEXT:    ; implicit-def: $vgpr22
-; GCN-NEXT:    ; implicit-def: $vgpr14
-; GCN-NEXT:    ; implicit-def: $vgpr18
+; GCN-NEXT:    ; implicit-def: $vgpr62
+; GCN-NEXT:    ; implicit-def: $vgpr10
+; GCN-NEXT:    ; implicit-def: $vgpr61
+; GCN-NEXT:    ; implicit-def: $vgpr2
+; GCN-NEXT:    ; kill: killed $vgpr2
+; GCN-NEXT:    ; implicit-def: $vgpr2
+; GCN-NEXT:    ; kill: killed $vgpr2
+; GCN-NEXT:    ; implicit-def: $vgpr2
+; GCN-NEXT:    ; kill: killed $vgpr2
 ; GCN-NEXT:    ; implicit-def: $vgpr26
 ; GCN-NEXT:    ; kill: killed $vgpr26
 ; GCN-NEXT:    ; implicit-def: $vgpr2
@@ -46658,18 +46662,12 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    ; kill: killed $vgpr26
 ; GCN-NEXT:    ; implicit-def: $vgpr26
 ; GCN-NEXT:    ; kill: killed $vgpr26
-; GCN-NEXT:    ; implicit-def: $vgpr2
-; GCN-NEXT:    ; kill: killed $vgpr2
+; GCN-NEXT:    ; implicit-def: $vgpr58
 ; GCN-NEXT:    ; implicit-def: $vgpr26
 ; GCN-NEXT:    ; kill: killed $vgpr26
 ; GCN-NEXT:    ; implicit-def: $vgpr26
 ; GCN-NEXT:    ; kill: killed $vgpr26
 ; GCN-NEXT:    ; implicit-def: $vgpr26
-; GCN-NEXT:    ; implicit-def: $vgpr30
-; GCN-NEXT:    ; kill: killed $vgpr30
-; GCN-NEXT:    ; implicit-def: $vgpr30
-; GCN-NEXT:    ; kill: killed $vgpr30
-; GCN-NEXT:    ; implicit-def: $vgpr30
 ; GCN-NEXT:    ; implicit-def: $vgpr2
 ; GCN-NEXT:    ; kill: killed $vgpr2
 ; GCN-NEXT:  .LBB55_2: ; %Flow
@@ -46677,37 +46675,37 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_cbranch_execz .LBB55_4
 ; GCN-NEXT:  ; %bb.3: ; %cmp.true
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, 3, v14
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, 3, v10
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xff, v3
-; GCN-NEXT:    v_or_b32_e32 v3, v30, v3
+; GCN-NEXT:    v_or_b32_e32 v3, v26, v3
 ; GCN-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, 3, v24
 ; GCN-NEXT:    v_lshlrev_b32_e32 v7, 8, v17
 ; GCN-NEXT:    v_and_b32_e32 v5, 0xff, v5
 ; GCN-NEXT:    v_or_b32_e32 v5, v7, v5
-; GCN-NEXT:    v_add_i32_e32 v7, vcc, 3, v10
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 3, v14
 ; GCN-NEXT:    v_and_b32_e32 v7, 0xff, v7
-; GCN-NEXT:    v_or_b32_e32 v7, v26, v7
-; GCN-NEXT:    v_add_i32_e32 v10, vcc, 3, v59
-; GCN-NEXT:    v_lshlrev_b32_e32 v11, 8, v58
+; GCN-NEXT:    v_or_b32_e32 v7, v58, v7
+; GCN-NEXT:    v_add_i32_e32 v10, vcc, 3, v57
+; GCN-NEXT:    v_lshlrev_b32_e32 v11, 8, v56
 ; GCN-NEXT:    v_and_b32_e32 v10, 0xff, v10
 ; GCN-NEXT:    v_or_b32_e32 v10, v11, v10
 ; GCN-NEXT:    s_movk_i32 s7, 0x300
-; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v18
+; GCN-NEXT:    v_add_i32_e32 v34, vcc, 3, v61
 ; GCN-NEXT:    s_mov_b32 s6, 0x3000000
-; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v22
+; GCN-NEXT:    v_add_i32_e32 v35, vcc, 3, v62
 ; GCN-NEXT:    v_add_i32_e32 v36, vcc, 3, v25
-; GCN-NEXT:    v_add_i32_e32 v38, vcc, 3, v57
-; GCN-NEXT:    v_add_i32_e32 v15, vcc, 3, v46
+; GCN-NEXT:    v_add_i32_e32 v38, vcc, 3, v47
+; GCN-NEXT:    v_add_i32_e32 v15, vcc, 3, v45
 ; GCN-NEXT:    v_add_i32_e32 v17, vcc, 3, v6
-; GCN-NEXT:    v_add_i32_e32 v18, vcc, 3, v47
-; GCN-NEXT:    v_lshlrev_b32_e32 v14, 8, v45
-; GCN-NEXT:    v_add_i32_e32 v19, vcc, 3, v63
-; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v56
-; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v44
-; GCN-NEXT:    v_add_i32_e32 v23, vcc, 3, v62
-; GCN-NEXT:    v_lshlrev_b32_e32 v13, 8, v61
-; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v60
+; GCN-NEXT:    v_add_i32_e32 v19, vcc, 3, v46
+; GCN-NEXT:    v_lshlrev_b32_e32 v14, 8, v44
+; GCN-NEXT:    v_add_i32_e32 v21, vcc, 3, v63
+; GCN-NEXT:    v_add_i32_e32 v23, vcc, 3, v60
+; GCN-NEXT:    v_add_i32_e32 v24, vcc, 3, v59
+; GCN-NEXT:    v_add_i32_e32 v22, vcc, 3, v22
+; GCN-NEXT:    v_lshlrev_b32_e32 v13, 8, v18
+; GCN-NEXT:    v_add_i32_e32 v18, vcc, 3, v30
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, 3, v28
 ; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -46756,12 +46754,12 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_and_b32_e32 v36, 0xff, v38
 ; GCN-NEXT:    v_and_b32_e32 v38, 0xff, v15
 ; GCN-NEXT:    v_and_b32_e32 v39, 0xff, v17
-; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v18
-; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v19
-; GCN-NEXT:    v_and_b32_e32 v50, 0xff, v21
-; GCN-NEXT:    v_and_b32_e32 v51, 0xff, v22
-; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v23
-; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v24
+; GCN-NEXT:    v_and_b32_e32 v48, 0xff, v19
+; GCN-NEXT:    v_and_b32_e32 v49, 0xff, v21
+; GCN-NEXT:    v_and_b32_e32 v50, 0xff, v23
+; GCN-NEXT:    v_and_b32_e32 v51, 0xff, v24
+; GCN-NEXT:    v_and_b32_e32 v52, 0xff, v22
+; GCN-NEXT:    v_and_b32_e32 v53, 0xff, v18
 ; GCN-NEXT:    v_and_b32_e32 v25, 0xff, v25
 ; GCN-NEXT:    v_and_b32_e32 v26, 0xff, v26
 ; GCN-NEXT:    v_and_b32_e32 v27, 0xff, v27
@@ -46782,7 +46780,7 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v6, 16, v34
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 16, v35
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v36
-; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v29, v29, v38
 ; GCN-NEXT:    v_lshlrev_b32_e32 v30, 16, v39
@@ -46806,7 +46804,7 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v15, 16, v15
 ; GCN-NEXT:    v_or_b32_e32 v2, v2, v16
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 16, v17
-; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v12, v17, v12
 ; GCN-NEXT:    v_lshlrev_b32_e32 v17, 16, v18
@@ -46819,39 +46817,39 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v23
 ; GCN-NEXT:    v_lshlrev_b32_e32 v21, 16, v24
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, 0x300, v3
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v9, v22, v9
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, s7, v5
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v6, v22, v6
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, s7, v7
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v4, v22, v4
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, s7, v10
-; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v1, v22, v1
 ; GCN-NEXT:    v_add_i32_e32 v22, vcc, s7, v29
-; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v23, v23, v30
 ; GCN-NEXT:    v_add_i32_e32 v14, vcc, s7, v14
-; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v24, v24, v31
 ; GCN-NEXT:    v_add_i32_e32 v29, vcc, s7, v32
-; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v30, v30, v33
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, s7, v13
-; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v31, v31, v34
 ; GCN-NEXT:    v_add_i32_e32 v25, vcc, s7, v25
-; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v26, v32, v26
 ; GCN-NEXT:    v_add_i32_e32 v11, vcc, s7, v11
@@ -46875,11 +46873,11 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v18, v32, v18
 ; GCN-NEXT:    v_add_i32_e32 v19, vcc, s7, v19
-; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v20, v32, v20
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, s7, v0
-; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_or_b32_e32 v21, v32, v21
 ; GCN-NEXT:    v_and_b32_e32 v3, 0xffff, v3
@@ -46971,9 +46969,9 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
 ; GCN-NEXT:    v_mov_b32_e32 v6, v39
 ; GCN-NEXT:    v_mov_b32_e32 v8, v51
 ; GCN-NEXT:    v_mov_b32_e32 v9, v55
-; GCN-NEXT:    s_waitcnt vmcnt(6)
 ; GCN-NEXT:    v_mov_b32_e32 v10, v41
 ; GCN-NEXT:    v_mov_b32_e32 v12, v32
+; GCN-NEXT:    s_waitcnt vmcnt(6)
 ; GCN-NEXT:    v_mov_b32_e32 v14, v34
 ; GCN-NEXT:    v_mov_b32_e32 v16, v36
 ; GCN-NEXT:    s_waitcnt vmcnt(3)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
index 8ca3e8255b634..6e8a5a1266a15 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
@@ -937,8 +937,8 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_dont_realign_stack(i32 %idx) {
 ; GISEL-GFX11-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
 ; GISEL-GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v4, v0
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
 ; GISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GISEL-GFX11-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
index 4ba9f0729ea1f..2d4f7485c6576 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
@@ -590,8 +590,8 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_preserve_dont_realign_stac
 ; GISEL-GFX11-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
 ; GISEL-GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v4, v0
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
 ; GISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GISEL-GFX11-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index 9775a37276dfd..2d3a941b8a516 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -12122,11 +12122,11 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
 ; GFX1132_DPP-NEXT:    v_bfrev_b32_e32 v5, 1
 ; GFX1132_DPP-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX1132_DPP-NEXT:    v_cmp_gt_i64_e32 vcc_lo, v[1:2], v[3:4]
-; GFX1132_DPP-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc_lo
-; GFX1132_DPP-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX1132_DPP-NEXT:    v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX1132_DPP-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX1132_DPP-NEXT:    v_readlane_b32 s3, v2, 15
 ; GFX1132_DPP-NEXT:    v_readlane_b32 s1, v2, 31
-; GFX1132_DPP-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1132_DPP-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
 ; GFX1132_DPP-NEXT:    v_readlane_b32 s0, v1, 31
 ; GFX1132_DPP-NEXT:    v_mov_b32_dpp v5, v2 row_shr:1 row_mask:0xf bank_mask:0xf
 ; GFX1132_DPP-NEXT:    v_mov_b32_dpp v4, v1 row_shr:1 row_mask:0xf bank_mask:0xf
@@ -13950,11 +13950,11 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
 ; GFX1132_DPP-NEXT:    v_bfrev_b32_e32 v5, -2
 ; GFX1132_DPP-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX1132_DPP-NEXT:    v_cmp_lt_i64_e32 vcc_lo, v[1:2], v[3:4]
-; GFX1132_DPP-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc_lo
-; GFX1132_DPP-NEXT:    v_dual_mov_b32 v4, -1 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX1132_DPP-NEXT:    v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX1132_DPP-NEXT:    v_mov_b32_e32 v4, -1
 ; GFX1132_DPP-NEXT:    v_readlane_b32 s3, v2, 15
 ; GFX1132_DPP-NEXT:    v_readlane_b32 s1, v2, 31
-; GFX1132_DPP-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1132_DPP-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
 ; GFX1132_DPP-NEXT:    v_readlane_b32 s0, v1, 31
 ; GFX1132_DPP-NEXT:    v_mov_b32_dpp v5, v2 row_shr:1 row_mask:0xf bank_mask:0xf
 ; GFX1132_DPP-NEXT:    v_mov_b32_dpp v4, v1 row_shr:1 row_mask:0xf bank_mask:0xf
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index c4957fd44e2be..ff10e7d499f9b 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -1428,47 +1428,48 @@ define void @v_store_global_v32bf16(<32 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX7-LABEL: v_store_global_v32bf16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT:    v_alignbit_b32 v3, v3, v2, 16
-; GFX7-NEXT:    v_alignbit_b32 v2, v1, v0, 16
-; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v14
-; GFX7-NEXT:    buffer_load_dword v14, off, s[0:3], s32
 ; GFX7-NEXT:    v_mul_f32_e32 v25, 1.0, v25
 ; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v15
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v25, 16, v25
 ; GFX7-NEXT:    v_mul_f32_e32 v24, 1.0, v24
 ; GFX7-NEXT:    v_mul_f32_e32 v6, 1.0, v6
 ; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v5
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX7-NEXT:    v_alignbit_b32 v25, v25, v24, 16
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v24, 16, v5
 ; GFX7-NEXT:    v_alignbit_b32 v5, v7, v6, 16
-; GFX7-NEXT:    v_mul_f32_e32 v6, 1.0, v13
+; GFX7-NEXT:    buffer_load_dword v6, off, s[0:3], s32
+; GFX7-NEXT:    v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    v_alignbit_b32 v3, v3, v2, 16
+; GFX7-NEXT:    v_alignbit_b32 v2, v1, v0, 16
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v15
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v14
+; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v13
 ; GFX7-NEXT:    v_alignbit_b32 v13, v0, v1, 16
 ; GFX7-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:8
 ; GFX7-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:4
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v12
-; GFX7-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
-; GFX7-NEXT:    v_alignbit_b32 v12, v6, v7, 16
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v11
-; GFX7-NEXT:    v_mul_f32_e32 v10, 1.0, v10
-; GFX7-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
 ; GFX7-NEXT:    v_mul_f32_e32 v29, 1.0, v29
-; GFX7-NEXT:    v_alignbit_b32 v11, v7, v10, 16
+; GFX7-NEXT:    v_mul_f32_e32 v12, 1.0, v12
+; GFX7-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v29, 16, v29
 ; GFX7-NEXT:    v_mul_f32_e32 v28, 1.0, v28
 ; GFX7-NEXT:    v_mul_f32_e32 v27, 1.0, v27
-; GFX7-NEXT:    v_mul_f32_e32 v6, 1.0, v30
+; GFX7-NEXT:    v_alignbit_b32 v12, v7, v12, 16
+; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v30
+; GFX7-NEXT:    v_mul_f32_e32 v11, 1.0, v11
 ; GFX7-NEXT:    v_mul_f32_e32 v9, 1.0, v9
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v31, 16, v27
 ; GFX7-NEXT:    v_alignbit_b32 v27, v29, v28, 16
+; GFX7-NEXT:    v_mul_f32_e32 v10, 1.0, v10
+; GFX7-NEXT:    v_lshrrev_b32_e32 v11, 16, v11
+; GFX7-NEXT:    v_alignbit_b32 v11, v11, v10, 16
+; GFX7-NEXT:    v_mul_f32_e32 v14, 1.0, v20
 ; GFX7-NEXT:    v_mul_f32_e32 v26, 1.0, v26
 ; GFX7-NEXT:    s_mov_b32 s6, 0
 ; GFX7-NEXT:    v_alignbit_b32 v26, v31, v26, 16
@@ -1478,9 +1479,9 @@ define void @v_store_global_v32bf16(<32 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX7-NEXT:    s_mov_b32 s5, s6
 ; GFX7-NEXT:    v_alignbit_b32 v4, v24, v4, 16
 ; GFX7-NEXT:    s_waitcnt vmcnt(2)
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v14
-; GFX7-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GFX7-NEXT:    v_alignbit_b32 v28, v7, v6, 16
+; GFX7-NEXT:    v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
+; GFX7-NEXT:    v_alignbit_b32 v28, v6, v7, 16
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v6, 16, v9
 ; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v8
 ; GFX7-NEXT:    v_alignbit_b32 v10, v6, v7, 16
@@ -1493,7 +1494,6 @@ define void @v_store_global_v32bf16(<32 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
 ; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v18
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
-; GFX7-NEXT:    v_mul_f32_e32 v14, 1.0, v20
 ; GFX7-NEXT:    v_alignbit_b32 v7, v6, v7, 16
 ; GFX7-NEXT:    v_mul_f32_e32 v6, 1.0, v17
 ; GFX7-NEXT:    v_alignbit_b32 v8, v8, v14, 16
@@ -5378,15 +5378,14 @@ define { <32 x i32>, bfloat } @test_overflow_stack(bfloat %a, <32 x i32> %b) {
 ; GCN-LABEL: test_overflow_stack:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:8
 ; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
 ; GCN-NEXT:    s_waitcnt expcnt(0)
-; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8
-; GCN-NEXT:    v_add_i32_e32 v31, vcc, 0x7c, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0x7c, v0
 ; GCN-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:4
 ; GCN-NEXT:    buffer_load_dword v33, off, s[0:3], s32
-; GCN-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NEXT:    buffer_store_dword v2, v31, s[0:3], 0 offen
-; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    s_waitcnt vmcnt(3)
+; GCN-NEXT:    buffer_store_dword v31, v2, s[0:3], 0 offen
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0x78, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-NEXT:    buffer_store_dword v32, v2, s[0:3], 0 offen
@@ -5394,6 +5393,7 @@ define { <32 x i32>, bfloat } @test_overflow_stack(bfloat %a, <32 x i32> %b) {
 ; GCN-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-NEXT:    buffer_store_dword v33, v2, s[0:3], 0 offen
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0x70, v0
+; GCN-NEXT:    s_waitcnt expcnt(2)
 ; GCN-NEXT:    v_add_i32_e32 v31, vcc, 0x6c, v0
 ; GCN-NEXT:    buffer_store_dword v30, v2, s[0:3], 0 offen
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0x68, v0
@@ -11050,38 +11050,38 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v22, 16, v0
 ; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.l, v3.h
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v10, v19, v21 :: v_dual_lshlrev_b32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v10, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v19, 0x400000, v2
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_dual_add_f32 v9, v22, v21 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v16, v19, vcc_lo
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v16, v1, 16, 1
+; GFX11TRUE16-NEXT:    v_add_f32_e32 v9, v22, v21
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11TRUE16-NEXT:    v_add_f32_e32 v0, v0, v8
+; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v8, v9, 16, 1
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v16, v22, vcc_lo
 ; GFX11TRUE16-NEXT:    v_add_f32_e32 v17, v24, v23
+; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v24, 0x400000, v9
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v17, 16, 1
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v21, 0x400000, v17
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_add3_u32 v19, v23, v17, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v0, 16, 1
+; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v8.l, v8.h
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v9, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_add3_u32 v16, v23, v0, 0x7fff
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -12394,280 +12394,280 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-LABEL: v_fadd_v32bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32
-; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT:    v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
+; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v32, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX10-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v13
 ; GFX10-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT:    v_add_f32_e32 v12, v12, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v28, 16, v22
-; GFX10-NEXT:    v_add_f32_e32 v39, v48, v39
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v6
-; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    v_add_f32_e32 v11, v11, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v27, 16, v21
-; GFX10-NEXT:    v_add_f32_e32 v49, v50, v49
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v5
-; GFX10-NEXT:    v_add_f32_e32 v33, v34, v33
-; GFX10-NEXT:    v_add_f32_e32 v14, v14, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v30, 16, v24
-; GFX10-NEXT:    v_add_f32_e32 v35, v36, v35
+; GFX10-NEXT:    v_add_f32_e32 v31, v32, v31
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v12
+; GFX10-NEXT:    v_add_f32_e32 v30, v14, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v14, 16, v29
+; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_bfe_u32 v32, v31, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v31
+; GFX10-NEXT:    v_bfe_u32 v35, v30, 16, 1
+; GFX10-NEXT:    v_add_f32_e32 v33, v33, v14
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX10-NEXT:    v_add3_u32 v32, v32, v31, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX10-NEXT:    v_add3_u32 v31, v35, v30, 0x7fff
+; GFX10-NEXT:    v_add_f32_e32 v35, v13, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v13, 16, v28
+; GFX10-NEXT:    v_cndmask_b32_e32 v14, v32, v34, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v30
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v21
+; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v5
+; GFX10-NEXT:    v_add3_u32 v30, v34, v33, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v29, v31, v32, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_add_f32_e32 v34, v36, v13
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_add_f32_e32 v33, v12, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v12, 16, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v11
+; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_add_f32_e32 v35, v36, v12
+; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v10
+; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX10-NEXT:    v_cndmask_b32_e32 v28, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_add_f32_e32 v34, v11, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v11, 16, v26
+; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_add_f32_e32 v33, v36, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v9
+; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v27, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_add_f32_e32 v35, v10, v26
+; GFX10-NEXT:    v_lshlrev_b32_e32 v10, 16, v25
+; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_add_f32_e32 v34, v36, v10
+; GFX10-NEXT:    v_add_f32_e32 v9, v9, v25
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v8
-; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT:    v_add_f32_e32 v13, v13, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v29, 16, v23
-; GFX10-NEXT:    v_add_f32_e32 v37, v38, v37
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v26, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v24
+; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_lshlrev_b32_e32 v52, 16, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_add_f32_e32 v33, v36, v33
+; GFX10-NEXT:    v_add_f32_e32 v8, v8, v24
+; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v23
+; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v25, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v9, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    v_add_f32_e32 v6, v6, v22
-; GFX10-NEXT:    v_lshlrev_b32_e32 v22, 16, v16
-; GFX10-NEXT:    v_add_f32_e32 v27, v50, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v9
-; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT:    v_add_f32_e32 v8, v8, v24
-; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v18
-; GFX10-NEXT:    v_add_f32_e32 v29, v38, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v2
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_add_f32_e32 v24, v35, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v30, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v32, v9, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v9
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX10-NEXT:    v_add_f32_e32 v7, v7, v23
-; GFX10-NEXT:    v_lshlrev_b32_e32 v23, 16, v17
-; GFX10-NEXT:    v_add_f32_e32 v28, v48, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v1
-; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_bfe_u32 v23, v24, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v36, 0x400000, v24
+; GFX10-NEXT:    v_cmp_u_f32_e64 s4, v24, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v34, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v34, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v8
+; GFX10-NEXT:    v_bfe_u32 v35, v7, 16, 1
+; GFX10-NEXT:    v_add3_u32 v23, v23, v24, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e64 s5, v7, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v31, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v32, v34, v8, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v22
+; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
+; GFX10-NEXT:    v_add3_u32 v24, v35, v7, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_add_f32_e32 v8, v34, v8
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_add_f32_e32 v0, v0, v16
-; GFX10-NEXT:    v_bfe_u32 v16, v33, 16, 1
-; GFX10-NEXT:    v_add_f32_e32 v10, v10, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v26, 16, v20
-; GFX10-NEXT:    v_add_f32_e32 v34, v34, v51
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v4
+; GFX10-NEXT:    v_add_f32_e32 v6, v6, v22
+; GFX10-NEXT:    v_cndmask_b32_e32 v32, v32, v33, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s6, v8, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s7, v6, v6
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v15
+; GFX10-NEXT:    v_add3_u32 v7, v35, v8, 0x7fff
+; GFX10-NEXT:    v_add_f32_e32 v35, v38, v37
+; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v21
+; GFX10-NEXT:    v_bfe_u32 v37, v6, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v6
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v22, s6
+; GFX10-NEXT:    v_bfe_u32 v21, v35, 16, 1
+; GFX10-NEXT:    v_add_f32_e32 v5, v5, v8
+; GFX10-NEXT:    v_add3_u32 v37, v37, v6, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v20
 ; GFX10-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT:    v_add3_u32 v6, v21, v35, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v21, 16, v4
+; GFX10-NEXT:    v_bfe_u32 v48, v5, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT:    v_add_f32_e32 v9, v9, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v25, 16, v19
-; GFX10-NEXT:    v_add_f32_e32 v30, v36, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v3
+; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v35
+; GFX10-NEXT:    v_cmp_u_f32_e64 s8, v35, v35
+; GFX10-NEXT:    v_add_f32_e32 v8, v21, v8
+; GFX10-NEXT:    v_add3_u32 v21, v48, v5, 0x7fff
+; GFX10-NEXT:    v_add_f32_e32 v4, v4, v20
+; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v19
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v5
+; GFX10-NEXT:    v_bfe_u32 v20, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s9, v5, v5
+; GFX10-NEXT:    v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT:    v_add_f32_e32 v48, v49, v48
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v18
+; GFX10-NEXT:    v_add3_u32 v20, v20, v8, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s10, v8, v8
+; GFX10-NEXT:    v_add3_u32 v5, v5, v4, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT:    v_cmp_u_f32_e64 s11, v4, v4
+; GFX10-NEXT:    v_bfe_u32 v4, v48, 16, 1
+; GFX10-NEXT:    v_add_f32_e32 v49, v51, v49
+; GFX10-NEXT:    v_or_b32_e32 v51, 0x400000, v48
+; GFX10-NEXT:    v_cmp_u_f32_e64 s12, v48, v48
 ; GFX10-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_add3_u32 v4, v4, v48, 0x7fff
+; GFX10-NEXT:    v_bfe_u32 v48, v49, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s13, v49, v49
+; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
+; GFX10-NEXT:    v_add_f32_e32 v3, v3, v19
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v39, s8
+; GFX10-NEXT:    v_add3_u32 v19, v48, v49, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v49
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v17
 ; GFX10-NEXT:    v_add_f32_e32 v2, v2, v18
-; GFX10-NEXT:    v_add_f32_e32 v18, v48, v23
+; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v21, v35, s9
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v20, v50, s10
+; GFX10-NEXT:    v_add_f32_e32 v49, v52, v49
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v8, s11
 ; GFX10-NEXT:    v_add_f32_e32 v1, v1, v17
-; GFX10-NEXT:    v_add_f32_e32 v17, v50, v22
-; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v33
-; GFX10-NEXT:    v_bfe_u32 v23, v14, 16, 1
-; GFX10-NEXT:    v_add3_u32 v16, v16, v33, 0x7fff
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX10-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
-; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT:    v_add_f32_e32 v4, v4, v20
-; GFX10-NEXT:    v_add_f32_e32 v20, v36, v25
-; GFX10-NEXT:    v_add_f32_e32 v3, v3, v19
-; GFX10-NEXT:    v_add_f32_e32 v19, v38, v24
-; GFX10-NEXT:    v_or_b32_e32 v24, 0x400000, v14
-; GFX10-NEXT:    v_bfe_u32 v25, v35, 16, 1
-; GFX10-NEXT:    v_add3_u32 v23, v23, v14, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v22, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX10-NEXT:    v_add_f32_e32 v5, v5, v21
-; GFX10-NEXT:    v_add_f32_e32 v21, v51, v26
-; GFX10-NEXT:    v_or_b32_e32 v26, 0x400000, v35
-; GFX10-NEXT:    v_bfe_u32 v36, v13, 16, 1
-; GFX10-NEXT:    v_add3_u32 v25, v25, v35, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v23, v23, v24, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v13
-; GFX10-NEXT:    v_bfe_u32 v48, v37, 16, 1
-; GFX10-NEXT:    v_add3_u32 v36, v36, v13, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v37
-; GFX10-NEXT:    v_cndmask_b32_e32 v25, v25, v26, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX10-NEXT:    v_bfe_u32 v51, v12, 16, 1
-; GFX10-NEXT:    v_add3_u32 v48, v48, v37, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v12
-; GFX10-NEXT:    v_bfe_u32 v22, v39, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v36, v36, v38, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX10-NEXT:    v_add3_u32 v51, v51, v12, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v14, 0x400000, v39
-; GFX10-NEXT:    v_bfe_u32 v24, v11, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v39, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v48, v48, v50, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v11
-; GFX10-NEXT:    v_bfe_u32 v26, v49, 16, 1
-; GFX10-NEXT:    v_add3_u32 v24, v24, v11, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v13, 0x400000, v49
-; GFX10-NEXT:    v_cndmask_b32_e32 v33, v51, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX10-NEXT:    v_bfe_u32 v38, v10, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v49, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v37, 0x400000, v10
-; GFX10-NEXT:    v_bfe_u32 v50, v34, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, v22, v14, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX10-NEXT:    v_add3_u32 v38, v38, v10, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v34
-; GFX10-NEXT:    v_bfe_u32 v51, v9, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v34, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v24, v24, v35, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v9
-; GFX10-NEXT:    v_bfe_u32 v22, v30, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v9, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v11, 0x400000, v30
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v26, v13, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v30, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v49, 0x400000, v8
-; GFX10-NEXT:    v_bfe_u32 v26, v29, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v37, v38, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX10-NEXT:    v_add3_u32 v35, v35, v8, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v10, 0x400000, v29
-; GFX10-NEXT:    v_bfe_u32 v38, v7, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v29, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v50, v12, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
-; GFX10-NEXT:    v_bfe_u32 v50, v28, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v7, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v28
-; GFX10-NEXT:    v_cndmask_b32_e32 v39, v51, v39, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX10-NEXT:    v_bfe_u32 v51, v6, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v28, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v30, 0x400000, v6
-; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v22, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT:    v_bfe_u32 v22, v27, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v6, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v27
-; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v35, v35, v49, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX10-NEXT:    v_bfe_u32 v49, v5, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v27, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v29, 0x400000, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v26, v10, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT:    v_bfe_u32 v26, v21, 16, 1
-; GFX10-NEXT:    v_add3_u32 v49, v49, v5, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v7, 0x400000, v21
-; GFX10-NEXT:    v_cndmask_b32_e32 v34, v38, v34, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX10-NEXT:    v_bfe_u32 v38, v4, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v28, 0x400000, v4
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v50, v9, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT:    v_bfe_u32 v50, v20, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v4, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v20
-; GFX10-NEXT:    v_cndmask_b32_e32 v30, v51, v30, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX10-NEXT:    v_add3_u32 v50, v50, v20, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v51, v3, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v27, 0x400000, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v22, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT:    v_bfe_u32 v22, v19, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v19
-; GFX10-NEXT:    v_add3_u32 v51, v51, v3, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v29, v49, v29, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX10-NEXT:    v_add3_u32 v22, v22, v19, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v49, v2, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v21, 0x400000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v26, v7, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT:    v_bfe_u32 v26, v18, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v4, 0x400000, v18
-; GFX10-NEXT:    v_add3_u32 v49, v49, v2, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v28, v38, v28, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX10-NEXT:    v_bfe_u32 v38, v1, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v18, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v50, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX10-NEXT:    v_bfe_u32 v50, v17, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v1, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v22, v5, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX10-NEXT:    v_bfe_u32 v22, v0, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v17, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v18, 0x400000, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v26, v4, vcc_lo
+; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_bfe_u32 v18, v49, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v52, 0x400000, v49
+; GFX10-NEXT:    v_cmp_u_f32_e64 s14, v49, v49
+; GFX10-NEXT:    v_bfe_u32 v39, v1, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v1
+; GFX10-NEXT:    v_add3_u32 v18, v18, v49, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT:    v_add3_u32 v39, v39, v1, 0x7fff
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v38, v20, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v19, v48, s13
+; GFX10-NEXT:    v_add_f32_e32 v17, v49, v17
+; GFX10-NEXT:    v_add_f32_e32 v0, v0, v16
+; GFX10-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v39, v35, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v22, v2, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v49, v17, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v17
+; GFX10-NEXT:    v_bfe_u32 v50, v0, 16, 1
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v50, v19, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v0
+; GFX10-NEXT:    v_add3_u32 v49, v49, v17, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT:    v_add3_u32 v50, v50, v0, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v23, v36, s4
+; GFX10-NEXT:    v_bfe_u32 v36, v3, 16, 1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v49, v8, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_perm_b32 v4, v28, v7, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v7, v34, v10, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v22, v18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v37, v37, v38, s7
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v2
+; GFX10-NEXT:    v_add3_u32 v22, v22, v2, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v24, v24, v34, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v50, v48, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT:    v_perm_b32 v0, v0, v17, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v49, v21, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v3
+; GFX10-NEXT:    v_add3_u32 v36, v36, v3, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v52, s14
+; GFX10-NEXT:    v_perm_b32 v0, v0, v8, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v22, v38, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_perm_b32 v2, v2, v5, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v51, v27, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v5, v29, v8, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v8, v35, v11, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v3, v3, v6, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v6, v30, v9, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v9, v39, v12, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v51, s12
+; GFX10-NEXT:    v_perm_b32 v1, v1, v18, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v9, v9, v30, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v2, v2, v19, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v36, v34, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v10, v25, v10, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v11, v26, v11, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v12, v27, v12, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v13, v28, v13, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v3, v3, v4, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v4, v5, v20, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v5, v21, v6, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v6, v37, v7, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v7, v24, v23, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v14, v29, v14, 0x7060302
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v32
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v32
-; GFX10-NEXT:    v_add_f32_e32 v17, v31, v17
-; GFX10-NEXT:    v_add_f32_e32 v15, v15, v18
-; GFX10-NEXT:    v_bfe_u32 v10, v17, 16, 1
-; GFX10-NEXT:    v_bfe_u32 v11, v15, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v17
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_add_f32_e32 v17, v33, v8
+; GFX10-NEXT:    v_add_f32_e32 v15, v15, v16
+; GFX10-NEXT:    v_perm_b32 v8, v32, v31, 0x7060302
+; GFX10-NEXT:    v_bfe_u32 v16, v17, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v18, v15, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v15
-; GFX10-NEXT:    v_add3_u32 v18, v10, v17, 0x7fff
-; GFX10-NEXT:    v_add3_u32 v11, v11, v15, 0x7fff
-; GFX10-NEXT:    v_perm_b32 v10, v37, v13, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v13, v36, v25, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v18, v12, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v15
+; GFX10-NEXT:    v_add3_u32 v16, v16, v17, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v18, v18, v15, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v19, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX10-NEXT:    v_perm_b32 v12, v33, v48, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v11, v19, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v11, v24, v14, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v14, v23, v16, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v15, v18, v20, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v15, v15, v16, 0x7060302
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_fadd_v32bf16:
@@ -16243,38 +16243,38 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v22, 16, v0
 ; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.l, v3.h
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v10, v19, v21 :: v_dual_lshlrev_b32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v10, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v19, 0x400000, v2
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_dual_mul_f32 v9, v22, v21 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v16, v19, vcc_lo
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v16, v1, 16, 1
+; GFX11TRUE16-NEXT:    v_mul_f32_e32 v9, v22, v21
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11TRUE16-NEXT:    v_mul_f32_e32 v0, v0, v8
+; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v8, v9, 16, 1
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v16, v22, vcc_lo
 ; GFX11TRUE16-NEXT:    v_mul_f32_e32 v17, v24, v23
+; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v24, 0x400000, v9
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v17, 16, 1
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v21, 0x400000, v17
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_add3_u32 v19, v23, v17, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v0, 16, 1
+; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v8.l, v8.h
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v9, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_add3_u32 v16, v23, v0, 0x7fff
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -17587,280 +17587,280 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-LABEL: v_fmul_v32bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32
-; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT:    v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
+; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v32, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX10-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v13
 ; GFX10-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT:    v_mul_f32_e32 v12, v12, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v28, 16, v22
-; GFX10-NEXT:    v_mul_f32_e32 v39, v48, v39
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v6
-; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    v_mul_f32_e32 v11, v11, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v27, 16, v21
-; GFX10-NEXT:    v_mul_f32_e32 v49, v50, v49
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v5
-; GFX10-NEXT:    v_mul_f32_e32 v33, v34, v33
-; GFX10-NEXT:    v_mul_f32_e32 v14, v14, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v30, 16, v24
-; GFX10-NEXT:    v_mul_f32_e32 v35, v36, v35
+; GFX10-NEXT:    v_mul_f32_e32 v31, v32, v31
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v12
+; GFX10-NEXT:    v_mul_f32_e32 v30, v14, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v14, 16, v29
+; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_bfe_u32 v32, v31, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v31
+; GFX10-NEXT:    v_bfe_u32 v35, v30, 16, 1
+; GFX10-NEXT:    v_mul_f32_e32 v33, v33, v14
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX10-NEXT:    v_add3_u32 v32, v32, v31, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX10-NEXT:    v_add3_u32 v31, v35, v30, 0x7fff
+; GFX10-NEXT:    v_mul_f32_e32 v35, v13, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v13, 16, v28
+; GFX10-NEXT:    v_cndmask_b32_e32 v14, v32, v34, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v30
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v21
+; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v5
+; GFX10-NEXT:    v_add3_u32 v30, v34, v33, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v29, v31, v32, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_mul_f32_e32 v34, v36, v13
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_mul_f32_e32 v33, v12, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v12, 16, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v11
+; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_mul_f32_e32 v35, v36, v12
+; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v10
+; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX10-NEXT:    v_cndmask_b32_e32 v28, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_mul_f32_e32 v34, v11, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v11, 16, v26
+; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_mul_f32_e32 v33, v36, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v9
+; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v27, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_mul_f32_e32 v35, v10, v26
+; GFX10-NEXT:    v_lshlrev_b32_e32 v10, 16, v25
+; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_mul_f32_e32 v34, v36, v10
+; GFX10-NEXT:    v_mul_f32_e32 v9, v9, v25
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v8
-; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT:    v_mul_f32_e32 v13, v13, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v29, 16, v23
-; GFX10-NEXT:    v_mul_f32_e32 v37, v38, v37
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v26, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v24
+; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_lshlrev_b32_e32 v52, 16, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_mul_f32_e32 v33, v36, v33
+; GFX10-NEXT:    v_mul_f32_e32 v8, v8, v24
+; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v23
+; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v25, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v9, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    v_mul_f32_e32 v6, v6, v22
-; GFX10-NEXT:    v_lshlrev_b32_e32 v22, 16, v16
-; GFX10-NEXT:    v_mul_f32_e32 v27, v50, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v9
-; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT:    v_mul_f32_e32 v8, v8, v24
-; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v18
-; GFX10-NEXT:    v_mul_f32_e32 v29, v38, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v2
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_mul_f32_e32 v24, v35, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v30, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v32, v9, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v9
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX10-NEXT:    v_mul_f32_e32 v7, v7, v23
-; GFX10-NEXT:    v_lshlrev_b32_e32 v23, 16, v17
-; GFX10-NEXT:    v_mul_f32_e32 v28, v48, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v1
-; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_bfe_u32 v23, v24, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v36, 0x400000, v24
+; GFX10-NEXT:    v_cmp_u_f32_e64 s4, v24, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v34, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v34, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v8
+; GFX10-NEXT:    v_bfe_u32 v35, v7, 16, 1
+; GFX10-NEXT:    v_add3_u32 v23, v23, v24, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e64 s5, v7, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v31, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v32, v34, v8, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v22
+; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
+; GFX10-NEXT:    v_add3_u32 v24, v35, v7, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_mul_f32_e32 v8, v34, v8
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v16
-; GFX10-NEXT:    v_bfe_u32 v16, v33, 16, 1
-; GFX10-NEXT:    v_mul_f32_e32 v10, v10, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v26, 16, v20
-; GFX10-NEXT:    v_mul_f32_e32 v34, v34, v51
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v4
+; GFX10-NEXT:    v_mul_f32_e32 v6, v6, v22
+; GFX10-NEXT:    v_cndmask_b32_e32 v32, v32, v33, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s6, v8, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s7, v6, v6
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v15
+; GFX10-NEXT:    v_add3_u32 v7, v35, v8, 0x7fff
+; GFX10-NEXT:    v_mul_f32_e32 v35, v38, v37
+; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v21
+; GFX10-NEXT:    v_bfe_u32 v37, v6, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v6
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v22, s6
+; GFX10-NEXT:    v_bfe_u32 v21, v35, 16, 1
+; GFX10-NEXT:    v_mul_f32_e32 v5, v5, v8
+; GFX10-NEXT:    v_add3_u32 v37, v37, v6, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v20
 ; GFX10-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT:    v_add3_u32 v6, v21, v35, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v21, 16, v4
+; GFX10-NEXT:    v_bfe_u32 v48, v5, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT:    v_mul_f32_e32 v9, v9, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v25, 16, v19
-; GFX10-NEXT:    v_mul_f32_e32 v30, v36, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v3
+; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v35
+; GFX10-NEXT:    v_cmp_u_f32_e64 s8, v35, v35
+; GFX10-NEXT:    v_mul_f32_e32 v8, v21, v8
+; GFX10-NEXT:    v_add3_u32 v21, v48, v5, 0x7fff
+; GFX10-NEXT:    v_mul_f32_e32 v4, v4, v20
+; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v19
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v5
+; GFX10-NEXT:    v_bfe_u32 v20, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s9, v5, v5
+; GFX10-NEXT:    v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT:    v_mul_f32_e32 v48, v49, v48
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v18
+; GFX10-NEXT:    v_add3_u32 v20, v20, v8, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s10, v8, v8
+; GFX10-NEXT:    v_add3_u32 v5, v5, v4, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT:    v_cmp_u_f32_e64 s11, v4, v4
+; GFX10-NEXT:    v_bfe_u32 v4, v48, 16, 1
+; GFX10-NEXT:    v_mul_f32_e32 v49, v51, v49
+; GFX10-NEXT:    v_or_b32_e32 v51, 0x400000, v48
+; GFX10-NEXT:    v_cmp_u_f32_e64 s12, v48, v48
 ; GFX10-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_add3_u32 v4, v4, v48, 0x7fff
+; GFX10-NEXT:    v_bfe_u32 v48, v49, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s13, v49, v49
+; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
+; GFX10-NEXT:    v_mul_f32_e32 v3, v3, v19
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v39, s8
+; GFX10-NEXT:    v_add3_u32 v19, v48, v49, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v49
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v17
 ; GFX10-NEXT:    v_mul_f32_e32 v2, v2, v18
-; GFX10-NEXT:    v_mul_f32_e32 v18, v48, v23
+; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v21, v35, s9
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v20, v50, s10
+; GFX10-NEXT:    v_mul_f32_e32 v49, v52, v49
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v8, s11
 ; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v17
-; GFX10-NEXT:    v_mul_f32_e32 v17, v50, v22
-; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v33
-; GFX10-NEXT:    v_bfe_u32 v23, v14, 16, 1
-; GFX10-NEXT:    v_add3_u32 v16, v16, v33, 0x7fff
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX10-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
-; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT:    v_mul_f32_e32 v4, v4, v20
-; GFX10-NEXT:    v_mul_f32_e32 v20, v36, v25
-; GFX10-NEXT:    v_mul_f32_e32 v3, v3, v19
-; GFX10-NEXT:    v_mul_f32_e32 v19, v38, v24
-; GFX10-NEXT:    v_or_b32_e32 v24, 0x400000, v14
-; GFX10-NEXT:    v_bfe_u32 v25, v35, 16, 1
-; GFX10-NEXT:    v_add3_u32 v23, v23, v14, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v22, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX10-NEXT:    v_mul_f32_e32 v5, v5, v21
-; GFX10-NEXT:    v_mul_f32_e32 v21, v51, v26
-; GFX10-NEXT:    v_or_b32_e32 v26, 0x400000, v35
-; GFX10-NEXT:    v_bfe_u32 v36, v13, 16, 1
-; GFX10-NEXT:    v_add3_u32 v25, v25, v35, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v23, v23, v24, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v13
-; GFX10-NEXT:    v_bfe_u32 v48, v37, 16, 1
-; GFX10-NEXT:    v_add3_u32 v36, v36, v13, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v37
-; GFX10-NEXT:    v_cndmask_b32_e32 v25, v25, v26, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX10-NEXT:    v_bfe_u32 v51, v12, 16, 1
-; GFX10-NEXT:    v_add3_u32 v48, v48, v37, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v12
-; GFX10-NEXT:    v_bfe_u32 v22, v39, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v36, v36, v38, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX10-NEXT:    v_add3_u32 v51, v51, v12, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v14, 0x400000, v39
-; GFX10-NEXT:    v_bfe_u32 v24, v11, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v39, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v48, v48, v50, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v11
-; GFX10-NEXT:    v_bfe_u32 v26, v49, 16, 1
-; GFX10-NEXT:    v_add3_u32 v24, v24, v11, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v13, 0x400000, v49
-; GFX10-NEXT:    v_cndmask_b32_e32 v33, v51, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX10-NEXT:    v_bfe_u32 v38, v10, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v49, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v37, 0x400000, v10
-; GFX10-NEXT:    v_bfe_u32 v50, v34, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, v22, v14, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX10-NEXT:    v_add3_u32 v38, v38, v10, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v34
-; GFX10-NEXT:    v_bfe_u32 v51, v9, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v34, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v24, v24, v35, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v9
-; GFX10-NEXT:    v_bfe_u32 v22, v30, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v9, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v11, 0x400000, v30
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v26, v13, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v30, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v49, 0x400000, v8
-; GFX10-NEXT:    v_bfe_u32 v26, v29, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v37, v38, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX10-NEXT:    v_add3_u32 v35, v35, v8, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v10, 0x400000, v29
-; GFX10-NEXT:    v_bfe_u32 v38, v7, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v29, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v50, v12, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
-; GFX10-NEXT:    v_bfe_u32 v50, v28, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v7, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v28
-; GFX10-NEXT:    v_cndmask_b32_e32 v39, v51, v39, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX10-NEXT:    v_bfe_u32 v51, v6, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v28, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v30, 0x400000, v6
-; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v22, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT:    v_bfe_u32 v22, v27, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v6, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v27
-; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v35, v35, v49, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX10-NEXT:    v_bfe_u32 v49, v5, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v27, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v29, 0x400000, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v26, v10, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT:    v_bfe_u32 v26, v21, 16, 1
-; GFX10-NEXT:    v_add3_u32 v49, v49, v5, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v7, 0x400000, v21
-; GFX10-NEXT:    v_cndmask_b32_e32 v34, v38, v34, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX10-NEXT:    v_bfe_u32 v38, v4, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v28, 0x400000, v4
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v50, v9, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT:    v_bfe_u32 v50, v20, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v4, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v20
-; GFX10-NEXT:    v_cndmask_b32_e32 v30, v51, v30, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX10-NEXT:    v_add3_u32 v50, v50, v20, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v51, v3, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v27, 0x400000, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v22, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT:    v_bfe_u32 v22, v19, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v19
-; GFX10-NEXT:    v_add3_u32 v51, v51, v3, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v29, v49, v29, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX10-NEXT:    v_add3_u32 v22, v22, v19, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v49, v2, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v21, 0x400000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v26, v7, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT:    v_bfe_u32 v26, v18, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v4, 0x400000, v18
-; GFX10-NEXT:    v_add3_u32 v49, v49, v2, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v28, v38, v28, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX10-NEXT:    v_bfe_u32 v38, v1, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v18, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v50, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX10-NEXT:    v_bfe_u32 v50, v17, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v1, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v22, v5, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX10-NEXT:    v_bfe_u32 v22, v0, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v17, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v18, 0x400000, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v26, v4, vcc_lo
+; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_bfe_u32 v18, v49, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v52, 0x400000, v49
+; GFX10-NEXT:    v_cmp_u_f32_e64 s14, v49, v49
+; GFX10-NEXT:    v_bfe_u32 v39, v1, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v1
+; GFX10-NEXT:    v_add3_u32 v18, v18, v49, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT:    v_add3_u32 v39, v39, v1, 0x7fff
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v38, v20, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v19, v48, s13
+; GFX10-NEXT:    v_mul_f32_e32 v17, v49, v17
+; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v16
+; GFX10-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v39, v35, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v22, v2, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v49, v17, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v17
+; GFX10-NEXT:    v_bfe_u32 v50, v0, 16, 1
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v50, v19, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v0
+; GFX10-NEXT:    v_add3_u32 v49, v49, v17, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT:    v_add3_u32 v50, v50, v0, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v23, v36, s4
+; GFX10-NEXT:    v_bfe_u32 v36, v3, 16, 1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v49, v8, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_perm_b32 v4, v28, v7, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v7, v34, v10, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v22, v18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v37, v37, v38, s7
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v2
+; GFX10-NEXT:    v_add3_u32 v22, v22, v2, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v24, v24, v34, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v50, v48, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT:    v_perm_b32 v0, v0, v17, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v49, v21, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v3
+; GFX10-NEXT:    v_add3_u32 v36, v36, v3, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v52, s14
+; GFX10-NEXT:    v_perm_b32 v0, v0, v8, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v22, v38, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_perm_b32 v2, v2, v5, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v51, v27, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v5, v29, v8, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v8, v35, v11, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v3, v3, v6, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v6, v30, v9, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v9, v39, v12, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v51, s12
+; GFX10-NEXT:    v_perm_b32 v1, v1, v18, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v9, v9, v30, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v2, v2, v19, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v36, v34, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v10, v25, v10, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v11, v26, v11, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v12, v27, v12, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v13, v28, v13, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v3, v3, v4, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v4, v5, v20, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v5, v21, v6, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v6, v37, v7, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v7, v24, v23, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v14, v29, v14, 0x7060302
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v32
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v32
-; GFX10-NEXT:    v_mul_f32_e32 v17, v31, v17
-; GFX10-NEXT:    v_mul_f32_e32 v15, v15, v18
-; GFX10-NEXT:    v_bfe_u32 v10, v17, 16, 1
-; GFX10-NEXT:    v_bfe_u32 v11, v15, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v17
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_mul_f32_e32 v17, v33, v8
+; GFX10-NEXT:    v_mul_f32_e32 v15, v15, v16
+; GFX10-NEXT:    v_perm_b32 v8, v32, v31, 0x7060302
+; GFX10-NEXT:    v_bfe_u32 v16, v17, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v18, v15, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v15
-; GFX10-NEXT:    v_add3_u32 v18, v10, v17, 0x7fff
-; GFX10-NEXT:    v_add3_u32 v11, v11, v15, 0x7fff
-; GFX10-NEXT:    v_perm_b32 v10, v37, v13, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v13, v36, v25, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v18, v12, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v15
+; GFX10-NEXT:    v_add3_u32 v16, v16, v17, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v18, v18, v15, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v19, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX10-NEXT:    v_perm_b32 v12, v33, v48, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v11, v19, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v11, v24, v14, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v14, v23, v16, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v15, v18, v20, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v15, v15, v16, 0x7060302
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_fmul_v32bf16:
@@ -20986,38 +20986,38 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v22, 16, v0
 ; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.l, v3.h
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v10, v19, v21 :: v_dual_lshlrev_b32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v10, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v19, 0x400000, v2
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_dual_min_f32 v9, v22, v21 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v16, v19, vcc_lo
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v16, v1, 16, 1
+; GFX11TRUE16-NEXT:    v_min_f32_e32 v9, v22, v21
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11TRUE16-NEXT:    v_min_f32_e32 v0, v0, v8
+; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v8, v9, 16, 1
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v16, v22, vcc_lo
 ; GFX11TRUE16-NEXT:    v_min_f32_e32 v17, v24, v23
+; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v24, 0x400000, v9
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v17, 16, 1
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v21, 0x400000, v17
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_add3_u32 v19, v23, v17, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v0, 16, 1
+; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v8.l, v8.h
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v9, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_add3_u32 v16, v23, v0, 0x7fff
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -22330,280 +22330,280 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-LABEL: v_minnum_v32bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32
-; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT:    v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
+; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v32, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX10-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v13
 ; GFX10-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT:    v_min_f32_e32 v12, v12, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v28, 16, v22
-; GFX10-NEXT:    v_min_f32_e32 v39, v48, v39
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v6
-; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    v_min_f32_e32 v11, v11, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v27, 16, v21
-; GFX10-NEXT:    v_min_f32_e32 v49, v50, v49
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v5
-; GFX10-NEXT:    v_min_f32_e32 v33, v34, v33
-; GFX10-NEXT:    v_min_f32_e32 v14, v14, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v30, 16, v24
-; GFX10-NEXT:    v_min_f32_e32 v35, v36, v35
+; GFX10-NEXT:    v_min_f32_e32 v31, v32, v31
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v12
+; GFX10-NEXT:    v_min_f32_e32 v30, v14, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v14, 16, v29
+; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_bfe_u32 v32, v31, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v31
+; GFX10-NEXT:    v_bfe_u32 v35, v30, 16, 1
+; GFX10-NEXT:    v_min_f32_e32 v33, v33, v14
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX10-NEXT:    v_add3_u32 v32, v32, v31, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX10-NEXT:    v_add3_u32 v31, v35, v30, 0x7fff
+; GFX10-NEXT:    v_min_f32_e32 v35, v13, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v13, 16, v28
+; GFX10-NEXT:    v_cndmask_b32_e32 v14, v32, v34, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v30
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v21
+; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v5
+; GFX10-NEXT:    v_add3_u32 v30, v34, v33, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v29, v31, v32, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_min_f32_e32 v34, v36, v13
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_min_f32_e32 v33, v12, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v12, 16, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v11
+; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_min_f32_e32 v35, v36, v12
+; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v10
+; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX10-NEXT:    v_cndmask_b32_e32 v28, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_min_f32_e32 v34, v11, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v11, 16, v26
+; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_min_f32_e32 v33, v36, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v9
+; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v27, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_min_f32_e32 v35, v10, v26
+; GFX10-NEXT:    v_lshlrev_b32_e32 v10, 16, v25
+; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_min_f32_e32 v34, v36, v10
+; GFX10-NEXT:    v_min_f32_e32 v9, v9, v25
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v8
-; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT:    v_min_f32_e32 v13, v13, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v29, 16, v23
-; GFX10-NEXT:    v_min_f32_e32 v37, v38, v37
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v26, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v24
+; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_lshlrev_b32_e32 v52, 16, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_min_f32_e32 v33, v36, v33
+; GFX10-NEXT:    v_min_f32_e32 v8, v8, v24
+; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v23
+; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v25, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v9, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    v_min_f32_e32 v6, v6, v22
-; GFX10-NEXT:    v_lshlrev_b32_e32 v22, 16, v16
-; GFX10-NEXT:    v_min_f32_e32 v27, v50, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v9
-; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT:    v_min_f32_e32 v8, v8, v24
-; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v18
-; GFX10-NEXT:    v_min_f32_e32 v29, v38, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v2
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_min_f32_e32 v24, v35, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v30, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v32, v9, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v9
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX10-NEXT:    v_min_f32_e32 v7, v7, v23
-; GFX10-NEXT:    v_lshlrev_b32_e32 v23, 16, v17
-; GFX10-NEXT:    v_min_f32_e32 v28, v48, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v1
-; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_bfe_u32 v23, v24, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v36, 0x400000, v24
+; GFX10-NEXT:    v_cmp_u_f32_e64 s4, v24, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v34, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v34, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v8
+; GFX10-NEXT:    v_bfe_u32 v35, v7, 16, 1
+; GFX10-NEXT:    v_add3_u32 v23, v23, v24, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e64 s5, v7, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v31, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v32, v34, v8, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v22
+; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
+; GFX10-NEXT:    v_add3_u32 v24, v35, v7, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_min_f32_e32 v8, v34, v8
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_min_f32_e32 v0, v0, v16
-; GFX10-NEXT:    v_bfe_u32 v16, v33, 16, 1
-; GFX10-NEXT:    v_min_f32_e32 v10, v10, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v26, 16, v20
-; GFX10-NEXT:    v_min_f32_e32 v34, v34, v51
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v4
+; GFX10-NEXT:    v_min_f32_e32 v6, v6, v22
+; GFX10-NEXT:    v_cndmask_b32_e32 v32, v32, v33, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s6, v8, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s7, v6, v6
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v15
+; GFX10-NEXT:    v_add3_u32 v7, v35, v8, 0x7fff
+; GFX10-NEXT:    v_min_f32_e32 v35, v38, v37
+; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v21
+; GFX10-NEXT:    v_bfe_u32 v37, v6, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v6
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v22, s6
+; GFX10-NEXT:    v_bfe_u32 v21, v35, 16, 1
+; GFX10-NEXT:    v_min_f32_e32 v5, v5, v8
+; GFX10-NEXT:    v_add3_u32 v37, v37, v6, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v20
 ; GFX10-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT:    v_add3_u32 v6, v21, v35, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v21, 16, v4
+; GFX10-NEXT:    v_bfe_u32 v48, v5, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT:    v_min_f32_e32 v9, v9, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v25, 16, v19
-; GFX10-NEXT:    v_min_f32_e32 v30, v36, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v3
+; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v35
+; GFX10-NEXT:    v_cmp_u_f32_e64 s8, v35, v35
+; GFX10-NEXT:    v_min_f32_e32 v8, v21, v8
+; GFX10-NEXT:    v_add3_u32 v21, v48, v5, 0x7fff
+; GFX10-NEXT:    v_min_f32_e32 v4, v4, v20
+; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v19
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v5
+; GFX10-NEXT:    v_bfe_u32 v20, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s9, v5, v5
+; GFX10-NEXT:    v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT:    v_min_f32_e32 v48, v49, v48
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v18
+; GFX10-NEXT:    v_add3_u32 v20, v20, v8, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s10, v8, v8
+; GFX10-NEXT:    v_add3_u32 v5, v5, v4, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT:    v_cmp_u_f32_e64 s11, v4, v4
+; GFX10-NEXT:    v_bfe_u32 v4, v48, 16, 1
+; GFX10-NEXT:    v_min_f32_e32 v49, v51, v49
+; GFX10-NEXT:    v_or_b32_e32 v51, 0x400000, v48
+; GFX10-NEXT:    v_cmp_u_f32_e64 s12, v48, v48
 ; GFX10-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_add3_u32 v4, v4, v48, 0x7fff
+; GFX10-NEXT:    v_bfe_u32 v48, v49, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s13, v49, v49
+; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
+; GFX10-NEXT:    v_min_f32_e32 v3, v3, v19
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v39, s8
+; GFX10-NEXT:    v_add3_u32 v19, v48, v49, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v49
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v17
 ; GFX10-NEXT:    v_min_f32_e32 v2, v2, v18
-; GFX10-NEXT:    v_min_f32_e32 v18, v48, v23
+; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v21, v35, s9
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v20, v50, s10
+; GFX10-NEXT:    v_min_f32_e32 v49, v52, v49
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v8, s11
 ; GFX10-NEXT:    v_min_f32_e32 v1, v1, v17
-; GFX10-NEXT:    v_min_f32_e32 v17, v50, v22
-; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v33
-; GFX10-NEXT:    v_bfe_u32 v23, v14, 16, 1
-; GFX10-NEXT:    v_add3_u32 v16, v16, v33, 0x7fff
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX10-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
-; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT:    v_min_f32_e32 v4, v4, v20
-; GFX10-NEXT:    v_min_f32_e32 v20, v36, v25
-; GFX10-NEXT:    v_min_f32_e32 v3, v3, v19
-; GFX10-NEXT:    v_min_f32_e32 v19, v38, v24
-; GFX10-NEXT:    v_or_b32_e32 v24, 0x400000, v14
-; GFX10-NEXT:    v_bfe_u32 v25, v35, 16, 1
-; GFX10-NEXT:    v_add3_u32 v23, v23, v14, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v22, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX10-NEXT:    v_min_f32_e32 v5, v5, v21
-; GFX10-NEXT:    v_min_f32_e32 v21, v51, v26
-; GFX10-NEXT:    v_or_b32_e32 v26, 0x400000, v35
-; GFX10-NEXT:    v_bfe_u32 v36, v13, 16, 1
-; GFX10-NEXT:    v_add3_u32 v25, v25, v35, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v23, v23, v24, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v13
-; GFX10-NEXT:    v_bfe_u32 v48, v37, 16, 1
-; GFX10-NEXT:    v_add3_u32 v36, v36, v13, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v37
-; GFX10-NEXT:    v_cndmask_b32_e32 v25, v25, v26, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX10-NEXT:    v_bfe_u32 v51, v12, 16, 1
-; GFX10-NEXT:    v_add3_u32 v48, v48, v37, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v12
-; GFX10-NEXT:    v_bfe_u32 v22, v39, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v36, v36, v38, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX10-NEXT:    v_add3_u32 v51, v51, v12, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v14, 0x400000, v39
-; GFX10-NEXT:    v_bfe_u32 v24, v11, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v39, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v48, v48, v50, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v11
-; GFX10-NEXT:    v_bfe_u32 v26, v49, 16, 1
-; GFX10-NEXT:    v_add3_u32 v24, v24, v11, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v13, 0x400000, v49
-; GFX10-NEXT:    v_cndmask_b32_e32 v33, v51, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX10-NEXT:    v_bfe_u32 v38, v10, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v49, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v37, 0x400000, v10
-; GFX10-NEXT:    v_bfe_u32 v50, v34, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, v22, v14, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX10-NEXT:    v_add3_u32 v38, v38, v10, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v34
-; GFX10-NEXT:    v_bfe_u32 v51, v9, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v34, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v24, v24, v35, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v9
-; GFX10-NEXT:    v_bfe_u32 v22, v30, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v9, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v11, 0x400000, v30
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v26, v13, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v30, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v49, 0x400000, v8
-; GFX10-NEXT:    v_bfe_u32 v26, v29, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v37, v38, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX10-NEXT:    v_add3_u32 v35, v35, v8, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v10, 0x400000, v29
-; GFX10-NEXT:    v_bfe_u32 v38, v7, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v29, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v50, v12, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
-; GFX10-NEXT:    v_bfe_u32 v50, v28, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v7, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v28
-; GFX10-NEXT:    v_cndmask_b32_e32 v39, v51, v39, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX10-NEXT:    v_bfe_u32 v51, v6, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v28, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v30, 0x400000, v6
-; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v22, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT:    v_bfe_u32 v22, v27, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v6, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v27
-; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v35, v35, v49, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX10-NEXT:    v_bfe_u32 v49, v5, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v27, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v29, 0x400000, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v26, v10, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT:    v_bfe_u32 v26, v21, 16, 1
-; GFX10-NEXT:    v_add3_u32 v49, v49, v5, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v7, 0x400000, v21
-; GFX10-NEXT:    v_cndmask_b32_e32 v34, v38, v34, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX10-NEXT:    v_bfe_u32 v38, v4, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v28, 0x400000, v4
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v50, v9, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT:    v_bfe_u32 v50, v20, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v4, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v20
-; GFX10-NEXT:    v_cndmask_b32_e32 v30, v51, v30, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX10-NEXT:    v_add3_u32 v50, v50, v20, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v51, v3, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v27, 0x400000, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v22, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT:    v_bfe_u32 v22, v19, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v19
-; GFX10-NEXT:    v_add3_u32 v51, v51, v3, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v29, v49, v29, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX10-NEXT:    v_add3_u32 v22, v22, v19, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v49, v2, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v21, 0x400000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v26, v7, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT:    v_bfe_u32 v26, v18, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v4, 0x400000, v18
-; GFX10-NEXT:    v_add3_u32 v49, v49, v2, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v28, v38, v28, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX10-NEXT:    v_bfe_u32 v38, v1, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v18, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v50, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX10-NEXT:    v_bfe_u32 v50, v17, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v1, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v22, v5, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX10-NEXT:    v_bfe_u32 v22, v0, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v17, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v18, 0x400000, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v26, v4, vcc_lo
+; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_bfe_u32 v18, v49, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v52, 0x400000, v49
+; GFX10-NEXT:    v_cmp_u_f32_e64 s14, v49, v49
+; GFX10-NEXT:    v_bfe_u32 v39, v1, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v1
+; GFX10-NEXT:    v_add3_u32 v18, v18, v49, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT:    v_add3_u32 v39, v39, v1, 0x7fff
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v38, v20, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v19, v48, s13
+; GFX10-NEXT:    v_min_f32_e32 v17, v49, v17
+; GFX10-NEXT:    v_min_f32_e32 v0, v0, v16
+; GFX10-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v39, v35, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v22, v2, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v49, v17, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v17
+; GFX10-NEXT:    v_bfe_u32 v50, v0, 16, 1
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v50, v19, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v0
+; GFX10-NEXT:    v_add3_u32 v49, v49, v17, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT:    v_add3_u32 v50, v50, v0, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v23, v36, s4
+; GFX10-NEXT:    v_bfe_u32 v36, v3, 16, 1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v49, v8, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_perm_b32 v4, v28, v7, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v7, v34, v10, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v22, v18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v37, v37, v38, s7
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v2
+; GFX10-NEXT:    v_add3_u32 v22, v22, v2, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v24, v24, v34, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v50, v48, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT:    v_perm_b32 v0, v0, v17, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v49, v21, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v3
+; GFX10-NEXT:    v_add3_u32 v36, v36, v3, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v52, s14
+; GFX10-NEXT:    v_perm_b32 v0, v0, v8, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v22, v38, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_perm_b32 v2, v2, v5, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v51, v27, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v5, v29, v8, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v8, v35, v11, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v3, v3, v6, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v6, v30, v9, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v9, v39, v12, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v51, s12
+; GFX10-NEXT:    v_perm_b32 v1, v1, v18, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v9, v9, v30, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v2, v2, v19, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v36, v34, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v10, v25, v10, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v11, v26, v11, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v12, v27, v12, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v13, v28, v13, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v3, v3, v4, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v4, v5, v20, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v5, v21, v6, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v6, v37, v7, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v7, v24, v23, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v14, v29, v14, 0x7060302
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v32
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v32
-; GFX10-NEXT:    v_min_f32_e32 v17, v31, v17
-; GFX10-NEXT:    v_min_f32_e32 v15, v15, v18
-; GFX10-NEXT:    v_bfe_u32 v10, v17, 16, 1
-; GFX10-NEXT:    v_bfe_u32 v11, v15, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v17
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_min_f32_e32 v17, v33, v8
+; GFX10-NEXT:    v_min_f32_e32 v15, v15, v16
+; GFX10-NEXT:    v_perm_b32 v8, v32, v31, 0x7060302
+; GFX10-NEXT:    v_bfe_u32 v16, v17, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v18, v15, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v15
-; GFX10-NEXT:    v_add3_u32 v18, v10, v17, 0x7fff
-; GFX10-NEXT:    v_add3_u32 v11, v11, v15, 0x7fff
-; GFX10-NEXT:    v_perm_b32 v10, v37, v13, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v13, v36, v25, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v18, v12, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v15
+; GFX10-NEXT:    v_add3_u32 v16, v16, v17, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v18, v18, v15, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v19, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX10-NEXT:    v_perm_b32 v12, v33, v48, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v11, v19, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v11, v24, v14, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v14, v23, v16, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v15, v18, v20, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v15, v15, v16, 0x7060302
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_minnum_v32bf16:
@@ -25238,38 +25238,38 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v22, 16, v0
 ; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.l, v3.h
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v10, v19, v21 :: v_dual_lshlrev_b32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v10, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v19, 0x400000, v2
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_dual_max_f32 v9, v22, v21 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v21, 16, v8
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v16, v19, vcc_lo
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v16, v1, 16, 1
+; GFX11TRUE16-NEXT:    v_max_f32_e32 v9, v22, v21
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v1
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11TRUE16-NEXT:    v_max_f32_e32 v0, v0, v8
+; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v8, v9, 16, 1
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT:    v_add3_u32 v16, v16, v1, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v16, v22, vcc_lo
 ; GFX11TRUE16-NEXT:    v_max_f32_e32 v17, v24, v23
+; GFX11TRUE16-NEXT:    v_add3_u32 v8, v8, v9, 0x7fff
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v24, 0x400000, v9
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v22, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v17, 16, 1
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v21, 0x400000, v17
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_add3_u32 v19, v23, v17, 0x7fff
 ; GFX11TRUE16-NEXT:    v_bfe_u32 v23, v0, 16, 1
+; GFX11TRUE16-NEXT:    v_bfi_b32 v2, 0xffff, v2, v10
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v8.l, v8.h
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v9, v19, v21, vcc_lo
 ; GFX11TRUE16-NEXT:    v_add3_u32 v16, v23, v0, 0x7fff
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -26582,280 +26582,280 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-LABEL: v_maxnum_v32bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32
-; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT:    v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
+; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v32, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX10-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v13
 ; GFX10-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT:    v_max_f32_e32 v12, v12, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v28, 16, v22
-; GFX10-NEXT:    v_max_f32_e32 v39, v48, v39
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v6
-; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    v_max_f32_e32 v11, v11, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v27, 16, v21
-; GFX10-NEXT:    v_max_f32_e32 v49, v50, v49
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v5
-; GFX10-NEXT:    v_max_f32_e32 v33, v34, v33
-; GFX10-NEXT:    v_max_f32_e32 v14, v14, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v30, 16, v24
-; GFX10-NEXT:    v_max_f32_e32 v35, v36, v35
+; GFX10-NEXT:    v_max_f32_e32 v31, v32, v31
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v12
+; GFX10-NEXT:    v_max_f32_e32 v30, v14, v30
+; GFX10-NEXT:    v_lshlrev_b32_e32 v14, 16, v29
+; GFX10-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT:    v_bfe_u32 v32, v31, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v31
+; GFX10-NEXT:    v_bfe_u32 v35, v30, 16, 1
+; GFX10-NEXT:    v_max_f32_e32 v33, v33, v14
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX10-NEXT:    v_add3_u32 v32, v32, v31, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX10-NEXT:    v_add3_u32 v31, v35, v30, 0x7fff
+; GFX10-NEXT:    v_max_f32_e32 v35, v13, v29
+; GFX10-NEXT:    v_lshlrev_b32_e32 v13, 16, v28
+; GFX10-NEXT:    v_cndmask_b32_e32 v14, v32, v34, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v30
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX10-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v37, 16, v21
+; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v5
+; GFX10-NEXT:    v_add3_u32 v30, v34, v33, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v29, v31, v32, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_max_f32_e32 v34, v36, v13
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_max_f32_e32 v33, v12, v28
+; GFX10-NEXT:    v_lshlrev_b32_e32 v12, 16, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v11
+; GFX10-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_max_f32_e32 v35, v36, v12
+; GFX10-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v10
+; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX10-NEXT:    v_cndmask_b32_e32 v28, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_max_f32_e32 v34, v11, v27
+; GFX10-NEXT:    v_lshlrev_b32_e32 v11, 16, v26
+; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_max_f32_e32 v33, v36, v11
+; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v9
+; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v27, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_max_f32_e32 v35, v10, v26
+; GFX10-NEXT:    v_lshlrev_b32_e32 v10, 16, v25
+; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v33, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_max_f32_e32 v34, v36, v10
+; GFX10-NEXT:    v_max_f32_e32 v9, v9, v25
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v8
-; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT:    v_max_f32_e32 v13, v13, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v29, 16, v23
-; GFX10-NEXT:    v_max_f32_e32 v37, v38, v37
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v26, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v32, v35, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v24
+; GFX10-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_lshlrev_b32_e32 v52, 16, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v35, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v35
+; GFX10-NEXT:    v_bfe_u32 v32, v34, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX10-NEXT:    v_max_f32_e32 v33, v36, v33
+; GFX10-NEXT:    v_max_f32_e32 v8, v8, v24
+; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v23
+; GFX10-NEXT:    v_lshlrev_b32_e32 v35, 16, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v25, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v30, v32, v34, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v31, 0x400000, v34
+; GFX10-NEXT:    v_bfe_u32 v32, v9, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX10-NEXT:    v_bfe_u32 v34, v33, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    v_max_f32_e32 v6, v6, v22
-; GFX10-NEXT:    v_lshlrev_b32_e32 v22, 16, v16
-; GFX10-NEXT:    v_max_f32_e32 v27, v50, v27
-; GFX10-NEXT:    v_lshlrev_b32_e32 v50, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v9
-; GFX10-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT:    v_max_f32_e32 v8, v8, v24
-; GFX10-NEXT:    v_lshlrev_b32_e32 v24, 16, v18
-; GFX10-NEXT:    v_max_f32_e32 v29, v38, v29
-; GFX10-NEXT:    v_lshlrev_b32_e32 v38, 16, v2
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_max_f32_e32 v24, v35, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v30, v30, v31, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v32, v9, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v9
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
 ; GFX10-NEXT:    v_max_f32_e32 v7, v7, v23
-; GFX10-NEXT:    v_lshlrev_b32_e32 v23, 16, v17
-; GFX10-NEXT:    v_max_f32_e32 v28, v48, v28
-; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v1
-; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_bfe_u32 v23, v24, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v36, 0x400000, v24
+; GFX10-NEXT:    v_cmp_u_f32_e64 s4, v24, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v31, v34, v33, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v32, 0x400000, v33
+; GFX10-NEXT:    v_bfe_u32 v34, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v8
+; GFX10-NEXT:    v_bfe_u32 v35, v7, 16, 1
+; GFX10-NEXT:    v_add3_u32 v23, v23, v24, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e64 s5, v7, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v31, v31, v32, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v32, v34, v8, 0x7fff
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v22
+; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v6
+; GFX10-NEXT:    v_add3_u32 v24, v35, v7, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT:    v_max_f32_e32 v8, v34, v8
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_max_f32_e32 v0, v0, v16
-; GFX10-NEXT:    v_bfe_u32 v16, v33, 16, 1
-; GFX10-NEXT:    v_max_f32_e32 v10, v10, v26
-; GFX10-NEXT:    v_lshlrev_b32_e32 v26, 16, v20
-; GFX10-NEXT:    v_max_f32_e32 v34, v34, v51
-; GFX10-NEXT:    v_lshlrev_b32_e32 v51, 16, v4
+; GFX10-NEXT:    v_max_f32_e32 v6, v6, v22
+; GFX10-NEXT:    v_cndmask_b32_e32 v32, v32, v33, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s6, v8, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s7, v6, v6
+; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v15
+; GFX10-NEXT:    v_add3_u32 v7, v35, v8, 0x7fff
+; GFX10-NEXT:    v_max_f32_e32 v35, v38, v37
+; GFX10-NEXT:    v_and_b32_e32 v8, 0xffff0000, v21
+; GFX10-NEXT:    v_bfe_u32 v37, v6, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v6
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v22, s6
+; GFX10-NEXT:    v_bfe_u32 v21, v35, 16, 1
+; GFX10-NEXT:    v_max_f32_e32 v5, v5, v8
+; GFX10-NEXT:    v_add3_u32 v37, v37, v6, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v20
 ; GFX10-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT:    v_add3_u32 v6, v21, v35, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v21, 16, v4
+; GFX10-NEXT:    v_bfe_u32 v48, v5, 16, 1
 ; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT:    v_max_f32_e32 v9, v9, v25
-; GFX10-NEXT:    v_lshlrev_b32_e32 v25, 16, v19
-; GFX10-NEXT:    v_max_f32_e32 v30, v36, v30
-; GFX10-NEXT:    v_lshlrev_b32_e32 v36, 16, v3
+; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v35
+; GFX10-NEXT:    v_cmp_u_f32_e64 s8, v35, v35
+; GFX10-NEXT:    v_max_f32_e32 v8, v21, v8
+; GFX10-NEXT:    v_add3_u32 v21, v48, v5, 0x7fff
+; GFX10-NEXT:    v_max_f32_e32 v4, v4, v20
+; GFX10-NEXT:    v_lshlrev_b32_e32 v48, 16, v19
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v5
+; GFX10-NEXT:    v_bfe_u32 v20, v8, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s9, v5, v5
+; GFX10-NEXT:    v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT:    v_max_f32_e32 v48, v49, v48
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v18
+; GFX10-NEXT:    v_add3_u32 v20, v20, v8, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v8
+; GFX10-NEXT:    v_cmp_u_f32_e64 s10, v8, v8
+; GFX10-NEXT:    v_add3_u32 v5, v5, v4, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT:    v_cmp_u_f32_e64 s11, v4, v4
+; GFX10-NEXT:    v_bfe_u32 v4, v48, 16, 1
+; GFX10-NEXT:    v_max_f32_e32 v49, v51, v49
+; GFX10-NEXT:    v_or_b32_e32 v51, 0x400000, v48
+; GFX10-NEXT:    v_cmp_u_f32_e64 s12, v48, v48
 ; GFX10-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT:    v_add3_u32 v4, v4, v48, 0x7fff
+; GFX10-NEXT:    v_bfe_u32 v48, v49, 16, 1
+; GFX10-NEXT:    v_cmp_u_f32_e64 s13, v49, v49
+; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
+; GFX10-NEXT:    v_max_f32_e32 v3, v3, v19
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v39, s8
+; GFX10-NEXT:    v_add3_u32 v19, v48, v49, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v49
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v17
 ; GFX10-NEXT:    v_max_f32_e32 v2, v2, v18
-; GFX10-NEXT:    v_max_f32_e32 v18, v48, v23
+; GFX10-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v21, v35, s9
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v20, v50, s10
+; GFX10-NEXT:    v_max_f32_e32 v49, v52, v49
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v8, s11
 ; GFX10-NEXT:    v_max_f32_e32 v1, v1, v17
-; GFX10-NEXT:    v_max_f32_e32 v17, v50, v22
-; GFX10-NEXT:    v_or_b32_e32 v22, 0x400000, v33
-; GFX10-NEXT:    v_bfe_u32 v23, v14, 16, 1
-; GFX10-NEXT:    v_add3_u32 v16, v16, v33, 0x7fff
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX10-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
-; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT:    v_max_f32_e32 v4, v4, v20
-; GFX10-NEXT:    v_max_f32_e32 v20, v36, v25
-; GFX10-NEXT:    v_max_f32_e32 v3, v3, v19
-; GFX10-NEXT:    v_max_f32_e32 v19, v38, v24
-; GFX10-NEXT:    v_or_b32_e32 v24, 0x400000, v14
-; GFX10-NEXT:    v_bfe_u32 v25, v35, 16, 1
-; GFX10-NEXT:    v_add3_u32 v23, v23, v14, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v22, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX10-NEXT:    v_max_f32_e32 v5, v5, v21
-; GFX10-NEXT:    v_max_f32_e32 v21, v51, v26
-; GFX10-NEXT:    v_or_b32_e32 v26, 0x400000, v35
-; GFX10-NEXT:    v_bfe_u32 v36, v13, 16, 1
-; GFX10-NEXT:    v_add3_u32 v25, v25, v35, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v23, v23, v24, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v13
-; GFX10-NEXT:    v_bfe_u32 v48, v37, 16, 1
-; GFX10-NEXT:    v_add3_u32 v36, v36, v13, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v50, 0x400000, v37
-; GFX10-NEXT:    v_cndmask_b32_e32 v25, v25, v26, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX10-NEXT:    v_bfe_u32 v51, v12, 16, 1
-; GFX10-NEXT:    v_add3_u32 v48, v48, v37, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v33, 0x400000, v12
-; GFX10-NEXT:    v_bfe_u32 v22, v39, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v36, v36, v38, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX10-NEXT:    v_add3_u32 v51, v51, v12, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v14, 0x400000, v39
-; GFX10-NEXT:    v_bfe_u32 v24, v11, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v39, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v48, v48, v50, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v11
-; GFX10-NEXT:    v_bfe_u32 v26, v49, 16, 1
-; GFX10-NEXT:    v_add3_u32 v24, v24, v11, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v13, 0x400000, v49
-; GFX10-NEXT:    v_cndmask_b32_e32 v33, v51, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX10-NEXT:    v_bfe_u32 v38, v10, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v49, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v37, 0x400000, v10
-; GFX10-NEXT:    v_bfe_u32 v50, v34, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, v22, v14, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX10-NEXT:    v_add3_u32 v38, v38, v10, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v34
-; GFX10-NEXT:    v_bfe_u32 v51, v9, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v34, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v24, v24, v35, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX10-NEXT:    v_or_b32_e32 v39, 0x400000, v9
-; GFX10-NEXT:    v_bfe_u32 v22, v30, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v9, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v11, 0x400000, v30
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v26, v13, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX10-NEXT:    v_bfe_u32 v35, v8, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v30, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v49, 0x400000, v8
-; GFX10-NEXT:    v_bfe_u32 v26, v29, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v37, v38, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX10-NEXT:    v_add3_u32 v35, v35, v8, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v10, 0x400000, v29
-; GFX10-NEXT:    v_bfe_u32 v38, v7, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v29, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v50, v12, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v7
-; GFX10-NEXT:    v_bfe_u32 v50, v28, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v7, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v28
-; GFX10-NEXT:    v_cndmask_b32_e32 v39, v51, v39, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX10-NEXT:    v_bfe_u32 v51, v6, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v28, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v30, 0x400000, v6
-; GFX10-NEXT:    v_lshlrev_b32_e32 v31, 16, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v22, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT:    v_bfe_u32 v22, v27, 16, 1
-; GFX10-NEXT:    v_add3_u32 v51, v51, v6, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v27
-; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v35, v35, v49, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX10-NEXT:    v_bfe_u32 v49, v5, 16, 1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v27, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v29, 0x400000, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v26, v10, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT:    v_bfe_u32 v26, v21, 16, 1
-; GFX10-NEXT:    v_add3_u32 v49, v49, v5, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v7, 0x400000, v21
-; GFX10-NEXT:    v_cndmask_b32_e32 v34, v38, v34, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX10-NEXT:    v_bfe_u32 v38, v4, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v28, 0x400000, v4
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v50, v9, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT:    v_bfe_u32 v50, v20, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v4, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v20
-; GFX10-NEXT:    v_cndmask_b32_e32 v30, v51, v30, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX10-NEXT:    v_add3_u32 v50, v50, v20, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v51, v3, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v27, 0x400000, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v22, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT:    v_bfe_u32 v22, v19, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v19
-; GFX10-NEXT:    v_add3_u32 v51, v51, v3, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v29, v49, v29, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX10-NEXT:    v_add3_u32 v22, v22, v19, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v49, v2, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v21, 0x400000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v26, v7, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT:    v_bfe_u32 v26, v18, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v4, 0x400000, v18
-; GFX10-NEXT:    v_add3_u32 v49, v49, v2, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v28, v38, v28, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX10-NEXT:    v_bfe_u32 v38, v1, 16, 1
-; GFX10-NEXT:    v_add3_u32 v26, v26, v18, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v50, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX10-NEXT:    v_bfe_u32 v50, v17, 16, 1
-; GFX10-NEXT:    v_add3_u32 v38, v38, v1, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v22, v5, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX10-NEXT:    v_bfe_u32 v22, v0, 16, 1
-; GFX10-NEXT:    v_add3_u32 v50, v50, v17, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v18, 0x400000, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v26, v4, vcc_lo
+; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_bfe_u32 v18, v49, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v52, 0x400000, v49
+; GFX10-NEXT:    v_cmp_u_f32_e64 s14, v49, v49
+; GFX10-NEXT:    v_bfe_u32 v39, v1, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v35, 0x400000, v1
+; GFX10-NEXT:    v_add3_u32 v18, v18, v49, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v49, 16, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT:    v_add3_u32 v39, v39, v1, 0x7fff
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_add3_u32 v22, v22, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v38, v20, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v19, v48, s13
+; GFX10-NEXT:    v_max_f32_e32 v17, v49, v17
+; GFX10-NEXT:    v_max_f32_e32 v0, v0, v16
+; GFX10-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v39, v35, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v22, v2, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v49, v17, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v17
+; GFX10-NEXT:    v_bfe_u32 v50, v0, 16, 1
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v50, v19, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v48, 0x400000, v0
+; GFX10-NEXT:    v_add3_u32 v49, v49, v17, 0x7fff
+; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT:    v_add3_u32 v50, v50, v0, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v23, v36, s4
+; GFX10-NEXT:    v_bfe_u32 v36, v3, 16, 1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v49, v8, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_perm_b32 v4, v28, v7, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v7, v34, v10, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v22, v18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v37, v37, v38, s7
+; GFX10-NEXT:    v_or_b32_e32 v38, 0x400000, v2
+; GFX10-NEXT:    v_add3_u32 v22, v22, v2, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v24, v24, v34, s5
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v50, v48, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT:    v_perm_b32 v0, v0, v17, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v49, v21, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v34, 0x400000, v3
+; GFX10-NEXT:    v_add3_u32 v36, v36, v3, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v52, s14
+; GFX10-NEXT:    v_perm_b32 v0, v0, v8, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v22, v38, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_perm_b32 v2, v2, v5, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v51, v27, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v5, v29, v8, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v8, v35, v11, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v3, v3, v6, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v6, v30, v9, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v9, v39, v12, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v51, s12
+; GFX10-NEXT:    v_perm_b32 v1, v1, v18, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v9, v9, v30, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v2, v2, v19, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v36, v34, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v10, v25, v10, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v11, v26, v11, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v12, v27, v12, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v13, v28, v13, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v3, v3, v4, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v4, v5, v20, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v5, v21, v6, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v6, v37, v7, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v7, v24, v23, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v14, v29, v14, 0x7060302
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v32
-; GFX10-NEXT:    v_and_b32_e32 v18, 0xffff0000, v32
-; GFX10-NEXT:    v_max_f32_e32 v17, v31, v17
-; GFX10-NEXT:    v_max_f32_e32 v15, v15, v18
-; GFX10-NEXT:    v_bfe_u32 v10, v17, 16, 1
-; GFX10-NEXT:    v_bfe_u32 v11, v15, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v17
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v16
+; GFX10-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT:    v_max_f32_e32 v17, v33, v8
+; GFX10-NEXT:    v_max_f32_e32 v15, v15, v16
+; GFX10-NEXT:    v_perm_b32 v8, v32, v31, 0x7060302
+; GFX10-NEXT:    v_bfe_u32 v16, v17, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v18, v15, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v17
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX10-NEXT:    v_or_b32_e32 v19, 0x400000, v15
-; GFX10-NEXT:    v_add3_u32 v18, v10, v17, 0x7fff
-; GFX10-NEXT:    v_add3_u32 v11, v11, v15, 0x7fff
-; GFX10-NEXT:    v_perm_b32 v10, v37, v13, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v13, v36, v25, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v18, v12, vcc_lo
+; GFX10-NEXT:    v_or_b32_e32 v20, 0x400000, v15
+; GFX10-NEXT:    v_add3_u32 v16, v16, v17, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v18, v18, v15, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v19, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX10-NEXT:    v_perm_b32 v12, v33, v48, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v11, v19, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v11, v24, v14, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v14, v23, v16, 0x7060302
-; GFX10-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
+; GFX10-NEXT:    v_cndmask_b32_e32 v15, v18, v20, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v15, v15, v16, 0x7060302
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_maxnum_v32bf16:
@@ -41304,136 +41304,136 @@ define <16 x bfloat> @v_vselect_v16bf16(<16 x i1> %cond, <16 x bfloat> %a, <16 x
 ; GFX7-LABEL: v_vselect_v16bf16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_and_b32_e32 v8, 1, v8
-; GFX7-NEXT:    v_and_b32_e32 v7, 1, v7
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[16:17], 1, v8
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[14:15], 1, v7
-; GFX7-NEXT:    buffer_load_dword v7, off, s[0:3], s32
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:64
-; GFX7-NEXT:    v_and_b32_e32 v15, 1, v15
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[12:13], 1, v15
-; GFX7-NEXT:    v_and_b32_e32 v14, 1, v14
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[10:11], 1, v14
-; GFX7-NEXT:    v_and_b32_e32 v13, 1, v13
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[8:9], 1, v13
-; GFX7-NEXT:    v_and_b32_e32 v12, 1, v12
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[6:7], 1, v12
-; GFX7-NEXT:    v_and_b32_e32 v11, 1, v11
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v11
-; GFX7-NEXT:    v_and_b32_e32 v10, 1, v10
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v10
-; GFX7-NEXT:    v_and_b32_e32 v6, 1, v6
-; GFX7-NEXT:    v_and_b32_e32 v5, 1, v5
-; GFX7-NEXT:    v_and_b32_e32 v9, 1, v9
-; GFX7-NEXT:    v_cmp_eq_u32_e64 s[18:19], 1, v9
-; GFX7-NEXT:    v_and_b32_e32 v4, 1, v4
-; GFX7-NEXT:    v_mul_f32_e32 v20, 1.0, v20
-; GFX7-NEXT:    v_and_b32_e32 v3, 1, v3
-; GFX7-NEXT:    v_mul_f32_e32 v19, 1.0, v19
-; GFX7-NEXT:    v_and_b32_e32 v2, 1, v2
-; GFX7-NEXT:    v_mul_f32_e32 v18, 1.0, v18
-; GFX7-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GFX7-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX7-NEXT:    v_mul_f32_e32 v17, 1.0, v17
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v1
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v2
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[6:7], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v3
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[8:9], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v4
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[10:11], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v5
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[12:13], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v6
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[14:15], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v7
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[16:17], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v8
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[18:19], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v9
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[20:21], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v10
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[22:23], 1, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 1, v11
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[24:25], 1, v0
+; GFX7-NEXT:    buffer_load_dword v0, off, s[0:3], s32
+; GFX7-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:64
+; GFX7-NEXT:    v_and_b32_e32 v2, 1, v12
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[26:27], 1, v2
+; GFX7-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:60
+; GFX7-NEXT:    v_and_b32_e32 v3, 1, v13
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[28:29], 1, v3
+; GFX7-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:56
+; GFX7-NEXT:    v_and_b32_e32 v4, 1, v14
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[40:41], 1, v4
+; GFX7-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:52
+; GFX7-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:48
+; GFX7-NEXT:    v_and_b32_e32 v4, 1, v15
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[42:43], 1, v4
 ; GFX7-NEXT:    v_mul_f32_e32 v16, 1.0, v16
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v7
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v15, v8, v7, s[12:13]
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:60
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v30
-; GFX7-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v14, v8, v7, s[10:11]
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:56
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v29
-; GFX7-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v13, v8, v7, s[8:9]
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:52
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v28
-; GFX7-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v12, v8, v7, s[6:7]
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:48
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v27
-; GFX7-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v11, v8, v7, s[4:5]
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:44
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v26
+; GFX7-NEXT:    v_mul_f32_e32 v17, 1.0, v17
+; GFX7-NEXT:    v_mul_f32_e32 v18, 1.0, v18
+; GFX7-NEXT:    v_mul_f32_e32 v19, 1.0, v19
+; GFX7-NEXT:    v_mul_f32_e32 v20, 1.0, v20
+; GFX7-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:40
+; GFX7-NEXT:    s_waitcnt vmcnt(6)
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    s_waitcnt vmcnt(5)
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v15, v1, v0, s[42:43]
+; GFX7-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:44
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v30
+; GFX7-NEXT:    s_waitcnt vmcnt(5)
+; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT:    v_cndmask_b32_e64 v14, v2, v1, s[40:41]
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v29
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT:    v_cndmask_b32_e64 v13, v2, v1, s[28:29]
+; GFX7-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:36
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v28
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v5
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v2, v1, s[26:27]
+; GFX7-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:32
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v27
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v6
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v5, v1, s[24:25]
+; GFX7-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:28
+; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v26
 ; GFX7-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e32 v10, v8, v7, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v6
-; GFX7-NEXT:    v_mul_f32_e32 v6, 1.0, v22
-; GFX7-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:28
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:40
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v25
-; GFX7-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v22, 1.0, v22
-; GFX7-NEXT:    v_cndmask_b32_e32 v6, v22, v6, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v5
+; GFX7-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v4, 1.0, v4
+; GFX7-NEXT:    s_waitcnt vmcnt(3)
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v10, v0, v5, s[22:23]
+; GFX7-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:24
+; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v25
+; GFX7-NEXT:    v_cndmask_b32_e64 v9, v4, v5, s[20:21]
+; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v24
+; GFX7-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:4
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT:    v_cndmask_b32_e64 v8, v3, v5, s[18:19]
+; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v23
+; GFX7-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT:    v_cndmask_b32_e64 v7, v2, v5, s[16:17]
+; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v22
+; GFX7-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:12
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, v1, v5, s[14:15]
 ; GFX7-NEXT:    v_mul_f32_e32 v5, 1.0, v21
-; GFX7-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:24
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v9, v8, v7, s[18:19]
-; GFX7-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:36
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v24
+; GFX7-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:16
 ; GFX7-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX7-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v21, 1.0, v21
-; GFX7-NEXT:    v_cndmask_b32_e32 v5, v21, v5, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v4
-; GFX7-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v8, 1.0, v8
-; GFX7-NEXT:    v_cndmask_b32_e64 v8, v8, v7, s[16:17]
-; GFX7-NEXT:    v_mul_f32_e32 v7, 1.0, v23
-; GFX7-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:32
+; GFX7-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v5, v0, v5, s[12:13]
+; GFX7-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:20
 ; GFX7-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX7-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NEXT:    s_waitcnt vmcnt(4)
 ; GFX7-NEXT:    v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v20, vcc
-; GFX7-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:16
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v3
-; GFX7-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
-; GFX7-NEXT:    s_waitcnt vmcnt(2)
-; GFX7-NEXT:    v_mul_f32_e32 v23, 1.0, v23
-; GFX7-NEXT:    v_cndmask_b32_e64 v7, v23, v7, s[14:15]
-; GFX7-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX7-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v20, 1.0, v20
-; GFX7-NEXT:    v_cndmask_b32_e32 v19, v20, v19, vcc
-; GFX7-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:4
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8
-; GFX7-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-NEXT:    s_waitcnt vmcnt(3)
 ; GFX7-NEXT:    v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v18, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_mul_f32_e32 v18, 1.0, v20
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_waitcnt vmcnt(2)
 ; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v2, v17, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v18, v16, vcc
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v2, v18, s[6:7]
+; GFX7-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v19, v1, v19, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v3, v17, s[4:5]
 ; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX7-NEXT:    v_and_b32_e32 v2, 0xffff0000, v3
 ; GFX7-NEXT:    v_and_b32_e32 v3, 0xffff0000, v19
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v20, v0, v20, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v4, v16, vcc
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v4, 0xffff0000, v20
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_vselect_v16bf16:
@@ -41486,19 +41486,19 @@ define <16 x bfloat> @v_vselect_v16bf16(<16 x i1> %cond, <16 x bfloat> %a, <16 x
 ; GFX8-NEXT:    v_cndmask_b32_e64 v7, v30, v22, s[26:27]
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
 ; GFX8-NEXT:    v_cndmask_b32_e64 v8, v29, v21, s[22:23]
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, v28, v20, s[18:19]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v28, v20, s[18:19]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v12, v27, v19, s[14:15]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v13, v26, v18, s[10:11]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v14, v25, v17, s[6:7]
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX8-NEXT:    v_or_b32_sdwa v6, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_sdwa v4, v9, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v4, v11, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_cndmask_b32_e64 v10, v0, v23, s[40:41]
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, v0, v23, s[40:41]
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; GFX8-NEXT:    v_cndmask_b32_e64 v11, v0, v1, s[42:43]
+; GFX8-NEXT:    v_cndmask_b32_e64 v10, v0, v1, s[42:43]
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 16, v19
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 16, v27
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v1, v0, s[16:17]
@@ -41515,153 +41515,153 @@ define <16 x bfloat> @v_vselect_v16bf16(<16 x i1> %cond, <16 x bfloat> %a, <16 x
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v11
+; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v10
 ; GFX8-NEXT:    v_or_b32_sdwa v0, v15, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v1, v14, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v13, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_sdwa v7, v10, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v7, v9, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_vselect_v16bf16:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_and_b32_e32 v12, 1, v12
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
-; GFX9-NEXT:    v_and_b32_e32 v13, 1, v13
-; GFX9-NEXT:    v_cndmask_b32_e32 v12, v30, v22, vcc
-; GFX9-NEXT:    v_lshrrev_b32_e32 v22, 16, v22
-; GFX9-NEXT:    v_lshrrev_b32_e32 v30, 16, v30
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
-; GFX9-NEXT:    v_and_b32_e32 v10, 1, v10
-; GFX9-NEXT:    v_cndmask_b32_e32 v13, v30, v22, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v10
-; GFX9-NEXT:    v_and_b32_e32 v10, 1, v11
-; GFX9-NEXT:    v_cndmask_b32_e32 v11, v29, v21, vcc
-; GFX9-NEXT:    v_lshrrev_b32_e32 v21, 16, v21
-; GFX9-NEXT:    v_lshrrev_b32_e32 v22, 16, v29
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v10
-; GFX9-NEXT:    v_cndmask_b32_e32 v10, v22, v21, vcc
-; GFX9-NEXT:    buffer_load_dword v21, off, s[0:3], s32
-; GFX9-NEXT:    v_and_b32_e32 v8, 1, v8
-; GFX9-NEXT:    v_and_b32_e32 v9, 1, v9
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v8
-; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 16, v20
-; GFX9-NEXT:    v_cndmask_b32_e32 v20, v28, v20, vcc
-; GFX9-NEXT:    v_lshrrev_b32_e32 v22, 16, v28
 ; GFX9-NEXT:    v_and_b32_e32 v6, 1, v6
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
-; GFX9-NEXT:    v_and_b32_e32 v7, 1, v7
-; GFX9-NEXT:    v_cndmask_b32_e32 v8, v22, v8, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v6
-; GFX9-NEXT:    v_lshrrev_b32_e32 v9, 16, v19
-; GFX9-NEXT:    v_lshrrev_b32_e32 v22, 16, v27
+; GFX9-NEXT:    v_and_b32_e32 v6, 1, v8
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v6
+; GFX9-NEXT:    v_and_b32_e32 v6, 1, v10
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], 1, v6
+; GFX9-NEXT:    v_and_b32_e32 v6, 1, v12
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], 1, v6
+; GFX9-NEXT:    v_and_b32_e32 v8, 1, v13
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v30, v22, s[8:9]
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], 1, v8
+; GFX9-NEXT:    buffer_load_dword v8, off, s[0:3], s32
+; GFX9-NEXT:    v_lshrrev_b32_e32 v10, 16, v22
+; GFX9-NEXT:    v_lshrrev_b32_e32 v12, 16, v30
+; GFX9-NEXT:    v_and_b32_e32 v11, 1, v11
+; GFX9-NEXT:    v_and_b32_e32 v9, 1, v9
+; GFX9-NEXT:    v_and_b32_e32 v7, 1, v7
 ; GFX9-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT:    v_and_b32_e32 v13, 1, v14
+; GFX9-NEXT:    v_cndmask_b32_e64 v10, v12, v10, s[8:9]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v12, 16, v21
+; GFX9-NEXT:    v_cndmask_b32_e64 v14, v29, v21, s[6:7]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v21, 16, v29
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], 1, v11
+; GFX9-NEXT:    v_lshrrev_b32_e32 v11, 16, v20
+; GFX9-NEXT:    v_cndmask_b32_e64 v20, v28, v20, s[4:5]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v22, 16, v19
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v9
+; GFX9-NEXT:    v_lshrrev_b32_e32 v9, 16, v27
 ; GFX9-NEXT:    v_cndmask_b32_e32 v19, v27, v19, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX9-NEXT:    v_cndmask_b32_e64 v12, v21, v12, s[6:7]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v21, 16, v28
 ; GFX9-NEXT:    v_and_b32_e32 v5, 1, v5
-; GFX9-NEXT:    v_cndmask_b32_e32 v9, v22, v9, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v9, v9, v22, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v4
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v18
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v21, v11, s[4:5]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v21, 16, v18
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v27, 16, v26
-; GFX9-NEXT:    v_and_b32_e32 v14, 1, v14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v26, v18, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v5
 ; GFX9-NEXT:    v_and_b32_e32 v15, 1, v15
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v27, v6, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
-; GFX9-NEXT:    v_and_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v27, v21, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v7, 16, v23
+; GFX9-NEXT:    v_and_b32_e32 v2, 1, v2
 ; GFX9-NEXT:    v_and_b32_e32 v3, 1, v3
 ; GFX9-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GFX9-NEXT:    s_mov_b32 s4, 0x5040100
+; GFX9-NEXT:    v_perm_b32 v6, v10, v6, s4
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_cndmask_b32_e32 v14, v21, v23, vcc
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v21
+; GFX9-NEXT:    v_cndmask_b32_e32 v13, v8, v23, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
-; GFX9-NEXT:    v_cndmask_b32_e32 v7, v6, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v7, v8, v7, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v25, v17, vcc
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v17
+; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 16, v17
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v15, 16, v25
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v3
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v15, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v15, v8, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v24, v16, vcc
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v16
+; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 16, v16
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v15, 16, v24
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v15, v6, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v15, v8, vcc
 ; GFX9-NEXT:    v_perm_b32 v0, v1, v0, s4
 ; GFX9-NEXT:    v_perm_b32 v1, v3, v2, s4
 ; GFX9-NEXT:    v_perm_b32 v2, v5, v4, s4
 ; GFX9-NEXT:    v_perm_b32 v3, v9, v19, s4
-; GFX9-NEXT:    v_perm_b32 v4, v8, v20, s4
-; GFX9-NEXT:    v_perm_b32 v5, v10, v11, s4
-; GFX9-NEXT:    v_perm_b32 v6, v13, v12, s4
-; GFX9-NEXT:    v_perm_b32 v7, v7, v14, s4
+; GFX9-NEXT:    v_perm_b32 v4, v11, v20, s4
+; GFX9-NEXT:    v_perm_b32 v5, v12, v14, s4
+; GFX9-NEXT:    v_perm_b32 v7, v7, v13, s4
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_vselect_v16bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT:    v_and_b32_e32 v12, 1, v12
 ; GFX10-NEXT:    v_and_b32_e32 v13, 1, v13
 ; GFX10-NEXT:    v_and_b32_e32 v10, 1, v10
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v33, 16, v22
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v34, 16, v30
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
 ; GFX10-NEXT:    v_and_b32_e32 v11, 1, v11
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v13
 ; GFX10-NEXT:    v_and_b32_e32 v8, 1, v8
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v35, 16, v21
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v29
-; GFX10-NEXT:    v_cndmask_b32_e32 v22, v30, v22, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v13
 ; GFX10-NEXT:    v_and_b32_e32 v9, 1, v9
+; GFX10-NEXT:    v_cndmask_b32_e32 v33, v34, v33, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v10
 ; GFX10-NEXT:    v_and_b32_e32 v6, 1, v6
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v20
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v38, 16, v28
-; GFX10-NEXT:    v_cndmask_b32_e32 v33, v34, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v10
 ; GFX10-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v29, v21, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v11
 ; GFX10-NEXT:    v_and_b32_e32 v2, 1, v2
 ; GFX10-NEXT:    v_and_b32_e32 v3, 1, v3
 ; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v29, v21, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v11
+; GFX10-NEXT:    v_and_b32_e32 v12, 1, v12
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v36, v35, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v51, 16, v17
-; GFX10-NEXT:    v_lshrrev_b32_e32 v12, 16, v25
+; GFX10-NEXT:    v_lshrrev_b32_e32 v13, 16, v25
 ; GFX10-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v12
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v28, v20, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v9
 ; GFX10-NEXT:    v_and_b32_e32 v5, 1, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v36, v35, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v30, 16, v16
-; GFX10-NEXT:    v_lshrrev_b32_e32 v13, 16, v24
+; GFX10-NEXT:    v_lshrrev_b32_e32 v12, 16, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v22, v30, v22, s4
+; GFX10-NEXT:    v_lshrrev_b32_e32 v30, 16, v24
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v38, v37, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v6
 ; GFX10-NEXT:    v_and_b32_e32 v7, 1, v7
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v49, 16, v18
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v28, v20, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v9
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v50, 16, v26
 ; GFX10-NEXT:    v_and_b32_e32 v14, 1, v14
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v27, v19, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v4
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v39, 16, v19
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v48, 16, v27
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v38, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v6
 ; GFX10-NEXT:    v_and_b32_e32 v15, 1, v15
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v32, 16, v23
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v27, v19, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v4
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v26, v18, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
 ; GFX10-NEXT:    v_cndmask_b32_e32 v2, v25, v17, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v12, v51, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v13, v51, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v24, v16, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v13, v30, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v30, v12, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v5
 ; GFX10-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
 ; GFX10-NEXT:    v_cndmask_b32_e32 v5, v50, v49, vcc_lo
@@ -42581,35 +42581,35 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
 ; GFX8-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:44
 ; GFX8-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:112
 ; GFX8-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:48
-; GFX8-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:116
-; GFX8-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:52
-; GFX8-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:120
-; GFX8-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:56
-; GFX8-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:124
-; GFX8-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
-; GFX8-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:128
-; GFX8-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX8-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:116
+; GFX8-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:52
+; GFX8-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:120
+; GFX8-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:56
+; GFX8-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:124
+; GFX8-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:60
+; GFX8-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:128
+; GFX8-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:64
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v29
+; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v25
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v28, 16, v32
-; GFX8-NEXT:    v_cndmask_b32_e64 v28, v33, v28, s[38:39]
-; GFX8-NEXT:    v_cndmask_b32_e64 v29, v29, v32, s[36:37]
-; GFX8-NEXT:    v_lshrrev_b32_e32 v32, 16, v31
-; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v30
-; GFX8-NEXT:    v_cndmask_b32_e64 v32, v33, v32, s[34:35]
-; GFX8-NEXT:    v_cndmask_b32_e64 v30, v30, v31, s[30:31]
-; GFX8-NEXT:    v_lshrrev_b32_e32 v31, 16, v27
-; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v26
-; GFX8-NEXT:    v_cndmask_b32_e64 v31, v33, v31, s[90:91]
-; GFX8-NEXT:    v_cndmask_b32_e64 v26, v26, v27, s[88:89]
-; GFX8-NEXT:    v_lshrrev_b32_e32 v27, 16, v25
-; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v24
-; GFX8-NEXT:    v_cndmask_b32_e64 v27, v33, v27, s[78:79]
-; GFX8-NEXT:    v_cndmask_b32_e64 v24, v24, v25, s[76:77]
-; GFX8-NEXT:    v_lshrrev_b32_e32 v25, 16, v23
+; GFX8-NEXT:    v_lshrrev_b32_e32 v24, 16, v26
+; GFX8-NEXT:    v_cndmask_b32_e64 v24, v33, v24, s[38:39]
+; GFX8-NEXT:    v_cndmask_b32_e64 v25, v25, v26, s[36:37]
+; GFX8-NEXT:    v_lshrrev_b32_e32 v26, 16, v28
+; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v27
+; GFX8-NEXT:    v_cndmask_b32_e64 v26, v33, v26, s[34:35]
+; GFX8-NEXT:    v_cndmask_b32_e64 v27, v27, v28, s[30:31]
+; GFX8-NEXT:    v_lshrrev_b32_e32 v28, 16, v30
+; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v29
+; GFX8-NEXT:    v_cndmask_b32_e64 v28, v33, v28, s[90:91]
+; GFX8-NEXT:    v_cndmask_b32_e64 v29, v29, v30, s[88:89]
+; GFX8-NEXT:    v_lshrrev_b32_e32 v30, 16, v32
+; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v31
+; GFX8-NEXT:    v_cndmask_b32_e64 v30, v33, v30, s[78:79]
+; GFX8-NEXT:    v_cndmask_b32_e64 v31, v31, v32, s[76:77]
+; GFX8-NEXT:    v_lshrrev_b32_e32 v32, 16, v23
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v22
-; GFX8-NEXT:    v_cndmask_b32_e64 v25, v33, v25, s[74:75]
+; GFX8-NEXT:    v_cndmask_b32_e64 v32, v33, v32, s[74:75]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v22, v22, v23, s[72:73]
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v23, 16, v21
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v33, 16, v20
@@ -42674,19 +42674,19 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v8, 16, v19
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v9, 16, v21
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v10, 16, v23
-; GFX8-NEXT:    v_lshlrev_b32_e32 v11, 16, v25
-; GFX8-NEXT:    v_lshlrev_b32_e32 v12, 16, v27
-; GFX8-NEXT:    v_lshlrev_b32_e32 v13, 16, v31
-; GFX8-NEXT:    v_lshlrev_b32_e32 v14, 16, v32
-; GFX8-NEXT:    v_lshlrev_b32_e32 v15, 16, v28
+; GFX8-NEXT:    v_lshlrev_b32_e32 v11, 16, v32
+; GFX8-NEXT:    v_lshlrev_b32_e32 v12, 16, v30
+; GFX8-NEXT:    v_lshlrev_b32_e32 v13, 16, v28
+; GFX8-NEXT:    v_lshlrev_b32_e32 v14, 16, v26
+; GFX8-NEXT:    v_lshlrev_b32_e32 v15, 16, v24
 ; GFX8-NEXT:    v_or_b32_sdwa v8, v16, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v9, v18, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v10, v20, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v11, v22, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_sdwa v12, v24, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_sdwa v13, v26, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_sdwa v14, v30, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_or_b32_sdwa v15, v29, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v12, v31, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v13, v29, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v14, v27, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_sdwa v15, v25, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_readlane_b32 s39, v34, 7
 ; GFX8-NEXT:    v_readlane_b32 s38, v34, 6
 ; GFX8-NEXT:    v_readlane_b32 s37, v34, 5
@@ -42806,19 +42806,19 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
 ; GFX9-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:120
 ; GFX9-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:56
 ; GFX9-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:124
-; GFX9-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:60
+; GFX9-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:60
 ; GFX9-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:128
 ; GFX9-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:64
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_cndmask_b32_e64 v29, v31, v32, s[34:35]
+; GFX9-NEXT:    v_cndmask_b32_e64 v30, v31, v32, s[34:35]
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v32, 16, v32
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v31, 16, v31
 ; GFX9-NEXT:    v_cndmask_b32_e64 v31, v31, v32, s[30:31]
-; GFX9-NEXT:    v_cndmask_b32_e64 v32, v28, v30, s[94:95]
-; GFX9-NEXT:    v_lshrrev_b32_e32 v30, 16, v30
+; GFX9-NEXT:    v_cndmask_b32_e64 v32, v28, v29, s[94:95]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v29, 16, v29
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v28, 16, v28
-; GFX9-NEXT:    v_cndmask_b32_e64 v28, v28, v30, s[92:93]
-; GFX9-NEXT:    v_cndmask_b32_e64 v30, v26, v27, s[90:91]
+; GFX9-NEXT:    v_cndmask_b32_e64 v28, v28, v29, s[92:93]
+; GFX9-NEXT:    v_cndmask_b32_e64 v29, v26, v27, s[90:91]
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v27, 16, v27
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v26, 16, v26
 ; GFX9-NEXT:    v_cndmask_b32_e64 v26, v26, v27, s[88:89]
@@ -42888,9 +42888,9 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
 ; GFX9-NEXT:    v_perm_b32 v10, v20, v23, s4
 ; GFX9-NEXT:    v_perm_b32 v11, v22, v25, s4
 ; GFX9-NEXT:    v_perm_b32 v12, v24, v27, s4
-; GFX9-NEXT:    v_perm_b32 v13, v26, v30, s4
+; GFX9-NEXT:    v_perm_b32 v13, v26, v29, s4
 ; GFX9-NEXT:    v_perm_b32 v14, v28, v32, s4
-; GFX9-NEXT:    v_perm_b32 v15, v31, v29, s4
+; GFX9-NEXT:    v_perm_b32 v15, v31, v30, s4
 ; GFX9-NEXT:    v_readlane_b32 s35, v33, 3
 ; GFX9-NEXT:    v_readlane_b32 s34, v33, 2
 ; GFX9-NEXT:    v_readlane_b32 s31, v33, 1
@@ -42904,206 +42904,186 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
 ; GFX10-LABEL: v_vselect_v32bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    s_clause 0xa
-; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:28
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:92
-; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:40
-; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:104
-; GFX10-NEXT:    buffer_load_ushort v35, off, s[0:3], s32
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:128
-; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:64
-; GFX10-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:96
-; GFX10-NEXT:    buffer_load_dword v39, off, s[0:3], s32 offset:108
-; GFX10-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:44
-; GFX10-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:112
-; GFX10-NEXT:    v_and_b32_e32 v30, 1, v30
-; GFX10-NEXT:    v_and_b32_e32 v18, 1, v18
-; GFX10-NEXT:    v_and_b32_e32 v12, 1, v12
-; GFX10-NEXT:    v_and_b32_e32 v13, 1, v13
-; GFX10-NEXT:    v_and_b32_e32 v19, 1, v19
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v30
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 1, v18
-; GFX10-NEXT:    v_and_b32_e32 v28, 1, v28
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v13
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 1, v19
-; GFX10-NEXT:    v_and_b32_e32 v26, 1, v26
-; GFX10-NEXT:    v_and_b32_e32 v24, 1, v24
-; GFX10-NEXT:    v_and_b32_e32 v22, 1, v22
-; GFX10-NEXT:    v_and_b32_e32 v20, 1, v20
-; GFX10-NEXT:    v_and_b32_e32 v21, 1, v21
-; GFX10-NEXT:    v_and_b32_e32 v16, 1, v16
-; GFX10-NEXT:    v_and_b32_e32 v14, 1, v14
-; GFX10-NEXT:    v_and_b32_e32 v17, 1, v17
-; GFX10-NEXT:    v_and_b32_e32 v15, 1, v15
-; GFX10-NEXT:    v_and_b32_e32 v10, 1, v10
-; GFX10-NEXT:    v_and_b32_e32 v8, 1, v8
-; GFX10-NEXT:    v_and_b32_e32 v6, 1, v6
-; GFX10-NEXT:    v_and_b32_e32 v4, 1, v4
-; GFX10-NEXT:    v_and_b32_e32 v2, 1, v2
 ; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX10-NEXT:    v_and_b32_e32 v11, 1, v11
-; GFX10-NEXT:    v_and_b32_e32 v7, 1, v7
-; GFX10-NEXT:    v_and_b32_e32 v3, 1, v3
 ; GFX10-NEXT:    v_and_b32_e32 v1, 1, v1
-; GFX10-NEXT:    v_and_b32_e32 v5, 1, v5
-; GFX10-NEXT:    v_and_b32_e32 v9, 1, v9
-; GFX10-NEXT:    s_waitcnt vmcnt(10)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v30, 16, v31
-; GFX10-NEXT:    s_waitcnt vmcnt(9)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v50, 16, v32
-; GFX10-NEXT:    s_waitcnt vmcnt(8)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v13, 16, v33
-; GFX10-NEXT:    s_waitcnt vmcnt(7)
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v34, v33, s6
-; GFX10-NEXT:    s_waitcnt vmcnt(6)
-; GFX10-NEXT:    v_and_b32_e32 v35, 1, v35
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 1, v12
-; GFX10-NEXT:    s_waitcnt vmcnt(4)
-; GFX10-NEXT:    v_cndmask_b32_e32 v54, v36, v37, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v35
-; GFX10-NEXT:    v_lshrrev_b32_e32 v51, 16, v34
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v32, v31, s6
-; GFX10-NEXT:    s_clause 0x6
-; GFX10-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:68
-; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:72
-; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:8
-; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:76
-; GFX10-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:12
-; GFX10-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:80
-; GFX10-NEXT:    v_cndmask_b32_e64 v30, v50, v30, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v35, v36, v37, vcc_lo
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:124
-; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:60
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v28
-; GFX10-NEXT:    v_and_b32_e32 v28, 1, v29
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v51, v13, s5
-; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v50, 16, v52
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v29, v36, v37, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v28
-; GFX10-NEXT:    v_cndmask_b32_e32 v28, v36, v37, vcc_lo
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:120
-; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:56
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v26
-; GFX10-NEXT:    v_and_b32_e32 v26, 1, v27
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v27, v36, v37, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v26
-; GFX10-NEXT:    v_cndmask_b32_e32 v26, v36, v37, vcc_lo
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:116
-; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:52
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v24
-; GFX10-NEXT:    v_and_b32_e32 v24, 1, v25
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v25, v36, v37, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v24
-; GFX10-NEXT:    v_cndmask_b32_e32 v24, v36, v37, vcc_lo
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:48
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v22
-; GFX10-NEXT:    v_and_b32_e32 v22, 1, v23
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v49
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v23, v49, v36, vcc_lo
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v22
-; GFX10-NEXT:    v_lshrrev_b32_e32 v49, 16, v53
-; GFX10-NEXT:    v_cndmask_b32_e32 v22, v37, v36, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v20
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v48
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v39
-; GFX10-NEXT:    v_cndmask_b32_e32 v20, v39, v48, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v21
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v39, off, s[0:3], s32 offset:32
-; GFX10-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:16
-; GFX10-NEXT:    v_cndmask_b32_e32 v21, v37, v36, vcc_lo
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:100
-; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:36
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v3
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v5
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s7, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s8, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v7
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s9, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v6
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s10, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v9
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s11, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v8
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s12, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v11
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s13, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v10
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s14, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v13
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s15, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v12
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s16, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v15
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s17, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v14
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s18, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v17
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s19, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s20, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v19
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s21, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s22, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v21
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s23, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v20
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s24, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v23
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s25, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v22
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s26, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v25
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s27, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v24
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s28, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v27
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s29, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v26
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s40, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v29
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s41, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v28
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s42, 1, v0
+; GFX10-NEXT:    buffer_load_ushort v0, off, s[0:3], s32
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v36, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v14
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
-; GFX10-NEXT:    v_cndmask_b32_e32 v14, v38, v39, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v17
-; GFX10-NEXT:    v_lshrrev_b32_e32 v39, 16, v39
-; GFX10-NEXT:    v_lshrrev_b32_e32 v38, 16, v38
-; GFX10-NEXT:    v_cndmask_b32_e32 v17, v36, v37, vcc_lo
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:88
-; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:24
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v15
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v38, v39, vcc_lo
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:84
-; GFX10-NEXT:    buffer_load_dword v39, off, s[0:3], s32 offset:20
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v10
-; GFX10-NEXT:    s_waitcnt vmcnt(2)
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v36, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v37, 16, v37
-; GFX10-NEXT:    v_lshrrev_b32_e32 v36, 16, v36
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s43, 1, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 1, v30
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s44, 1, v0
+; GFX10-NEXT:    s_clause 0x1f
+; GFX10-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:68
+; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4
+; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:72
+; GFX10-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX10-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:76
+; GFX10-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:12
+; GFX10-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:80
+; GFX10-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:16
+; GFX10-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:84
+; GFX10-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:20
+; GFX10-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:88
+; GFX10-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:24
+; GFX10-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:92
+; GFX10-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:28
+; GFX10-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:96
+; GFX10-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:32
+; GFX10-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:100
+; GFX10-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:36
+; GFX10-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:104
+; GFX10-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:40
+; GFX10-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:108
+; GFX10-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:44
+; GFX10-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:112
+; GFX10-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:48
+; GFX10-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:116
+; GFX10-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:52
+; GFX10-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:120
+; GFX10-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:56
+; GFX10-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:124
+; GFX10-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:60
+; GFX10-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:128
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:64
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v38, v39, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v6
-; GFX10-NEXT:    v_lshrrev_b32_e32 v39, 16, v39
-; GFX10-NEXT:    v_lshrrev_b32_e32 v38, 16, v38
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v53, v48, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v4
-; GFX10-NEXT:    v_lshrrev_b32_e32 v48, 16, v48
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v34, v52, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
-; GFX10-NEXT:    v_lshrrev_b32_e32 v34, 16, v34
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v32, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    v_lshrrev_b32_e32 v33, 16, v33
-; GFX10-NEXT:    v_lshrrev_b32_e32 v32, 16, v32
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v19, v31, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v11
+; GFX10-NEXT:    v_cndmask_b32_e64 v32, v30, v31, s44
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v31, 16, v31
+; GFX10-NEXT:    v_lshrrev_b32_e32 v30, 16, v30
+; GFX10-NEXT:    v_cndmask_b32_e64 v30, v30, v31, s43
+; GFX10-NEXT:    v_cndmask_b32_e64 v31, v28, v29, s42
+; GFX10-NEXT:    v_lshrrev_b32_e32 v29, 16, v29
+; GFX10-NEXT:    v_lshrrev_b32_e32 v28, 16, v28
+; GFX10-NEXT:    v_cndmask_b32_e64 v28, v28, v29, s41
+; GFX10-NEXT:    v_cndmask_b32_e64 v29, v26, v27, s40
+; GFX10-NEXT:    v_lshrrev_b32_e32 v27, 16, v27
+; GFX10-NEXT:    v_lshrrev_b32_e32 v26, 16, v26
+; GFX10-NEXT:    v_cndmask_b32_e64 v26, v26, v27, s29
+; GFX10-NEXT:    v_cndmask_b32_e64 v27, v24, v25, s28
+; GFX10-NEXT:    v_lshrrev_b32_e32 v25, 16, v25
+; GFX10-NEXT:    v_lshrrev_b32_e32 v24, 16, v24
+; GFX10-NEXT:    v_cndmask_b32_e64 v24, v24, v25, s27
+; GFX10-NEXT:    v_cndmask_b32_e64 v25, v22, v23, s26
+; GFX10-NEXT:    v_lshrrev_b32_e32 v23, 16, v23
+; GFX10-NEXT:    v_lshrrev_b32_e32 v22, 16, v22
+; GFX10-NEXT:    v_cndmask_b32_e64 v22, v22, v23, s25
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v20, v21, s24
+; GFX10-NEXT:    v_lshrrev_b32_e32 v21, 16, v21
+; GFX10-NEXT:    v_lshrrev_b32_e32 v20, 16, v20
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v20, v21, s23
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v18, v19, s22
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v19, 16, v19
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v36, v37, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v7
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v49, v48, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v32, v33, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v19, v31, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v5
-; GFX10-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v34, v50, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v9
-; GFX10-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v3, v7, v6, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v6, v30, v12, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v2, v5, v4, 0x5040100
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v38, v39, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v5, v11, v10, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v7, v15, v14, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v10, v21, v20, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v11, v22, v23, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v4, v9, v8, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v8, v17, v16, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v9, v13, v18, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v12, v24, v25, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v13, v26, v27, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v14, v28, v29, 0x5040100
-; GFX10-NEXT:    v_perm_b32 v15, v35, v54, 0x5040100
+; GFX10-NEXT:    v_lshrrev_b32_e32 v18, 16, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v19, s21
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v16, v17, s20
+; GFX10-NEXT:    v_lshrrev_b32_e32 v17, 16, v17
+; GFX10-NEXT:    v_lshrrev_b32_e32 v16, 16, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, v17, s19
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v14, v15, s18
+; GFX10-NEXT:    v_lshrrev_b32_e32 v15, 16, v15
+; GFX10-NEXT:    v_lshrrev_b32_e32 v14, 16, v14
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v15, s17
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v12, v13, s16
+; GFX10-NEXT:    v_lshrrev_b32_e32 v13, 16, v13
+; GFX10-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v13, s15
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v10, v11, s14
+; GFX10-NEXT:    v_lshrrev_b32_e32 v11, 16, v11
+; GFX10-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v11, s13
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v8, v9, s12
+; GFX10-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
+; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v9, s11
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v6, v7, s10
+; GFX10-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
+; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s9
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v4, v5, s8
+; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 16, v4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v5, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v2, v3, s6
+; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX10-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v0, v1, s4
+; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v1, v2, v5, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v2, v4, v7, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v4, v8, v11, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v5, v10, v13, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v0, v0, v3, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v3, v6, v9, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v6, v12, v15, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v7, v14, v17, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v8, v16, v19, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v9, v18, v21, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v10, v20, v23, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v11, v22, v25, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v12, v24, v27, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v13, v26, v29, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v14, v28, v31, 0x5040100
+; GFX10-NEXT:    v_perm_b32 v15, v30, v32, 0x5040100
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_vselect_v32bf16:
@@ -45532,14 +45512,14 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX11FAKE16-NEXT:    v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
 ; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
 ; GFX11FAKE16-NEXT:    v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11FAKE16-NEXT:    v_bfe_u32 v10, v6, 16, 1
-; GFX11FAKE16-NEXT:    v_mul_f32_e32 v7, v9, v7
 ; GFX11FAKE16-NEXT:    v_or_b32_e32 v3, 0x400000, v6
 ; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    v_mul_f32_e32 v7, v9, v7
 ; GFX11FAKE16-NEXT:    v_add3_u32 v10, v10, v6, 0x7fff
 ; GFX11FAKE16-NEXT:    v_or_b32_e32 v6, 0x400000, v1
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11FAKE16-NEXT:    v_bfe_u32 v9, v7, 16, 1
 ; GFX11FAKE16-NEXT:    v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2
 ; GFX11FAKE16-NEXT:    v_bfe_u32 v2, v1, 16, 1
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
index 96603c10787d6..acf7dc3cae9a2 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
@@ -1253,12 +1253,12 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) inreg %src, ptr a
 ; SDAG-GFX1100-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44
 ; SDAG-GFX1100-NEXT:    s_mov_b32 s5, s12
 ; SDAG-GFX1100-NEXT:    s_waitcnt lgkmcnt(0)
-; SDAG-GFX1100-NEXT:    v_mov_b32_e32 v5, s0
 ; SDAG-GFX1100-NEXT:    s_mov_b32 s4, s3
-; SDAG-GFX1100-NEXT:    s_mov_b32 s3, s12
+; SDAG-GFX1100-NEXT:    v_mov_b32_e32 v5, s0
 ; SDAG-GFX1100-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; SDAG-GFX1100-NEXT:    s_mov_b32 s13, s2
 ; SDAG-GFX1100-NEXT:    s_mov_b32 s2, s1
+; SDAG-GFX1100-NEXT:    s_mov_b32 s3, s12
 ; SDAG-GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; SDAG-GFX1100-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; SDAG-GFX1100-NEXT:    s_waitcnt vmcnt(0)
@@ -1324,12 +1324,12 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) inreg %src, ptr a
 ; GISEL-GFX1100-NEXT:    s_load_b32 s7, s[4:5], 0x54
 ; GISEL-GFX1100-NEXT:    s_mov_b32 s4, s13
 ; GISEL-GFX1100-NEXT:    s_waitcnt lgkmcnt(0)
-; GISEL-GFX1100-NEXT:    v_mov_b32_e32 v5, s8
 ; GISEL-GFX1100-NEXT:    s_mov_b32 s12, s9
 ; GISEL-GFX1100-NEXT:    s_mov_b32 s5, s10
-; GISEL-GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GISEL-GFX1100-NEXT:    v_mov_b32_e32 v5, s8
 ; GISEL-GFX1100-NEXT:    s_or_b64 s[4:5], s[12:13], s[4:5]
 ; GISEL-GFX1100-NEXT:    s_mov_b32 s12, s11
+; GISEL-GFX1100-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GISEL-GFX1100-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
 ; GISEL-GFX1100-NEXT:    s_waitcnt vmcnt(0)
 ; GISEL-GFX1100-NEXT:    buffer_store_b128 v[0:3], v5, s[4:7], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index 178b138b57141..acf2f8add7670 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -6064,8 +6064,8 @@ define void @stack_12xv3i32() #0 {
 ; GFX11-NEXT:    s_add_i32 s0, s32, 16
 ; GFX11-NEXT:    scratch_store_b128 off, v[0:3], s32
 ; GFX11-NEXT:    scratch_store_b32 off, v4, s0
-; GFX11-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, 0
-; GFX11-NEXT:    v_dual_mov_b32 v3, 1 :: v_dual_mov_b32 v2, 0
+; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 1
+; GFX11-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
 ; GFX11-NEXT:    v_dual_mov_b32 v5, 1 :: v_dual_mov_b32 v4, 1
 ; GFX11-NEXT:    v_dual_mov_b32 v7, 2 :: v_dual_mov_b32 v6, 2
 ; GFX11-NEXT:    v_dual_mov_b32 v9, 3 :: v_dual_mov_b32 v8, 2
@@ -6772,10 +6772,10 @@ define void @stack_8xv5i32() #0 {
 ; GFX11-NEXT:    s_add_i32 s1, s32, 16
 ; GFX11-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX11-NEXT:    scratch_store_b128 off, v[0:3], s32
+; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 0
 ; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    scratch_store_b32 off, v8, s0
 ; GFX11-NEXT:    scratch_store_b128 off, v[4:7], s1
-; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 0
 ; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, 1
 ; GFX11-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v7, 1
 ; GFX11-NEXT:    v_dual_mov_b32 v6, 1 :: v_dual_mov_b32 v9, 1
@@ -7150,21 +7150,21 @@ define void @stack_8xv5f32() #0 {
 ; GFX11-NEXT:    scratch_store_b128 off, v[0:3], s32
 ; GFX11-NEXT:    scratch_store_b32 off, v8, s0
 ; GFX11-NEXT:    scratch_store_b128 off, v[4:7], s1
-; GFX11-NEXT:    v_mov_b32_e32 v6, 1.0
 ; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
 ; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0
 ; GFX11-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v5, 1.0
-; GFX11-NEXT:    v_dual_mov_b32 v7, 1.0 :: v_dual_mov_b32 v8, 1.0
-; GFX11-NEXT:    v_dual_mov_b32 v11, 2.0 :: v_dual_mov_b32 v10, 2.0
-; GFX11-NEXT:    v_dual_mov_b32 v13, 2.0 :: v_dual_mov_b32 v12, 2.0
-; GFX11-NEXT:    v_dual_mov_b32 v15, 0x40400000 :: v_dual_mov_b32 v14, 2.0
-; GFX11-NEXT:    v_dual_mov_b32 v17, 0x40400000 :: v_dual_mov_b32 v16, 0x40400000
-; GFX11-NEXT:    v_dual_mov_b32 v19, 0x40400000 :: v_dual_mov_b32 v18, 0x40400000
-; GFX11-NEXT:    v_dual_mov_b32 v21, 4.0 :: v_dual_mov_b32 v20, 4.0
-; GFX11-NEXT:    v_dual_mov_b32 v23, 4.0 :: v_dual_mov_b32 v22, 4.0
-; GFX11-NEXT:    v_dual_mov_b32 v25, 0x40a00000 :: v_dual_mov_b32 v24, 4.0
-; GFX11-NEXT:    v_dual_mov_b32 v27, 0x40a00000 :: v_dual_mov_b32 v26, 0x40a00000
-; GFX11-NEXT:    v_dual_mov_b32 v29, 0x40a00000 :: v_dual_mov_b32 v28, 0x40a00000
+; GFX11-NEXT:    v_dual_mov_b32 v6, 1.0 :: v_dual_mov_b32 v7, 1.0
+; GFX11-NEXT:    v_dual_mov_b32 v8, 1.0 :: v_dual_mov_b32 v11, 2.0
+; GFX11-NEXT:    v_dual_mov_b32 v10, 2.0 :: v_dual_mov_b32 v13, 2.0
+; GFX11-NEXT:    v_dual_mov_b32 v12, 2.0 :: v_dual_mov_b32 v15, 0x40400000
+; GFX11-NEXT:    v_dual_mov_b32 v14, 2.0 :: v_dual_mov_b32 v17, 0x40400000
+; GFX11-NEXT:    v_dual_mov_b32 v16, 0x40400000 :: v_dual_mov_b32 v19, 0x40400000
+; GFX11-NEXT:    v_dual_mov_b32 v18, 0x40400000 :: v_dual_mov_b32 v21, 4.0
+; GFX11-NEXT:    v_dual_mov_b32 v20, 4.0 :: v_dual_mov_b32 v23, 4.0
+; GFX11-NEXT:    v_dual_mov_b32 v22, 4.0 :: v_dual_mov_b32 v25, 0x40a00000
+; GFX11-NEXT:    v_dual_mov_b32 v24, 4.0 :: v_dual_mov_b32 v27, 0x40a00000
+; GFX11-NEXT:    v_dual_mov_b32 v26, 0x40a00000 :: v_dual_mov_b32 v29, 0x40a00000
+; GFX11-NEXT:    v_mov_b32_e32 v28, 0x40a00000
 ; GFX11-NEXT:    v_mov_b32_e32 v30, 0x40c00000
 ; GFX11-NEXT:    s_getpc_b64 s[0:1]
 ; GFX11-NEXT:    s_add_u32 s0, s0, external_void_func_8xv5f32 at rel32@lo+4
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index aabcd69c88ca3..d0ae30f813a72 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -2847,11 +2847,10 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX11-NEXT:    s_addc_u32 s12, s5, 0
 ; GFX11-NEXT:    s_add_u32 s13, s1, 2
 ; GFX11-NEXT:    s_addc_u32 s14, s5, 0
-; GFX11-NEXT:    v_mov_b32_e32 v2, s13
 ; GFX11-NEXT:    s_cmp_lg_u32 s7, 0
 ; GFX11-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v0
 ; GFX11-NEXT:    s_subb_u32 s0, s11, s0
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    v_mov_b32_e32 v2, s13
 ; GFX11-NEXT:    s_cmp_ge_u32 s0, s3
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
 ; GFX11-NEXT:    s_cselect_b32 s7, -1, 0
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 92ece0d007fe2..a564f92fd4c51 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -974,46 +974,46 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_with_select(ptr addrspace(1) no
 ; VI-NEXT:    v_mov_b32_e32 v9, s5
 ; VI-NEXT:    v_mov_b32_e32 v8, s4
 ; VI-NEXT:    s_add_u32 s4, s2, 2
-; VI-NEXT:    s_addc_u32 s5, s3, 0
-; VI-NEXT:    v_mov_b32_e32 v11, s5
-; VI-NEXT:    v_mov_b32_e32 v10, s4
-; VI-NEXT:    s_add_u32 s4, s2, 1
-; VI-NEXT:    flat_load_ubyte v12, v[0:1]
-; VI-NEXT:    flat_load_ubyte v13, v[2:3]
-; VI-NEXT:    flat_load_ubyte v4, v[4:5]
-; VI-NEXT:    flat_load_ubyte v5, v[6:7]
+; VI-NEXT:    flat_load_ubyte v10, v[0:1]
+; VI-NEXT:    flat_load_ubyte v11, v[2:3]
+; VI-NEXT:    flat_load_ubyte v12, v[4:5]
+; VI-NEXT:    flat_load_ubyte v6, v[6:7]
+; VI-NEXT:    flat_load_ubyte v7, v[8:9]
 ; VI-NEXT:    s_addc_u32 s5, s3, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
-; VI-NEXT:    flat_load_ubyte v6, v[8:9]
-; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_load_ubyte v7, v[10:11]
+; VI-NEXT:    s_add_u32 s4, s2, 1
+; VI-NEXT:    s_addc_u32 s5, s3, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_mov_b32_e32 v5, s3
+; VI-NEXT:    v_mov_b32_e32 v4, s2
 ; VI-NEXT:    flat_load_ubyte v0, v[0:1]
 ; VI-NEXT:    flat_load_ubyte v2, v[2:3]
+; VI-NEXT:    flat_load_ubyte v3, v[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v1, 0
 ; VI-NEXT:    s_waitcnt vmcnt(7)
-; VI-NEXT:    v_lshlrev_b32_e32 v3, 8, v12
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 8, v10
 ; VI-NEXT:    s_waitcnt vmcnt(6)
-; VI-NEXT:    v_or_b32_e32 v3, v3, v13
+; VI-NEXT:    v_or_b32_e32 v4, v4, v11
 ; VI-NEXT:    s_waitcnt vmcnt(5)
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
+; VI-NEXT:    v_lshlrev_b32_e32 v5, 8, v12
 ; VI-NEXT:    s_waitcnt vmcnt(4)
-; VI-NEXT:    v_or_b32_sdwa v4, v4, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT:    v_or_b32_e32 v3, v4, v3
-; VI-NEXT:    v_ffbh_u32_e32 v3, v3
+; VI-NEXT:    v_or_b32_sdwa v5, v5, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v4, v5, v4
 ; VI-NEXT:    s_waitcnt vmcnt(3)
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 8, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v5, 8, v7
+; VI-NEXT:    v_ffbh_u32_e32 v4, v4
 ; VI-NEXT:    s_waitcnt vmcnt(2)
-; VI-NEXT:    v_or_b32_sdwa v4, v4, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v5, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; VI-NEXT:    s_waitcnt vmcnt(1)
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
 ; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_or_b32_e32 v2, v2, v3
 ; VI-NEXT:    v_or_b32_e32 v0, v0, v2
-; VI-NEXT:    v_or_b32_e32 v0, v4, v0
 ; VI-NEXT:    v_ffbh_u32_e32 v0, v0
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
-; VI-NEXT:    v_min_u32_e32 v0, v0, v3
+; VI-NEXT:    v_min_u32_e32 v0, v0, v4
 ; VI-NEXT:    v_mov_b32_e32 v3, s1
 ; VI-NEXT:    v_min_u32_e32 v0, 64, v0
 ; VI-NEXT:    v_mov_b32_e32 v2, s0
diff --git a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
index 3c45596fba14b..f5829d9c92253 100644
--- a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
@@ -938,46 +938,46 @@ define amdgpu_kernel void @v_cttz_zero_undef_i64_with_select(ptr addrspace(1) no
 ; VI-NEXT:    v_mov_b32_e32 v9, s5
 ; VI-NEXT:    v_mov_b32_e32 v8, s4
 ; VI-NEXT:    s_add_u32 s4, s2, 2
-; VI-NEXT:    s_addc_u32 s5, s3, 0
-; VI-NEXT:    v_mov_b32_e32 v11, s5
-; VI-NEXT:    v_mov_b32_e32 v10, s4
-; VI-NEXT:    flat_load_ubyte v12, v[0:1]
-; VI-NEXT:    flat_load_ubyte v13, v[2:3]
-; VI-NEXT:    flat_load_ubyte v4, v[4:5]
-; VI-NEXT:    flat_load_ubyte v5, v[6:7]
-; VI-NEXT:    s_add_u32 s4, s2, 1
-; VI-NEXT:    flat_load_ubyte v6, v[8:9]
+; VI-NEXT:    flat_load_ubyte v10, v[0:1]
+; VI-NEXT:    flat_load_ubyte v11, v[2:3]
+; VI-NEXT:    flat_load_ubyte v12, v[4:5]
+; VI-NEXT:    flat_load_ubyte v6, v[6:7]
+; VI-NEXT:    flat_load_ubyte v7, v[8:9]
 ; VI-NEXT:    s_addc_u32 s5, s3, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
-; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_load_ubyte v7, v[10:11]
+; VI-NEXT:    s_add_u32 s4, s2, 1
+; VI-NEXT:    s_addc_u32 s5, s3, 0
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_mov_b32_e32 v5, s3
+; VI-NEXT:    v_mov_b32_e32 v4, s2
 ; VI-NEXT:    flat_load_ubyte v0, v[0:1]
 ; VI-NEXT:    flat_load_ubyte v2, v[2:3]
+; VI-NEXT:    flat_load_ubyte v3, v[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v1, 0
 ; VI-NEXT:    s_waitcnt vmcnt(7)
-; VI-NEXT:    v_lshlrev_b32_e32 v3, 8, v12
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 8, v10
 ; VI-NEXT:    s_waitcnt vmcnt(6)
-; VI-NEXT:    v_or_b32_e32 v3, v3, v13
+; VI-NEXT:    v_or_b32_e32 v4, v4, v11
 ; VI-NEXT:    s_waitcnt vmcnt(5)
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
+; VI-NEXT:    v_lshlrev_b32_e32 v5, 8, v12
 ; VI-NEXT:    s_waitcnt vmcnt(4)
-; VI-NEXT:    v_or_b32_sdwa v4, v4, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT:    v_or_b32_e32 v3, v4, v3
+; VI-NEXT:    v_or_b32_sdwa v5, v5, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v4, v5, v4
 ; VI-NEXT:    s_waitcnt vmcnt(3)
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 8, v6
-; VI-NEXT:    v_ffbl_b32_e32 v3, v3
-; VI-NEXT:    v_add_u32_e32 v3, vcc, 32, v3
+; VI-NEXT:    v_lshlrev_b32_e32 v5, 8, v7
+; VI-NEXT:    v_ffbl_b32_e32 v4, v4
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v4
 ; VI-NEXT:    s_waitcnt vmcnt(2)
-; VI-NEXT:    v_or_b32_sdwa v4, v4, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v5, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; VI-NEXT:    s_waitcnt vmcnt(1)
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
 ; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_or_b32_e32 v2, v2, v3
 ; VI-NEXT:    v_or_b32_e32 v0, v0, v2
-; VI-NEXT:    v_or_b32_e32 v0, v4, v0
 ; VI-NEXT:    v_ffbl_b32_e32 v0, v0
-; VI-NEXT:    v_min_u32_e32 v0, v3, v0
+; VI-NEXT:    v_min_u32_e32 v0, v4, v0
 ; VI-NEXT:    v_mov_b32_e32 v3, s1
 ; VI-NEXT:    v_min_u32_e32 v0, 64, v0
 ; VI-NEXT:    v_mov_b32_e32 v2, s0
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index b4b9c2d3e0135..7ceeda810e5e6 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -1929,25 +1929,25 @@ define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1)
 ; GFX11-NEXT:    v_med3_i32 v1, s3, 0, 13
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, s4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_or_b32 s3, s5, s3
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-NEXT:    s_or_b32 s5, s3, 0x1000
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_lshr_b32 s7, s5, s6
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_lshl_b32 s6, s7, s6
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_cmp_lg_u32 s6, s5
 ; GFX11-NEXT:    s_cselect_b32 s5, 1, 0
 ; GFX11-NEXT:    s_addk_i32 s2, 0xfc10
 ; GFX11-NEXT:    s_or_b32 s5, s7, s5
 ; GFX11-NEXT:    s_lshl_b32 s6, s2, 12
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_or_b32 s6, s3, s6
 ; GFX11-NEXT:    s_cmp_lt_i32 s2, 1
 ; GFX11-NEXT:    s_cselect_b32 s5, s5, s6
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_and_b32 s6, s5, 7
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_cmp_gt_i32 s6, 5
 ; GFX11-NEXT:    s_cselect_b32 s7, 1, 0
 ; GFX11-NEXT:    s_cmp_eq_u32 s6, 3
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.ll b/llvm/test/CodeGen/AMDGPU/fdiv.ll
index 33910947e6fac..b826e6c469d8e 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.ll
@@ -1164,8 +1164,8 @@ define amdgpu_kernel void @s_fdiv_v2f32(ptr addrspace(1) %out, <2 x float> %a, <
 ; GFX11-NEXT:    v_fma_f32 v5, -v2, v4, v0
 ; GFX11-NEXT:    v_fmac_f32_e32 v4, v5, v3
 ; GFX11-NEXT:    v_fma_f32 v0, -v2, v4, v0
-; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_denorm_mode 12
+; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    v_div_fmas_f32 v0, v0, v3, v4
 ; GFX11-NEXT:    v_div_fixup_f32 v0, v0, s2, s0
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.ll b/llvm/test/CodeGen/AMDGPU/fmed3.ll
index db0c5362bdc5f..c583b5b4d3e9a 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3.ll
@@ -8300,10 +8300,10 @@ define amdgpu_kernel void @two_non_inline_constant_multi_use(ptr addrspace(1) %o
 ; GFX11-SDAG-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX11-SDAG-NEXT:    s_mov_b32 s2, 0x41000000
 ; GFX11-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-NEXT:    v_add_f32_e32 v3, 0x41800000, v1
 ; GFX11-SDAG-NEXT:    v_add_f32_e32 v2, 0.5, v1
+; GFX11-SDAG-NEXT:    v_add_f32_e32 v3, 0x41800000, v1
 ; GFX11-SDAG-NEXT:    v_add_f32_e32 v1, 0x41000000, v1
-; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-SDAG-NEXT:    v_med3_f32 v2, v2, s2, 0x41800000
 ; GFX11-SDAG-NEXT:    global_store_b32 v0, v2, s[0:1]
 ; GFX11-SDAG-NEXT:    global_store_b32 v[0:1], v3, off dlc
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 924378eb2376d..1b092b283290a 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -497,7 +497,7 @@ define double @fneg_xor_select_i64_user_with_srcmods(i1 %cond, i64 %arg0, i64 %a
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v2, v4, v2
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v1, v3, v1
 ; GFX11-NEXT:    v_add_f64 v[0:1], -v[1:2], 2.0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select i1 %cond, i64 %arg0, i64 %arg1
diff --git a/llvm/test/CodeGen/AMDGPU/freeze.ll b/llvm/test/CodeGen/AMDGPU/freeze.ll
index ff9b0641e43d8..2040aedc250e6 100644
--- a/llvm/test/CodeGen/AMDGPU/freeze.ll
+++ b/llvm/test/CodeGen/AMDGPU/freeze.ll
@@ -2031,9 +2031,9 @@ define void @freeze_v15i32(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) {
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v8, vcc, 16, v0
 ; GFX8-GISEL-NEXT:    flat_load_dwordx4 v[4:7], v[0:1]
 ; GFX8-GISEL-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-GISEL-NEXT:    flat_load_dwordx4 v[8:11], v[8:9]
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v12, vcc, 32, v0
 ; GFX8-GISEL-NEXT:    v_addc_u32_e32 v13, vcc, 0, v1, vcc
+; GFX8-GISEL-NEXT:    flat_load_dwordx4 v[8:11], v[8:9]
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v0, vcc, 48, v0
 ; GFX8-GISEL-NEXT:    flat_load_dwordx4 v[12:15], v[12:13]
 ; GFX8-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -10417,9 +10417,9 @@ define void @freeze_v8p3(ptr addrspace(3) %ptra, ptr addrspace(3) %ptrb) {
 ; GFX6-GISEL-LABEL: freeze_v8p3:
 ; GFX6-GISEL:       ; %bb.0:
 ; GFX6-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-GISEL-NEXT:    s_mov_b32 m0, -1
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v4, vcc, 8, v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v6, vcc, 16, v0
-; GFX6-GISEL-NEXT:    s_mov_b32 m0, -1
 ; GFX6-GISEL-NEXT:    ds_read_b64 v[2:3], v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 24, v0
 ; GFX6-GISEL-NEXT:    ds_read_b64 v[4:5], v4
@@ -11631,9 +11631,9 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX6-GISEL:       ; %bb.0:
 ; GFX6-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
-; GFX6-GISEL-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX6-GISEL-NEXT:    buffer_load_dword v3, v0, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    buffer_load_dword v2, v2, s[0:3], 0 offen
-; GFX6-GISEL-NEXT:    v_add_i32_e32 v3, vcc, 8, v0
+; GFX6-GISEL-NEXT:    v_add_i32_e32 v4, vcc, 8, v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v5, vcc, 12, v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v6, vcc, 16, v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v7, vcc, 20, v0
@@ -11643,7 +11643,7 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v11, vcc, 36, v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v12, vcc, 40, v0
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v13, vcc, 44, v0
-; GFX6-GISEL-NEXT:    buffer_load_dword v3, v3, s[0:3], 0 offen
+; GFX6-GISEL-NEXT:    buffer_load_dword v4, v4, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    buffer_load_dword v5, v5, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    buffer_load_dword v6, v6, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    buffer_load_dword v7, v7, s[0:3], 0 offen
@@ -11658,8 +11658,8 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v15, vcc, 52, v0
 ; GFX6-GISEL-NEXT:    buffer_load_dword v15, v15, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v16, vcc, 56, v0
-; GFX6-GISEL-NEXT:    buffer_load_dword v16, v16, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 60, v0
+; GFX6-GISEL-NEXT:    buffer_load_dword v16, v16, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v17, vcc, 4, v1
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v18, vcc, 8, v1
@@ -11672,19 +11672,19 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX6-GISEL-NEXT:    s_waitcnt vmcnt(12)
 ; GFX6-GISEL-NEXT:    buffer_store_dword v6, v2, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v2, vcc, 40, v1
-; GFX6-GISEL-NEXT:    buffer_store_dword v3, v18, s[0:3], 0 offen
+; GFX6-GISEL-NEXT:    buffer_store_dword v4, v18, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    s_waitcnt expcnt(0)
-; GFX6-GISEL-NEXT:    v_add_i32_e32 v3, vcc, 24, v1
+; GFX6-GISEL-NEXT:    v_add_i32_e32 v4, vcc, 24, v1
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v18, vcc, 28, v1
 ; GFX6-GISEL-NEXT:    buffer_store_dword v5, v19, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v5, vcc, 32, v1
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v19, vcc, 36, v1
 ; GFX6-GISEL-NEXT:    v_add_i32_e32 v6, vcc, 44, v1
-; GFX6-GISEL-NEXT:    buffer_store_dword v4, v1, s[0:3], 0 offen
+; GFX6-GISEL-NEXT:    buffer_store_dword v3, v1, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    s_waitcnt vmcnt(14)
 ; GFX6-GISEL-NEXT:    buffer_store_dword v7, v17, s[0:3], 0 offen
-; GFX6-GISEL-NEXT:    buffer_store_dword v8, v3, s[0:3], 0 offen
+; GFX6-GISEL-NEXT:    buffer_store_dword v8, v4, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    s_waitcnt vmcnt(14)
 ; GFX6-GISEL-NEXT:    buffer_store_dword v9, v18, s[0:3], 0 offen
 ; GFX6-GISEL-NEXT:    buffer_store_dword v10, v5, s[0:3], 0 offen
@@ -11785,9 +11785,9 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX7-GISEL:       ; %bb.0:
 ; GFX7-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
-; GFX7-GISEL-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX7-GISEL-NEXT:    buffer_load_dword v3, v0, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    buffer_load_dword v2, v2, s[0:3], 0 offen
-; GFX7-GISEL-NEXT:    v_add_i32_e32 v3, vcc, 8, v0
+; GFX7-GISEL-NEXT:    v_add_i32_e32 v4, vcc, 8, v0
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v5, vcc, 12, v0
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v6, vcc, 16, v0
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v7, vcc, 20, v0
@@ -11797,7 +11797,7 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v11, vcc, 36, v0
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v12, vcc, 40, v0
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v13, vcc, 44, v0
-; GFX7-GISEL-NEXT:    buffer_load_dword v3, v3, s[0:3], 0 offen
+; GFX7-GISEL-NEXT:    buffer_load_dword v4, v4, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    buffer_load_dword v5, v5, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    buffer_load_dword v6, v6, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    buffer_load_dword v7, v7, s[0:3], 0 offen
@@ -11812,8 +11812,8 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v15, vcc, 52, v0
 ; GFX7-GISEL-NEXT:    buffer_load_dword v15, v15, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v16, vcc, 56, v0
-; GFX7-GISEL-NEXT:    buffer_load_dword v16, v16, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 60, v0
+; GFX7-GISEL-NEXT:    buffer_load_dword v16, v16, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v17, vcc, 4, v1
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v18, vcc, 8, v1
@@ -11825,17 +11825,17 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX7-GISEL-NEXT:    s_waitcnt vmcnt(12)
 ; GFX7-GISEL-NEXT:    buffer_store_dword v6, v2, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v2, vcc, 40, v1
-; GFX7-GISEL-NEXT:    buffer_store_dword v3, v18, s[0:3], 0 offen
-; GFX7-GISEL-NEXT:    v_add_i32_e32 v3, vcc, 24, v1
+; GFX7-GISEL-NEXT:    buffer_store_dword v4, v18, s[0:3], 0 offen
+; GFX7-GISEL-NEXT:    v_add_i32_e32 v4, vcc, 24, v1
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v18, vcc, 28, v1
 ; GFX7-GISEL-NEXT:    buffer_store_dword v5, v19, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v5, vcc, 32, v1
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v19, vcc, 36, v1
 ; GFX7-GISEL-NEXT:    v_add_i32_e32 v6, vcc, 44, v1
-; GFX7-GISEL-NEXT:    buffer_store_dword v4, v1, s[0:3], 0 offen
+; GFX7-GISEL-NEXT:    buffer_store_dword v3, v1, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    s_waitcnt vmcnt(14)
 ; GFX7-GISEL-NEXT:    buffer_store_dword v7, v17, s[0:3], 0 offen
-; GFX7-GISEL-NEXT:    buffer_store_dword v8, v3, s[0:3], 0 offen
+; GFX7-GISEL-NEXT:    buffer_store_dword v8, v4, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    s_waitcnt vmcnt(14)
 ; GFX7-GISEL-NEXT:    buffer_store_dword v9, v18, s[0:3], 0 offen
 ; GFX7-GISEL-NEXT:    buffer_store_dword v10, v5, s[0:3], 0 offen
@@ -11861,9 +11861,9 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX8-GISEL:       ; %bb.0:
 ; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v2, vcc, 4, v0
-; GFX8-GISEL-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX8-GISEL-NEXT:    buffer_load_dword v3, v0, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    buffer_load_dword v2, v2, s[0:3], 0 offen
-; GFX8-GISEL-NEXT:    v_add_u32_e32 v3, vcc, 8, v0
+; GFX8-GISEL-NEXT:    v_add_u32_e32 v4, vcc, 8, v0
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v5, vcc, 12, v0
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v6, vcc, 16, v0
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v7, vcc, 20, v0
@@ -11873,7 +11873,7 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v11, vcc, 36, v0
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v12, vcc, 40, v0
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v13, vcc, 44, v0
-; GFX8-GISEL-NEXT:    buffer_load_dword v3, v3, s[0:3], 0 offen
+; GFX8-GISEL-NEXT:    buffer_load_dword v4, v4, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    buffer_load_dword v5, v5, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    buffer_load_dword v6, v6, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    buffer_load_dword v7, v7, s[0:3], 0 offen
@@ -11888,8 +11888,8 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v15, vcc, 52, v0
 ; GFX8-GISEL-NEXT:    buffer_load_dword v15, v15, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v16, vcc, 56, v0
-; GFX8-GISEL-NEXT:    buffer_load_dword v16, v16, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v0, vcc, 60, v0
+; GFX8-GISEL-NEXT:    buffer_load_dword v16, v16, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v17, vcc, 4, v1
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v18, vcc, 8, v1
@@ -11901,17 +11901,17 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
 ; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(12)
 ; GFX8-GISEL-NEXT:    buffer_store_dword v6, v2, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v2, vcc, 40, v1
-; GFX8-GISEL-NEXT:    buffer_store_dword v3, v18, s[0:3], 0 offen
-; GFX8-GISEL-NEXT:    v_add_u32_e32 v3, vcc, 24, v1
+; GFX8-GISEL-NEXT:    buffer_store_dword v4, v18, s[0:3], 0 offen
+; GFX8-GISEL-NEXT:    v_add_u32_e32 v4, vcc, 24, v1
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v18, vcc, 28, v1
 ; GFX8-GISEL-NEXT:    buffer_store_dword v5, v19, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v5, vcc, 32, v1
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v19, vcc, 36, v1
 ; GFX8-GISEL-NEXT:    v_add_u32_e32 v6, vcc, 44, v1
-; GFX8-GISEL-NEXT:    buffer_store_dword v4, v1, s[0:3], 0 offen
+; GFX8-GISEL-NEXT:    buffer_store_dword v3, v1, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(14)
 ; GFX8-GISEL-NEXT:    buffer_store_dword v7, v17, s[0:3], 0 offen
-; GFX8-GISEL-NEXT:    buffer_store_dword v8, v3, s[0:3], 0 offen
+; GFX8-GISEL-NEXT:    buffer_store_dword v8, v4, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(14)
 ; GFX8-GISEL-NEXT:    buffer_store_dword v9, v18, s[0:3], 0 offen
 ; GFX8-GISEL-NEXT:    buffer_store_dword v10, v5, s[0:3], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/function-args-inreg.ll b/llvm/test/CodeGen/AMDGPU/function-args-inreg.ll
index 0db2a1679197e..831d10480c51c 100644
--- a/llvm/test/CodeGen/AMDGPU/function-args-inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/function-args-inreg.ll
@@ -1591,7 +1591,7 @@ define void @too_many_args_use_workitem_id_x_inreg(
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b32 v[0:1], v18, off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    v_dual_mov_b32 v15, s21 :: v_dual_mov_b32 v14, s20
+; GFX11-NEXT:    v_dual_mov_b32 v14, s20 :: v_dual_mov_b32 v15, s21
 ; GFX11-NEXT:    v_dual_mov_b32 v16, s22 :: v_dual_mov_b32 v17, s23
 ; GFX11-NEXT:    v_mov_b32_e32 v18, s24
 ; GFX11-NEXT:    global_store_b32 v[0:1], v14, off dlc
@@ -1604,8 +1604,8 @@ define void @too_many_args_use_workitem_id_x_inreg(
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b32 v[0:1], v18, off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT:    v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v16, s27
+; GFX11-NEXT:    v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s26
+; GFX11-NEXT:    v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s28
 ; GFX11-NEXT:    v_mov_b32_e32 v18, s29
 ; GFX11-NEXT:    global_store_b32 v[0:1], v14, off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
index ca9cb456fa19f..e40917d4307fb 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
@@ -3670,10 +3670,10 @@ define amdgpu_gfx void @test_call_external_void_func_v5i8() #0 {
 ; GFX11-FAKE16-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX11-FAKE16-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, v5
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[3:4], 24, v[5:6]
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 8, v5
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v5
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, v5
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v6
 ; GFX11-FAKE16-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX11-FAKE16-NEXT:    v_readlane_b32 s31, v40, 1
@@ -4186,9 +4186,10 @@ define amdgpu_gfx void @test_call_external_void_func_v32i8() #0 {
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v12, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v20, v17
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v24, v18
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v28, v19 :: v_dual_mov_b32 v19, v34
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v28, v19
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v36 :: v_dual_mov_b32 v3, v37
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v17, v32 :: v_dual_mov_b32 v18, v33
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v19, v34
 ; GFX11-FAKE16-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX11-FAKE16-NEXT:    v_readlane_b32 s31, v40, 1
 ; GFX11-FAKE16-NEXT:    v_readlane_b32 s30, v40, 0
@@ -5346,14 +5347,14 @@ define amdgpu_gfx void @test_call_external_void_func_v5i8_ret() #0 {
 ; GFX11-FAKE16-NEXT:    v_writelane_b32 v42, s30, 0
 ; GFX11-FAKE16-NEXT:    v_writelane_b32 v42, s31, 1
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, v5
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[3:4], 24, v[5:6]
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 8, v5
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v5
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, v5
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v6
 ; GFX11-FAKE16-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b16 v1, 8, v1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xff, v0
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b16 v3, 8, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xff, v2
@@ -5639,16 +5640,16 @@ define amdgpu_gfx void @test_call_external_void_func_v8i8_ret() #0 {
 ; GFX11-FAKE16-NEXT:    v_writelane_b32 v42, s30, 0
 ; GFX11-FAKE16-NEXT:    v_writelane_b32 v42, s31, 1
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v8, 8, v0
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 24, v0
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 8, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v6, 16, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v7, 24, v1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, v8
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, v8
 ; GFX11-FAKE16-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b16 v5, 8, v5
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xff, v4
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b16 v7, 8, v7
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xff, v6
@@ -6197,9 +6198,10 @@ define amdgpu_gfx void @test_call_external_void_func_v32i8_ret() #0 {
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v12, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v20, v17
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v24, v18
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v28, v19 :: v_dual_mov_b32 v19, v34
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v28, v19
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v36 :: v_dual_mov_b32 v3, v37
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v17, v32 :: v_dual_mov_b32 v18, v33
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v19, v34
 ; GFX11-FAKE16-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b16 v9, 8, v9
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v8, 0xff, v8
@@ -9903,8 +9905,8 @@ define amdgpu_gfx void @test_call_external_void_func_v16i8() #0 {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v15, 24, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v1
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v2
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v3, v18
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, v16 :: v_dual_mov_b32 v2, v17
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v1, v16
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v17 :: v_dual_mov_b32 v3, v18
 ; GFX11-FAKE16-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX11-FAKE16-NEXT:    v_readlane_b32 s31, v40, 1
 ; GFX11-FAKE16-NEXT:    v_readlane_b32 s30, v40, 0
@@ -17250,22 +17252,21 @@ define amdgpu_gfx void @stack_8xv5f32() #0 {
 ; GFX11-NEXT:    s_add_i32 s0, s32, 16
 ; GFX11-NEXT:    scratch_store_b128 off, v[0:3], s32
 ; GFX11-NEXT:    scratch_store_b128 off, v[4:7], s0
-; GFX11-NEXT:    v_mov_b32_e32 v6, 1.0
 ; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
 ; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0
 ; GFX11-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v5, 1.0
-; GFX11-NEXT:    v_dual_mov_b32 v7, 1.0 :: v_dual_mov_b32 v8, 1.0
-; GFX11-NEXT:    v_dual_mov_b32 v9, 1.0 :: v_dual_mov_b32 v10, 2.0
-; GFX11-NEXT:    v_dual_mov_b32 v11, 2.0 :: v_dual_mov_b32 v12, 2.0
-; GFX11-NEXT:    v_dual_mov_b32 v13, 2.0 :: v_dual_mov_b32 v14, 2.0
-; GFX11-NEXT:    v_dual_mov_b32 v15, 0x40400000 :: v_dual_mov_b32 v16, 0x40400000
-; GFX11-NEXT:    v_dual_mov_b32 v17, 0x40400000 :: v_dual_mov_b32 v18, 0x40400000
-; GFX11-NEXT:    v_dual_mov_b32 v19, 0x40400000 :: v_dual_mov_b32 v20, 4.0
-; GFX11-NEXT:    v_dual_mov_b32 v21, 4.0 :: v_dual_mov_b32 v22, 4.0
-; GFX11-NEXT:    v_dual_mov_b32 v23, 4.0 :: v_dual_mov_b32 v24, 4.0
-; GFX11-NEXT:    v_dual_mov_b32 v25, 0x40a00000 :: v_dual_mov_b32 v26, 0x40a00000
-; GFX11-NEXT:    v_dual_mov_b32 v27, 0x40a00000 :: v_dual_mov_b32 v28, 0x40a00000
-; GFX11-NEXT:    v_mov_b32_e32 v29, 0x40a00000
+; GFX11-NEXT:    v_dual_mov_b32 v6, 1.0 :: v_dual_mov_b32 v7, 1.0
+; GFX11-NEXT:    v_dual_mov_b32 v8, 1.0 :: v_dual_mov_b32 v9, 1.0
+; GFX11-NEXT:    v_dual_mov_b32 v10, 2.0 :: v_dual_mov_b32 v11, 2.0
+; GFX11-NEXT:    v_dual_mov_b32 v12, 2.0 :: v_dual_mov_b32 v13, 2.0
+; GFX11-NEXT:    v_dual_mov_b32 v14, 2.0 :: v_dual_mov_b32 v15, 0x40400000
+; GFX11-NEXT:    v_dual_mov_b32 v16, 0x40400000 :: v_dual_mov_b32 v17, 0x40400000
+; GFX11-NEXT:    v_dual_mov_b32 v18, 0x40400000 :: v_dual_mov_b32 v19, 0x40400000
+; GFX11-NEXT:    v_dual_mov_b32 v20, 4.0 :: v_dual_mov_b32 v21, 4.0
+; GFX11-NEXT:    v_dual_mov_b32 v22, 4.0 :: v_dual_mov_b32 v23, 4.0
+; GFX11-NEXT:    v_dual_mov_b32 v24, 4.0 :: v_dual_mov_b32 v25, 0x40a00000
+; GFX11-NEXT:    v_dual_mov_b32 v26, 0x40a00000 :: v_dual_mov_b32 v27, 0x40a00000
+; GFX11-NEXT:    v_dual_mov_b32 v28, 0x40a00000 :: v_dual_mov_b32 v29, 0x40a00000
 ; GFX11-NEXT:    v_mov_b32_e32 v30, 0x40c00000
 ; GFX11-NEXT:    v_mov_b32_e32 v31, 0x40e00000
 ; GFX11-NEXT:    s_mov_b32 s1, external_void_func_8xv5f32 at abs32@hi
diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
index 6384fdba7a45a..75619532a2e37 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
@@ -3290,7 +3290,7 @@ define amdgpu_gfx void @call_72xi32() #1 {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    scratch_store_b128 off, v[28:31], s33 offset:1536 ; 16-byte Folded Spill
 ; GFX11-NEXT:    scratch_store_b128 off, v[32:35], s32
-; GFX11-NEXT:    v_dual_mov_b32 v31, v47 :: v_dual_mov_b32 v32, v36
+; GFX11-NEXT:    v_mov_b32_e32 v32, v36
 ; GFX11-NEXT:    v_dual_mov_b32 v33, v48 :: v_dual_mov_b32 v34, v49
 ; GFX11-NEXT:    v_dual_mov_b32 v35, v50 :: v_dual_mov_b32 v48, v51
 ; GFX11-NEXT:    v_dual_mov_b32 v49, v52 :: v_dual_mov_b32 v50, v53
@@ -3317,7 +3317,6 @@ define amdgpu_gfx void @call_72xi32() #1 {
 ; GFX11-NEXT:    s_add_i32 s2, s32, 0x70
 ; GFX11-NEXT:    v_mov_b32_e32 v6, v17
 ; GFX11-NEXT:    scratch_store_b128 off, v[12:15], s2
-; GFX11-NEXT:    v_mov_b32_e32 v13, v24
 ; GFX11-NEXT:    s_add_i32 s2, s32, 0x6c
 ; GFX11-NEXT:    v_mov_b32_e32 v7, v18
 ; GFX11-NEXT:    scratch_store_b32 off, v0, s2
@@ -3328,26 +3327,27 @@ define amdgpu_gfx void @call_72xi32() #1 {
 ; GFX11-NEXT:    v_dual_mov_b32 v12, v23 :: v_dual_mov_b32 v29, v45
 ; GFX11-NEXT:    scratch_store_b128 off, v[40:43], s2
 ; GFX11-NEXT:    s_add_i32 s2, s32, 64
-; GFX11-NEXT:    v_mov_b32_e32 v14, v25
+; GFX11-NEXT:    v_mov_b32_e32 v13, v24
 ; GFX11-NEXT:    scratch_store_b128 off, v[52:55], s2
 ; GFX11-NEXT:    s_add_i32 s2, s32, 48
-; GFX11-NEXT:    v_mov_b32_e32 v16, v27
+; GFX11-NEXT:    v_mov_b32_e32 v14, v25
 ; GFX11-NEXT:    scratch_store_b128 off, v[36:39], s2
 ; GFX11-NEXT:    s_add_i32 s2, s32, 32
-; GFX11-NEXT:    v_mov_b32_e32 v30, v46
+; GFX11-NEXT:    v_mov_b32_e32 v16, v27
 ; GFX11-NEXT:    scratch_store_b128 off, v[48:51], s2
 ; GFX11-NEXT:    s_add_i32 s2, s32, 16
+; GFX11-NEXT:    v_mov_b32_e32 v30, v46
 ; GFX11-NEXT:    scratch_store_b128 off, v[32:35], s2
-; GFX11-NEXT:    scratch_load_b128 v[1:4], off, s33 offset:1584 ; 16-byte Folded Reload
-; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_mov_b32_e32 v1, 42
-; GFX11-NEXT:    s_clause 0x2
+; GFX11-NEXT:    s_clause 0x3
+; GFX11-NEXT:    scratch_load_b128 v[1:4], off, s33 offset:1584
 ; GFX11-NEXT:    scratch_load_b128 v[17:20], off, s33 offset:1568
 ; GFX11-NEXT:    scratch_load_b128 v[21:24], off, s33 offset:1552
 ; GFX11-NEXT:    scratch_load_b128 v[25:28], off, s33 offset:1536
 ; GFX11-NEXT:    s_add_i32 s2, s33, 0x400
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    v_mov_b32_e32 v0, s2
+; GFX11-NEXT:    v_dual_mov_b32 v31, v47 :: v_dual_mov_b32 v0, s2
+; GFX11-NEXT:    s_waitcnt vmcnt(3)
+; GFX11-NEXT:    v_mov_b32_e32 v1, 42
 ; GFX11-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX11-NEXT:    s_clause 0xb
 ; GFX11-NEXT:    scratch_load_b32 v59, off, s33
diff --git a/llvm/test/CodeGen/AMDGPU/half.ll b/llvm/test/CodeGen/AMDGPU/half.ll
index f767511370eee..117cf40de72d2 100644
--- a/llvm/test/CodeGen/AMDGPU/half.ll
+++ b/llvm/test/CodeGen/AMDGPU/half.ll
@@ -2952,8 +2952,8 @@ define amdgpu_kernel void @global_truncstore_v16f32_to_v16f16(ptr addrspace(1) %
 ; CI-NEXT:    s_add_u32 s2, s2, 16
 ; CI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
 ; CI-NEXT:    v_mov_b32_e32 v5, s5
-; CI-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
 ; CI-NEXT:    s_addc_u32 s3, s3, 0
+; CI-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
 ; CI-NEXT:    v_mov_b32_e32 v13, s3
 ; CI-NEXT:    v_mov_b32_e32 v12, s2
 ; CI-NEXT:    flat_load_dwordx4 v[8:11], v[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/i1-to-bf16.ll b/llvm/test/CodeGen/AMDGPU/i1-to-bf16.ll
index e82801eadc936..0dfeb3454dad5 100644
--- a/llvm/test/CodeGen/AMDGPU/i1-to-bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/i1-to-bf16.ll
@@ -245,11 +245,11 @@ define <2 x bfloat> @v_uitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
@@ -273,8 +273,8 @@ define <2 x bfloat> @v_uitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -305,12 +305,12 @@ define <2 x bfloat> @v_uitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc_lo
@@ -341,9 +341,9 @@ define <2 x bfloat> @v_uitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -591,8 +591,8 @@ define <3 x bfloat> @v_uitofp_v3i1_to_v3bf16(<3 x i1> %num) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v5, v5, v1, 0x7fff
@@ -682,9 +682,9 @@ define <3 x bfloat> @v_uitofp_v3i1_to_v3bf16(<3 x i1> %num) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v5, v1, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v5, v5, v1, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -1587,11 +1587,11 @@ define <2 x bfloat> @v_sitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
@@ -1615,8 +1615,8 @@ define <2 x bfloat> @v_sitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -1647,12 +1647,12 @@ define <2 x bfloat> @v_sitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc_lo
@@ -1683,9 +1683,9 @@ define <2 x bfloat> @v_sitofp_v2i1_to_v2bf16(<2 x i1> %num) {
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v3, v3, v1, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -1935,8 +1935,8 @@ define <3 x bfloat> @v_sitofp_v3i1_to_v3bf16(<3 x i1> %num) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1.0, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v5, v5, v1, 0x7fff
@@ -2026,9 +2026,9 @@ define <3 x bfloat> @v_sitofp_v3i1_to_v3bf16(<3 x i1> %num) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1.0, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v5, v1, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v5, v5, v1, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
diff --git a/llvm/test/CodeGen/AMDGPU/idiv-licm.ll b/llvm/test/CodeGen/AMDGPU/idiv-licm.ll
index a71995a798410..ecbf5dfeb3af1 100644
--- a/llvm/test/CodeGen/AMDGPU/idiv-licm.ll
+++ b/llvm/test/CodeGen/AMDGPU/idiv-licm.ll
@@ -959,11 +959,10 @@ define amdgpu_kernel void @sdiv16_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-NEXT:    s_and_b32 s5, s5, exec_lo
 ; GFX11-NEXT:    s_cselect_b32 s4, s4, 0
 ; GFX11-NEXT:    s_and_b32 s5, 0xffff, s3
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    v_add_nc_u32_e32 v2, s4, v2
-; GFX11-NEXT:    s_lshl_b32 s5, s5, 1
 ; GFX11-NEXT:    s_add_i32 s3, s3, 1
-; GFX11-NEXT:    v_mov_b32_e32 v3, s5
+; GFX11-NEXT:    s_lshl_b32 s5, s5, 1
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_dual_mov_b32 v3, s5 :: v_dual_add_nc_u32 v2, s4, v2
 ; GFX11-NEXT:    s_and_b32 s4, s3, 0xffff
 ; GFX11-NEXT:    s_cmpk_eq_i32 s4, 0x400
 ; GFX11-NEXT:    global_store_b16 v3, v2, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll
index 82d62910bcb00..df77757443391 100644
--- a/llvm/test/CodeGen/AMDGPU/idot4u.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll
@@ -1451,8 +1451,8 @@ define amdgpu_kernel void @udot4_multiuse_add1(ptr addrspace(1) %src1,
 ; GFX11-DL-NEXT:    v_bfe_u32 v3, v0, 8, 8
 ; GFX11-DL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-DL-NEXT:    v_dot4_u32_u8 v0, v1, v0, s0
-; GFX11-DL-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-DL-NEXT:    s_add_i32 s0, s0, s0
+; GFX11-DL-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX11-DL-NEXT:    v_mul_u32_u24_e32 v2, v2, v3
 ; GFX11-DL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-DL-NEXT:    v_add3_u32 v0, s0, v2, v0
@@ -5919,10 +5919,10 @@ define amdgpu_kernel void @idot4_acc32_v16i8(ptr addrspace(1) %src1,
 ; GFX11-DL-NEXT:    global_load_b32 v0, v4, s[2:3]
 ; GFX11-DL-NEXT:    s_waitcnt vmcnt(1)
 ; GFX11-DL-NEXT:    v_perm_b32 v1, v3, v2, 0x7050002
-; GFX11-DL-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-DL-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-DL-NEXT:    v_perm_b32 v0, v0, v0, 0x3020001
-; GFX11-DL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-DL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX11-DL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-DL-NEXT:    v_dot4_u32_u8 v0, v1, v0, 0
 ; GFX11-DL-NEXT:    global_store_b32 v2, v0, s[4:5]
 ; GFX11-DL-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 6040cc47ad6f2..b5665835eaf7a 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -5763,13 +5763,13 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1)
 ; GENERIC-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; GENERIC-NEXT:    v_cndmask_b32_e64 v17, 63, v17, s[0:1]
 ; GENERIC-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
-; GENERIC-NEXT:    v_mov_b32_e32 v15, s21
-; GENERIC-NEXT:    v_cmp_eq_u32_e32 vcc, 13, v14
-; GENERIC-NEXT:    v_cndmask_b32_e32 v15, v15, v1, vcc
-; GENERIC-NEXT:    v_cmp_ne_u32_e32 vcc, 13, v18
-; GENERIC-NEXT:    v_cndmask_b32_e32 v15, 63, v15, vcc
 ; GENERIC-NEXT:    v_mov_b32_e32 v19, s20
 ; GENERIC-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v14
+; GENERIC-NEXT:    v_mov_b32_e32 v15, s21
+; GENERIC-NEXT:    v_cmp_eq_u32_e64 s[0:1], 13, v14
+; GENERIC-NEXT:    v_cndmask_b32_e64 v14, v15, v1, s[0:1]
+; GENERIC-NEXT:    v_cmp_ne_u32_e64 s[0:1], 13, v18
+; GENERIC-NEXT:    v_cndmask_b32_e64 v15, 63, v14, s[0:1]
 ; GENERIC-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; GENERIC-NEXT:    s_mov_b32 s2, -1
 ; GENERIC-NEXT:    v_cndmask_b32_e32 v14, v19, v1, vcc
@@ -6319,19 +6319,19 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1)
 ; SI-MOVREL-NEXT:    v_cmp_ne_u32_e32 vcc, 9, v18
 ; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v11, 63, v11, vcc
 ; SI-MOVREL-NEXT:    v_cmp_ne_u32_e32 vcc, 8, v18
+; SI-MOVREL-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
 ; SI-MOVREL-NEXT:    v_cmp_ne_u32_e32 vcc, 14, v18
-; SI-MOVREL-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; SI-MOVREL-NEXT:    v_cndmask_b32_e64 v17, 63, v17, s[0:1]
-; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
 ; SI-MOVREL-NEXT:    v_mov_b32_e32 v15, s21
-; SI-MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 13, v14
+; SI-MOVREL-NEXT:    v_cmp_eq_u32_e64 s[0:1], 13, v14
+; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
+; SI-MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v14
+; SI-MOVREL-NEXT:    v_cndmask_b32_e64 v14, v15, v1, s[0:1]
+; SI-MOVREL-NEXT:    v_cmp_ne_u32_e64 s[0:1], 13, v18
+; SI-MOVREL-NEXT:    v_cndmask_b32_e64 v15, 63, v14, s[0:1]
 ; SI-MOVREL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
-; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v15, v15, v1, vcc
-; SI-MOVREL-NEXT:    v_cmp_ne_u32_e32 vcc, 13, v18
-; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v15, 63, v15, vcc
 ; SI-MOVREL-NEXT:    v_mov_b32_e32 v19, s20
-; SI-MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v14
 ; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v14, v19, v1, vcc
 ; SI-MOVREL-NEXT:    v_cmp_ne_u32_e32 vcc, 12, v18
 ; SI-MOVREL-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
@@ -6426,35 +6426,35 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1)
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 15, v14
 ; VI-NEXT:    v_cndmask_b32_e32 v17, v11, v1, vcc
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 8, v14
-; VI-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; VI-NEXT:    v_mov_b32_e32 v16, s17
 ; VI-NEXT:    v_cndmask_b32_e32 v10, v15, v1, vcc
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 9, v14
-; VI-NEXT:    v_cndmask_b32_e64 v17, 63, v17, s[0:1]
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    v_cndmask_b32_e32 v11, v16, v1, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 9, v18
 ; VI-NEXT:    v_cndmask_b32_e32 v11, 63, v11, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 8, v18
+; VI-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; VI-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 14, v18
-; VI-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v17, 63, v17, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v15, s21
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 13, v14
-; VI-NEXT:    v_cndmask_b32_e32 v15, v15, v1, vcc
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 13, v18
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_add_u32 s2, s0, 48
-; VI-NEXT:    v_cndmask_b32_e32 v15, 63, v15, vcc
-; VI-NEXT:    v_mov_b32_e32 v19, s20
+; VI-NEXT:    v_cmp_eq_u32_e64 s[0:1], 13, v14
+; VI-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v14
-; VI-NEXT:    s_addc_u32 s3, s1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v14, v15, v1, s[0:1]
+; VI-NEXT:    v_cmp_ne_u32_e64 s[0:1], 13, v18
+; VI-NEXT:    v_cndmask_b32_e64 v15, 63, v14, s[0:1]
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; VI-NEXT:    v_mov_b32_e32 v19, s20
 ; VI-NEXT:    v_cndmask_b32_e32 v14, v19, v1, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 12, v18
+; VI-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_add_u32 s2, s0, 48
+; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v19, s3
 ; VI-NEXT:    v_mov_b32_e32 v18, s2
 ; VI-NEXT:    s_add_u32 s2, s0, 32
-; VI-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    flat_store_dwordx4 v[18:19], v[14:17]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
@@ -6558,19 +6558,19 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1)
 ; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e32 vcc, 9, v18
 ; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v11, 63, v11, vcc
 ; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e32 vcc, 8, v18
+; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
 ; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e32 vcc, 14, v18
-; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e64 s[0:1], 15, v18
 ; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e64 v17, 63, v17, s[0:1]
-; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
 ; GFX9-IDXMODE-NEXT:    v_mov_b32_e32 v15, s21
-; GFX9-IDXMODE-NEXT:    v_cmp_eq_u32_e32 vcc, 13, v14
+; GFX9-IDXMODE-NEXT:    v_cmp_eq_u32_e64 s[0:1], 13, v14
+; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v16, 63, v19, vcc
+; GFX9-IDXMODE-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v14
+; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e64 v14, v15, v1, s[0:1]
+; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e64 s[0:1], 13, v18
+; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e64 v15, 63, v14, s[0:1]
 ; GFX9-IDXMODE-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v15, v15, v1, vcc
-; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e32 vcc, 13, v18
-; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v15, 63, v15, vcc
 ; GFX9-IDXMODE-NEXT:    v_mov_b32_e32 v19, s20
-; GFX9-IDXMODE-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v14
 ; GFX9-IDXMODE-NEXT:    v_cndmask_b32_e32 v14, v19, v1, vcc
 ; GFX9-IDXMODE-NEXT:    v_cmp_ne_u32_e32 vcc, 12, v18
 ; GFX9-IDXMODE-NEXT:    v_mov_b32_e32 v18, 0
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 837c18fe7af0a..f6330f4eb8216 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -733,23 +733,23 @@ define amdgpu_kernel void @dynamic_insertelement_v9f32(ptr addrspace(1) %out, <9
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[8:9], 0x40
 ; VI-NEXT:    s_load_dwordx2 s[12:13], s[8:9], 0x0
+; VI-NEXT:    s_load_dword s10, s[8:9], 0x60
 ; VI-NEXT:    v_mov_b32_e32 v9, 0x40a00000
 ; VI-NEXT:    s_mov_b32 s15, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s14, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_load_dword s0, s[8:9], 0x80
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_load_dword s0, s[8:9], 0x60
-; VI-NEXT:    s_load_dword s1, s[8:9], 0x80
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v4, s4
 ; VI-NEXT:    v_mov_b32_e32 v5, s5
 ; VI-NEXT:    v_mov_b32_e32 v6, s6
 ; VI-NEXT:    v_mov_b32_e32 v7, s7
+; VI-NEXT:    v_mov_b32_e32 v8, s10
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v8, s0
-; VI-NEXT:    s_mov_b32 m0, s1
+; VI-NEXT:    s_mov_b32 m0, s0
 ; VI-NEXT:    v_movreld_b32_e32 v0, v9
 ; VI-NEXT:    buffer_store_dword v8, off, s[12:15], 0 offset:32
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[12:15], 0 offset:16
@@ -791,10 +791,11 @@ define amdgpu_kernel void @dynamic_insertelement_v10f32(ptr addrspace(1) %out, <
 ; VI-LABEL: dynamic_insertelement_v10f32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[12:19], s[8:9], 0x40
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[8:9], 0x60
 ; VI-NEXT:    s_load_dword s6, s[8:9], 0x80
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    v_mov_b32_e32 v10, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
 ; VI-NEXT:    v_mov_b32_e32 v1, s13
@@ -807,7 +808,6 @@ define amdgpu_kernel void @dynamic_insertelement_v10f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v8, s4
 ; VI-NEXT:    v_mov_b32_e32 v9, s5
 ; VI-NEXT:    s_mov_b32 m0, s6
-; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_movreld_b32_e32 v0, v10
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
@@ -851,12 +851,13 @@ define amdgpu_kernel void @dynamic_insertelement_v11f32(ptr addrspace(1) %out, <
 ;
 ; VI-LABEL: dynamic_insertelement_v11f32:
 ; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    s_load_dwordx4 s[4:7], s[8:9], 0x60
 ; VI-NEXT:    s_load_dwordx8 s[12:19], s[8:9], 0x40
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_load_dword s7, s[8:9], 0x80
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    v_mov_b32_e32 v11, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    v_mov_b32_e32 v8, s4
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
 ; VI-NEXT:    v_mov_b32_e32 v1, s13
@@ -870,7 +871,6 @@ define amdgpu_kernel void @dynamic_insertelement_v11f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v10, s6
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 m0, s7
-; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_movreld_b32_e32 v0, v11
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
@@ -919,6 +919,7 @@ define amdgpu_kernel void @dynamic_insertelement_v12f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    s_load_dwordx4 s[4:7], s[8:9], 0x60
 ; VI-NEXT:    s_load_dword s8, s[8:9], 0x80
 ; VI-NEXT:    v_mov_b32_e32 v12, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
 ; VI-NEXT:    v_mov_b32_e32 v1, s13
@@ -933,7 +934,6 @@ define amdgpu_kernel void @dynamic_insertelement_v12f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v10, s6
 ; VI-NEXT:    v_mov_b32_e32 v11, s7
 ; VI-NEXT:    s_mov_b32 m0, s8
-; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_movreld_b32_e32 v0, v12
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
@@ -1286,10 +1286,11 @@ define amdgpu_kernel void @dynamic_insertelement_v10i32(ptr addrspace(1) %out, <
 ; VI-LABEL: dynamic_insertelement_v10i32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[12:19], s[8:9], 0x40
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[8:9], 0x60
 ; VI-NEXT:    s_load_dword s6, s[8:9], 0x80
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
 ; VI-NEXT:    v_mov_b32_e32 v1, s13
@@ -1302,7 +1303,6 @@ define amdgpu_kernel void @dynamic_insertelement_v10i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v8, s4
 ; VI-NEXT:    v_mov_b32_e32 v9, s5
 ; VI-NEXT:    s_mov_b32 m0, s6
-; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_movreld_b32_e32 v0, 5
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -1344,12 +1344,13 @@ define amdgpu_kernel void @dynamic_insertelement_v11i32(ptr addrspace(1) %out, <
 ;
 ; VI-LABEL: dynamic_insertelement_v11i32:
 ; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    s_load_dwordx4 s[4:7], s[8:9], 0x60
 ; VI-NEXT:    s_load_dwordx8 s[12:19], s[8:9], 0x40
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_load_dword s7, s[8:9], 0x80
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_mov_b32_e32 v8, s4
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
 ; VI-NEXT:    v_mov_b32_e32 v1, s13
@@ -1363,7 +1364,6 @@ define amdgpu_kernel void @dynamic_insertelement_v11i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v10, s6
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 m0, s7
-; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_movreld_b32_e32 v0, 5
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -1410,6 +1410,7 @@ define amdgpu_kernel void @dynamic_insertelement_v12i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    s_load_dwordx4 s[4:7], s[8:9], 0x60
 ; VI-NEXT:    s_load_dword s8, s[8:9], 0x80
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
 ; VI-NEXT:    v_mov_b32_e32 v1, s13
@@ -1424,7 +1425,6 @@ define amdgpu_kernel void @dynamic_insertelement_v12i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v10, s6
 ; VI-NEXT:    v_mov_b32_e32 v11, s7
 ; VI-NEXT:    s_mov_b32 m0, s8
-; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    v_movreld_b32_e32 v0, 5
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index 81ef7351b84e9..678d06e969276 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -6385,16 +6385,16 @@ define <2 x i64> @clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; GFX1200-GISEL-NEXT:    v_add_co_ci_u32_e64 v2, null, 0, v2, vcc_lo
 ; GFX1200-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX1200-GISEL-NEXT:    v_mul_hi_u32 v1, v10, v9
-; GFX1200-GISEL-NEXT:    v_mul_lo_u32 v14, v10, v9
+; GFX1200-GISEL-NEXT:    v_mul_lo_u32 v15, v10, v9
 ; GFX1200-GISEL-NEXT:    v_add_co_u32 v12, vcc_lo, v7, 1
 ; GFX1200-GISEL-NEXT:    s_wait_alu 0xfffd
 ; GFX1200-GISEL-NEXT:    v_add_co_ci_u32_e64 v13, null, 0, v3, vcc_lo
-; GFX1200-GISEL-NEXT:    v_add_co_u32 v15, vcc_lo, v10, 1
+; GFX1200-GISEL-NEXT:    v_add_co_u32 v14, vcc_lo, v10, 1
 ; GFX1200-GISEL-NEXT:    v_mul_lo_u32 v11, v7, v8
 ; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[5:6], null, v7, v5, v[0:1]
 ; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[1:2], null, v10, v2, v[1:2]
 ; GFX1200-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX1200-GISEL-NEXT:    v_mul_hi_u32 v2, v14, v15
+; GFX1200-GISEL-NEXT:    v_mul_hi_u32 v2, v15, v14
 ; GFX1200-GISEL-NEXT:    s_wait_alu 0xfffd
 ; GFX1200-GISEL-NEXT:    v_add_co_ci_u32_e64 v10, null, 0, v4, vcc_lo
 ; GFX1200-GISEL-NEXT:    v_mul_hi_u32 v0, v11, v12
@@ -6403,11 +6403,11 @@ define <2 x i64> @clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; GFX1200-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[6:7], null, v11, v13, v[0:1]
 ; GFX1200-GISEL-NEXT:    v_mul_lo_u32 v0, v11, v12
-; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[7:8], null, v14, v10, v[2:3]
+; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[7:8], null, v15, v10, v[2:3]
 ; GFX1200-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[1:2], null, v5, v12, v[6:7]
-; GFX1200-GISEL-NEXT:    v_mul_lo_u32 v2, v14, v15
-; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[3:4], null, v3, v15, v[7:8]
+; GFX1200-GISEL-NEXT:    v_mul_lo_u32 v2, v15, v14
+; GFX1200-GISEL-NEXT:    v_mad_co_u64_u32 v[3:4], null, v3, v14, v[7:8]
 ; GFX1200-GISEL-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %y18 = add <2 x i64> %x, <i64 1, i64 1>
diff --git a/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll b/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
index 7ffc2a6987742..7e7de6498410c 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
@@ -396,7 +396,7 @@ define amdgpu_kernel void @test_flat_misaligned_v4(ptr %arg) {
 ; ALIGNED-GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, s1, 0, s0
 ; ALIGNED-GFX11-NEXT:    flat_load_b128 v[0:3], v[7:8]
 ; ALIGNED-GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; ALIGNED-GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v2
+; ALIGNED-GFX11-NEXT:    v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1
 ; ALIGNED-GFX11-NEXT:    v_mov_b32_e32 v6, v0
 ; ALIGNED-GFX11-NEXT:    flat_store_b128 v[7:8], v[3:6]
 ; ALIGNED-GFX11-NEXT:    s_endpgm
@@ -413,7 +413,7 @@ define amdgpu_kernel void @test_flat_misaligned_v4(ptr %arg) {
 ; UNALIGNED-GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, s1, 0, s0
 ; UNALIGNED-GFX11-NEXT:    flat_load_b128 v[0:3], v[7:8]
 ; UNALIGNED-GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; UNALIGNED-GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v2
+; UNALIGNED-GFX11-NEXT:    v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1
 ; UNALIGNED-GFX11-NEXT:    v_mov_b32_e32 v6, v0
 ; UNALIGNED-GFX11-NEXT:    flat_store_b128 v[7:8], v[3:6]
 ; UNALIGNED-GFX11-NEXT:    s_endpgm
@@ -818,7 +818,7 @@ define amdgpu_kernel void @test_flat_aligned_v4(ptr %arg) {
 ; ALIGNED-GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, s1, 0, s0
 ; ALIGNED-GFX11-NEXT:    flat_load_b128 v[0:3], v[7:8]
 ; ALIGNED-GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; ALIGNED-GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v2
+; ALIGNED-GFX11-NEXT:    v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1
 ; ALIGNED-GFX11-NEXT:    v_mov_b32_e32 v6, v0
 ; ALIGNED-GFX11-NEXT:    flat_store_b128 v[7:8], v[3:6]
 ; ALIGNED-GFX11-NEXT:    s_endpgm
@@ -835,7 +835,7 @@ define amdgpu_kernel void @test_flat_aligned_v4(ptr %arg) {
 ; UNALIGNED-GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, s1, 0, s0
 ; UNALIGNED-GFX11-NEXT:    flat_load_b128 v[0:3], v[7:8]
 ; UNALIGNED-GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; UNALIGNED-GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v2
+; UNALIGNED-GFX11-NEXT:    v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1
 ; UNALIGNED-GFX11-NEXT:    v_mov_b32_e32 v6, v0
 ; UNALIGNED-GFX11-NEXT:    flat_store_b128 v[7:8], v[3:6]
 ; UNALIGNED-GFX11-NEXT:    s_endpgm
@@ -1001,7 +1001,7 @@ define amdgpu_kernel void @test_flat_v4_aligned8(ptr %arg) {
 ; ALIGNED-GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, s1, 0, s0
 ; ALIGNED-GFX11-NEXT:    flat_load_b128 v[0:3], v[7:8]
 ; ALIGNED-GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; ALIGNED-GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v2
+; ALIGNED-GFX11-NEXT:    v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1
 ; ALIGNED-GFX11-NEXT:    v_mov_b32_e32 v6, v0
 ; ALIGNED-GFX11-NEXT:    flat_store_b128 v[7:8], v[3:6]
 ; ALIGNED-GFX11-NEXT:    s_endpgm
@@ -1018,7 +1018,7 @@ define amdgpu_kernel void @test_flat_v4_aligned8(ptr %arg) {
 ; UNALIGNED-GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, s1, 0, s0
 ; UNALIGNED-GFX11-NEXT:    flat_load_b128 v[0:3], v[7:8]
 ; UNALIGNED-GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; UNALIGNED-GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v2
+; UNALIGNED-GFX11-NEXT:    v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1
 ; UNALIGNED-GFX11-NEXT:    v_mov_b32_e32 v6, v0
 ; UNALIGNED-GFX11-NEXT:    flat_store_b128 v[7:8], v[3:6]
 ; UNALIGNED-GFX11-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.ordered.add.b64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.ordered.add.b64.ll
index 2afa9ba14ceae..968c198fb6239 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.ordered.add.b64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.atomic.ordered.add.b64.ll
@@ -35,7 +35,7 @@ define amdgpu_kernel void @global_atomic_ordered_add_b64_rtn(ptr addrspace(1) %a
 ; GFX12-SDAG-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX12-SDAG-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX12-SDAG-NEXT:    s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT:    v_dual_mov_b32 v1, s3 :: v_dual_mov_b32 v0, s2
+; GFX12-SDAG-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
 ; GFX12-SDAG-NEXT:    global_atomic_ordered_add_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
 ; GFX12-SDAG-NEXT:    s_wait_loadcnt 0x0
 ; GFX12-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
index 9606c68684957..6a5c83248038d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
@@ -477,8 +477,8 @@ define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr,
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, s3, 0, s0
 ; GFX11-NEXT:    flat_load_b32 v9, v[0:1]
 ; GFX11-NEXT:    flat_load_b32 v10, v[2:3]
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0x40e00000
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0x40c00000
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0x40e00000
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0x41000000
 ; GFX11-NEXT:    v_mov_b32_e32 v3, 0x40400000
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -505,8 +505,8 @@ define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr,
 ; GFX12-SDAG-NEXT:    v_add_co_ci_u32_e64 v3, null, s3, 0, s0
 ; GFX12-SDAG-NEXT:    flat_load_b32 v9, v[0:1]
 ; GFX12-SDAG-NEXT:    flat_load_b32 v10, v[2:3]
-; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40e00000
 ; GFX12-SDAG-NEXT:    v_mov_b32_e32 v0, 0x40c00000
+; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40e00000
 ; GFX12-SDAG-NEXT:    v_mov_b32_e32 v2, 0x41000000
 ; GFX12-SDAG-NEXT:    v_mov_b32_e32 v3, 0x40400000
 ; GFX12-SDAG-NEXT:    s_wait_loadcnt_dscnt 0x0
@@ -633,8 +633,8 @@ define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, s3, 0, s0
 ; GFX11-NEXT:    flat_load_b32 v6, v[0:1]
 ; GFX11-NEXT:    flat_load_b32 v7, v[2:3]
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0x46004200
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX11-NEXT:    v_dual_mov_b32 v2, 0x48004500 :: v_dual_mov_b32 v3, 0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    image_bvh_intersect_ray v[0:3], [v6, v7, v[3:5], v[0:2]], s[4:7] a16
@@ -658,8 +658,8 @@ define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_
 ; GFX12-SDAG-NEXT:    v_add_co_ci_u32_e64 v3, null, s3, 0, s0
 ; GFX12-SDAG-NEXT:    flat_load_b32 v6, v[0:1]
 ; GFX12-SDAG-NEXT:    flat_load_b32 v7, v[2:3]
-; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX12-SDAG-NEXT:    v_mov_b32_e32 v0, 0x46004200
+; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX12-SDAG-NEXT:    v_dual_mov_b32 v2, 0x48004500 :: v_dual_mov_b32 v3, 0
 ; GFX12-SDAG-NEXT:    s_wait_loadcnt_dscnt 0x0
 ; GFX12-SDAG-NEXT:    image_bvh_intersect_ray v[0:3], [v6, v7, v[3:5], v[0:2]], s[4:7] a16
@@ -947,8 +947,8 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
 ; GFX11-NEXT:    flat_load_b32 v8, v[0:1]
-; GFX11-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0x46004200
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    image_bvh64_intersect_ray v[0:3], [v[6:7], v8, v[3:5], v[0:2]], s[0:3] a16
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
@@ -973,8 +973,8 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-SDAG-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
 ; GFX12-SDAG-NEXT:    flat_load_b32 v8, v[0:1]
-; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX12-SDAG-NEXT:    v_mov_b32_e32 v0, 0x46004200
+; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, 0x47004400
 ; GFX12-SDAG-NEXT:    s_wait_loadcnt_dscnt 0x0
 ; GFX12-SDAG-NEXT:    image_bvh64_intersect_ray v[0:3], [v[6:7], v8, v[3:5], v[0:2]], s[0:3] a16
 ; GFX12-SDAG-NEXT:    s_wait_bvhcnt 0x0
@@ -995,12 +995,12 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
 ; GFX12-GISEL-NEXT:    s_mov_b32 s10, 0x45004800
 ; GFX12-GISEL-NEXT:    v_mov_b32_e32 v6, 0xb36211c6
 ; GFX12-GISEL-NEXT:    v_bfrev_b32_e32 v7, 4.0
-; GFX12-GISEL-NEXT:    v_mov_b32_e32 v3, s8
-; GFX12-GISEL-NEXT:    v_dual_mov_b32 v5, s10 :: v_dual_mov_b32 v4, s9
+; GFX12-GISEL-NEXT:    v_dual_mov_b32 v3, s8 :: v_dual_mov_b32 v4, s9
 ; GFX12-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX12-GISEL-NEXT:    v_dual_mov_b32 v5, s10 :: v_dual_mov_b32 v0, s6
+; GFX12-GISEL-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX12-GISEL-NEXT:    s_mov_b32 s6, 2.0
-; GFX12-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX12-GISEL-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
 ; GFX12-GISEL-NEXT:    v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
 ; GFX12-GISEL-NEXT:    flat_load_b32 v8, v[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
index fb755ea2e5a7f..24e213ea2fe55 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
@@ -183,8 +183,8 @@ entry:
 define amdgpu_cs void @sgpr_inverse_ballot(i64 inreg %input, ptr addrspace(1) %out) {
 ; GISEL_W64-LABEL: sgpr_inverse_ballot:
 ; GISEL_W64:       ; %bb.0: ; %entry
-; GISEL_W64-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
 ; GISEL_W64-NEXT:    v_mov_b32_e32 v3, 0
+; GISEL_W64-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
 ; GISEL_W64-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GISEL_W64-NEXT:    s_endpgm
 ;
@@ -199,8 +199,8 @@ define amdgpu_cs void @sgpr_inverse_ballot(i64 inreg %input, ptr addrspace(1) %o
 ;
 ; GISEL_W32-LABEL: sgpr_inverse_ballot:
 ; GISEL_W32:       ; %bb.0: ; %entry
-; GISEL_W32-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; GISEL_W32-NEXT:    v_mov_b32_e32 v3, 0
+; GISEL_W32-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; GISEL_W32-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GISEL_W32-NEXT:    s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
index 8cf7497fca640..b6656569b79c1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
@@ -3283,8 +3283,8 @@ define void @test_writelane_v8f64(ptr addrspace(1) %out, <8 x double> %src, i32
 ; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s7, v3
 ; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s8, v2
 ; GFX802-SDAG-NEXT:    v_addc_u32_e32 v23, vcc, 0, v1, vcc
-; GFX802-SDAG-NEXT:    flat_load_dwordx4 v[2:5], v[22:23]
 ; GFX802-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX802-SDAG-NEXT:    flat_load_dwordx4 v[2:5], v[22:23]
 ; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v9
 ; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s10, v15
 ; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s11, v14
@@ -3444,8 +3444,8 @@ define void @test_writelane_v8f64(ptr addrspace(1) %out, <8 x double> %src, i32
 ; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s7, v4
 ; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s8, v5
 ; GFX802-GISEL-NEXT:    v_addc_u32_e32 v23, vcc, 0, v1, vcc
-; GFX802-GISEL-NEXT:    flat_load_dwordx4 v[2:5], v[22:23]
 ; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    flat_load_dwordx4 v[2:5], v[22:23]
 ; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v7
 ; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s9, v11
 ; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s10, v12
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index d757df83b32ba..0d5846a4a4985 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -1945,10 +1945,11 @@ define amdgpu_kernel void @s_log_v4f32(ptr addrspace(1) %out, <4 x float> %in) {
 ; GFX1100-GISEL-NEXT:    v_dual_fmac_f32 v12, 0x3377d1cf, v2 :: v_dual_fmac_f32 v13, 0x3377d1cf, v3
 ; GFX1100-GISEL-NEXT:    v_add_f32_e32 v7, v7, v12
 ; GFX1100-GISEL-NEXT:    s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT:    v_dual_mul_f32 v5, 0x3f317217, v0 :: v_dual_add_f32 v8, v8, v13
-; GFX1100-GISEL-NEXT:    v_mul_f32_e32 v6, 0x3f317217, v1
+; GFX1100-GISEL-NEXT:    v_mul_f32_e32 v5, 0x3f317217, v0
 ; GFX1100-GISEL-NEXT:    v_cmp_gt_f32_e64 vcc_lo, 0x7f800000, |v0|
-; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1100-GISEL-NEXT:    v_mul_f32_e32 v6, 0x3f317217, v1
+; GFX1100-GISEL-NEXT:    v_add_f32_e32 v8, v8, v13
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX1100-GISEL-NEXT:    v_fma_f32 v10, 0x3f317217, v0, -v5
 ; GFX1100-GISEL-NEXT:    v_fma_f32 v11, 0x3f317217, v1, -v6
 ; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index ae1318da453c5..8006876dbe3ff 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -1945,10 +1945,11 @@ define amdgpu_kernel void @s_log10_v4f32(ptr addrspace(1) %out, <4 x float> %in)
 ; GFX1100-GISEL-NEXT:    v_dual_fmac_f32 v12, 0x3284fbcf, v2 :: v_dual_fmac_f32 v13, 0x3284fbcf, v3
 ; GFX1100-GISEL-NEXT:    v_add_f32_e32 v7, v7, v12
 ; GFX1100-GISEL-NEXT:    s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT:    v_dual_mul_f32 v5, 0x3e9a209a, v0 :: v_dual_add_f32 v8, v8, v13
-; GFX1100-GISEL-NEXT:    v_mul_f32_e32 v6, 0x3e9a209a, v1
+; GFX1100-GISEL-NEXT:    v_mul_f32_e32 v5, 0x3e9a209a, v0
 ; GFX1100-GISEL-NEXT:    v_cmp_gt_f32_e64 vcc_lo, 0x7f800000, |v0|
-; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1100-GISEL-NEXT:    v_mul_f32_e32 v6, 0x3e9a209a, v1
+; GFX1100-GISEL-NEXT:    v_add_f32_e32 v8, v8, v13
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX1100-GISEL-NEXT:    v_fma_f32 v10, 0x3e9a209a, v0, -v5
 ; GFX1100-GISEL-NEXT:    v_fma_f32 v11, 0x3e9a209a, v1, -v6
 ; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
index 17fdc841a1258..a56c92785d487 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
@@ -2297,121 +2297,121 @@ define <16 x half> @v_maximum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX7-LABEL: v_maximum_v16f16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v17
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v1, v1
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX7-NEXT:    v_max_f32_e32 v0, v0, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v17
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v3, v3
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
-; GFX7-NEXT:    v_max_f32_e32 v1, v1, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v18
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v4, v4
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v7, v7
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v4, v4
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v5, v5
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v16
+; GFX7-NEXT:    v_max_f32_e32 v1, v1, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v18
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v6, v6
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v17
-; GFX7-NEXT:    v_max_f32_e32 v2, v2, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v19
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v7, v7
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v9, v9
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v10, v10
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v7, v7
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v16
+; GFX7-NEXT:    v_max_f32_e32 v2, v2, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v19
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v9, v9
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v17
-; GFX7-NEXT:    v_max_f32_e32 v3, v3, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v20
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v10, v10
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v10, v10
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v27
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v11, v11
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v18, v28
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v10, v10
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v12, v12
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v16
+; GFX7-NEXT:    v_max_f32_e32 v3, v3, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v20
 ; GFX7-NEXT:    v_cvt_f32_f16_e32 v11, v11
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v18, v18
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v17
-; GFX7-NEXT:    v_max_f32_e32 v4, v4, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v21
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v12, v12
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v13, v13
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v19, v16
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v18
-; GFX7-NEXT:    v_max_f32_e32 v12, v12, v18
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v18, v29
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v17
-; GFX7-NEXT:    v_max_f32_e32 v5, v5, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v22
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v20, v0
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v18
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v18, v13
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v0, v19
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v13, v20
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[26:27], v18, v16
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v17
-; GFX7-NEXT:    v_max_f32_e32 v6, v6, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v23
-; GFX7-NEXT:    v_max_f32_e32 v16, v18, v16
-; GFX7-NEXT:    v_max_f32_e32 v18, v13, v0
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v0
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v13, v15
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v15, v30
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v12, v12
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v19, v29
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v17
+; GFX7-NEXT:    v_max_f32_e32 v11, v11, v17
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v28
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v16
+; GFX7-NEXT:    v_max_f32_e32 v4, v4, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v21
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v20, v13
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v13, v17
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v18, v12
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v12, v19
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v20
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[26:27], v18, v13
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v16
+; GFX7-NEXT:    v_max_f32_e32 v5, v5, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v22
+; GFX7-NEXT:    v_max_f32_e32 v13, v18, v13
+; GFX7-NEXT:    v_max_f32_e32 v18, v17, v12
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[28:29], v17, v12
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
 ; GFX7-NEXT:    v_cvt_f16_f32_e32 v14, v14
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v17
-; GFX7-NEXT:    v_max_f32_e32 v7, v7, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v24
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v15, v15
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v14, v14
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v20, v13
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v15, v15
 ; GFX7-NEXT:    v_mov_b32_e32 v19, 0x7fc00000
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v19, v1, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v13, v19, v16, s[26:27]
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v17
-; GFX7-NEXT:    v_max_f32_e32 v8, v8, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v25
-; GFX7-NEXT:    v_max_f32_e32 v16, v14, v15
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v14, v15
-; GFX7-NEXT:    v_cndmask_b32_e32 v14, v19, v16, vcc
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cndmask_b32_e64 v2, v19, v2, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v3, v19, v3, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v4, v19, v4, s[8:9]
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v17
-; GFX7-NEXT:    v_max_f32_e32 v9, v9, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v26
-; GFX7-NEXT:    v_cndmask_b32_e64 v5, v19, v5, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v6, v19, v6, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v7, v19, v7, s[14:15]
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cndmask_b32_e64 v8, v19, v8, s[16:17]
-; GFX7-NEXT:    v_cndmask_b32_e64 v9, v19, v9, s[18:19]
-; GFX7-NEXT:    v_cndmask_b32_e64 v12, v19, v12, s[24:25]
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v17
-; GFX7-NEXT:    v_max_f32_e32 v10, v10, v17
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v17, v27
-; GFX7-NEXT:    v_cndmask_b32_e64 v10, v19, v10, s[20:21]
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v17
-; GFX7-NEXT:    v_max_f32_e32 v11, v11, v17
-; GFX7-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX7-NEXT:    v_cndmask_b32_e64 v11, v19, v11, s[22:23]
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v16
+; GFX7-NEXT:    v_max_f32_e32 v6, v6, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v23
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v14, v14
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v15, v15
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v19, v0, vcc
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v19, v1, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v19, v2, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, v19, v3, s[8:9]
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v16
+; GFX7-NEXT:    v_max_f32_e32 v7, v7, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v24
+; GFX7-NEXT:    v_cndmask_b32_e64 v4, v19, v4, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v5, v19, v5, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, v19, v6, s[14:15]
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cndmask_b32_e64 v7, v19, v7, s[16:17]
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v19, v11, s[24:25]
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v16
+; GFX7-NEXT:    v_max_f32_e32 v8, v8, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v25
+; GFX7-NEXT:    v_cndmask_b32_e64 v8, v19, v8, s[18:19]
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v16
+; GFX7-NEXT:    v_max_f32_e32 v9, v9, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v26
+; GFX7-NEXT:    v_cndmask_b32_e64 v9, v19, v9, s[20:21]
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v16
+; GFX7-NEXT:    v_max_f32_e32 v10, v10, v16
+; GFX7-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT:    v_cndmask_b32_e64 v10, v19, v10, s[22:23]
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_cvt_f16_f32_e32 v0, v17
-; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v0
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v19, v18, s[28:29]
-; GFX7-NEXT:    v_max_f32_e32 v15, v20, v17
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v20, v17
-; GFX7-NEXT:    v_cndmask_b32_e32 v15, v19, v15, vcc
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v12, v16
+; GFX7-NEXT:    v_cvt_f16_f32_e32 v16, v30
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v17, v12
+; GFX7-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v19, v13, s[26:27]
+; GFX7-NEXT:    v_cndmask_b32_e64 v13, v19, v18, s[28:29]
+; GFX7-NEXT:    v_max_f32_e32 v18, v14, v16
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v14, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v14, v19, v18, vcc
+; GFX7-NEXT:    v_max_f32_e32 v16, v15, v17
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
+; GFX7-NEXT:    v_cndmask_b32_e32 v15, v19, v16, vcc
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_maximum_v16f16:
@@ -2439,6 +2439,7 @@ define <16 x half> @v_maximum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[10:11], v18, v17
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v17, 16, v9
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v18, 16, v1
+; GFX8-NEXT:    v_mov_b32_e32 v19, 0x7e00
 ; GFX8-NEXT:    v_max_f16_e32 v24, v18, v17
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[12:13], v18, v17
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v17, 16, v8
@@ -2453,28 +2454,26 @@ define <16 x half> @v_maximum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[20:21], v4, v12
 ; GFX8-NEXT:    v_max_f16_e32 v4, v3, v11
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[22:23], v3, v11
+; GFX8-NEXT:    v_max_f16_e32 v3, v2, v10
 ; GFX8-NEXT:    v_max_f16_e32 v11, v7, v15
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[24:25], v7, v15
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v12, 16, v15
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GFX8-NEXT:    v_mov_b32_e32 v19, 0x7e00
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, v19, v16, vcc
+; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v2, v10
 ; GFX8-NEXT:    v_max_f16_e32 v13, v7, v12
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[26:27], v7, v12
-; GFX8-NEXT:    v_max_f16_e32 v3, v2, v10
-; GFX8-NEXT:    v_cndmask_b32_e64 v12, v19, v13, s[26:27]
-; GFX8-NEXT:    v_cndmask_b32_e32 v13, v19, v16, vcc
-; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v2, v10
-; GFX8-NEXT:    v_max_f16_e32 v14, v1, v9
+; GFX8-NEXT:    v_max_f16_e32 v7, v1, v9
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v19, v3, vcc
 ; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v1, v9
-; GFX8-NEXT:    v_max_f16_e32 v7, v0, v8
+; GFX8-NEXT:    v_max_f16_e32 v12, v0, v8
 ; GFX8-NEXT:    v_cndmask_b32_e64 v18, v19, v22, s[8:9]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v22, v19, v25, s[14:15]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v19, v14, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v19, v7, vcc
 ; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v0, v8
 ; GFX8-NEXT:    v_cndmask_b32_e64 v16, v19, v21, s[6:7]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v21, v19, v24, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v19, v7, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v19, v12, vcc
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v22
 ; GFX8-NEXT:    v_cndmask_b32_e64 v15, v19, v20, s[4:5]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v20, v19, v23, s[10:11]
@@ -2488,14 +2487,15 @@ define <16 x half> @v_maximum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX8-NEXT:    v_cndmask_b32_e64 v5, v19, v5, s[20:21]
 ; GFX8-NEXT:    v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v16
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, v19, v13, s[26:27]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v6, v19, v6, s[18:19]
 ; GFX8-NEXT:    v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v15
 ; GFX8-NEXT:    v_cndmask_b32_e64 v11, v19, v11, s[24:25]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v17, v19, v17, s[16:17]
 ; GFX8-NEXT:    v_or_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v13
-; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v12
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v14
+; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v13
 ; GFX8-NEXT:    v_or_b32_sdwa v6, v17, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v7, v11, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f32.ll
index 8b1ba393c8de8..826bf427503ab 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f32.ll
@@ -1684,7 +1684,7 @@ define <8 x float> @v_maximum_v8f32(<8 x float> %src0, <8 x float> %src1) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, 0x7fc00000, v16, vcc_lo
 ; GFX11-NEXT:    v_cmp_o_f32_e32 vcc_lo, v1, v9
-; GFX11-NEXT:    v_dual_max_f32 v9, v3, v11 :: v_dual_max_f32 v8, v2, v10
+; GFX11-NEXT:    v_dual_max_f32 v8, v2, v10 :: v_dual_max_f32 v9, v3, v11
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0x7fc00000, v17, vcc_lo
 ; GFX11-NEXT:    v_cmp_o_f32_e32 vcc_lo, v2, v10
 ; GFX11-NEXT:    v_max_f32_e32 v10, v7, v15
@@ -1727,169 +1727,169 @@ define <16 x float> @v_maximum_v16f32(<16 x float> %src0, <16 x float> %src1) {
 ; GFX7-LABEL: v_maximum_v16f32:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX7-NEXT:    v_max_f32_e32 v0, v0, v16
+; GFX7-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v17
 ; GFX7-NEXT:    v_max_f32_e32 v1, v1, v17
-; GFX7-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v18
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v18
 ; GFX7-NEXT:    v_max_f32_e32 v2, v2, v18
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v19
+; GFX7-NEXT:    v_mov_b32_e32 v17, 0x7fc00000
+; GFX7-NEXT:    v_max_f32_e32 v18, v13, v29
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v29
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v19
 ; GFX7-NEXT:    v_max_f32_e32 v3, v3, v19
-; GFX7-NEXT:    v_mov_b32_e32 v18, 0x7fc00000
-; GFX7-NEXT:    v_max_f32_e32 v19, v0, v16
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[28:29], v0, v16
-; GFX7-NEXT:    v_max_f32_e32 v16, v14, v30
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v20
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v20
 ; GFX7-NEXT:    v_max_f32_e32 v4, v4, v20
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v21
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v21
 ; GFX7-NEXT:    v_max_f32_e32 v5, v5, v21
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v22
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v22
 ; GFX7-NEXT:    v_max_f32_e32 v6, v6, v22
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v23
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v23
 ; GFX7-NEXT:    v_max_f32_e32 v7, v7, v23
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v24
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v24
 ; GFX7-NEXT:    v_max_f32_e32 v8, v8, v24
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v25
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v25
 ; GFX7-NEXT:    v_max_f32_e32 v9, v9, v25
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v26
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v26
 ; GFX7-NEXT:    v_max_f32_e32 v10, v10, v26
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v27
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v27
 ; GFX7-NEXT:    v_max_f32_e32 v11, v11, v27
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v28
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[26:27], v12, v28
 ; GFX7-NEXT:    v_max_f32_e32 v12, v12, v28
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[26:27], v13, v29
-; GFX7-NEXT:    v_max_f32_e32 v13, v13, v29
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v18, v1, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v14, v18, v16, s[40:41]
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v18, v19, s[28:29]
-; GFX7-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v3, v18, v3, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s[8:9]
-; GFX7-NEXT:    v_cndmask_b32_e64 v5, v18, v5, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v6, v18, v6, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v7, v18, v7, s[14:15]
-; GFX7-NEXT:    v_cndmask_b32_e64 v8, v18, v8, s[16:17]
-; GFX7-NEXT:    v_cndmask_b32_e64 v9, v18, v9, s[18:19]
-; GFX7-NEXT:    v_cndmask_b32_e64 v10, v18, v10, s[20:21]
-; GFX7-NEXT:    v_cndmask_b32_e64 v11, v18, v11, s[22:23]
-; GFX7-NEXT:    v_cndmask_b32_e64 v12, v18, v12, s[24:25]
-; GFX7-NEXT:    v_cndmask_b32_e64 v13, v18, v13, s[26:27]
+; GFX7-NEXT:    v_max_f32_e32 v19, v14, v30
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v17, v0, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v13, v17, v18, s[28:29]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v17, v1, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v17, v2, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, v17, v3, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v4, v17, v4, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, v17, v6, s[14:15]
+; GFX7-NEXT:    v_cndmask_b32_e64 v7, v17, v7, s[16:17]
+; GFX7-NEXT:    v_cndmask_b32_e64 v8, v17, v8, s[18:19]
+; GFX7-NEXT:    v_cndmask_b32_e64 v9, v17, v9, s[20:21]
+; GFX7-NEXT:    v_cndmask_b32_e64 v10, v17, v10, s[22:23]
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v17, v11, s[24:25]
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v17, v12, s[26:27]
+; GFX7-NEXT:    v_cndmask_b32_e64 v14, v17, v19, s[40:41]
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_max_f32_e32 v16, v15, v17
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
-; GFX7-NEXT:    v_cndmask_b32_e32 v15, v18, v16, vcc
+; GFX7-NEXT:    v_max_f32_e32 v18, v15, v16
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v15, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_maximum_v16f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
+; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX8-NEXT:    v_max_f32_e32 v0, v0, v16
+; GFX8-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v17
 ; GFX8-NEXT:    v_max_f32_e32 v1, v1, v17
-; GFX8-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v18
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v18
 ; GFX8-NEXT:    v_max_f32_e32 v2, v2, v18
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v19
+; GFX8-NEXT:    v_mov_b32_e32 v17, 0x7fc00000
+; GFX8-NEXT:    v_max_f32_e32 v18, v13, v29
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v29
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v19
 ; GFX8-NEXT:    v_max_f32_e32 v3, v3, v19
-; GFX8-NEXT:    v_mov_b32_e32 v18, 0x7fc00000
-; GFX8-NEXT:    v_max_f32_e32 v19, v0, v16
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[28:29], v0, v16
-; GFX8-NEXT:    v_max_f32_e32 v16, v14, v30
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v20
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v20
 ; GFX8-NEXT:    v_max_f32_e32 v4, v4, v20
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v21
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v21
 ; GFX8-NEXT:    v_max_f32_e32 v5, v5, v21
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v22
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v22
 ; GFX8-NEXT:    v_max_f32_e32 v6, v6, v22
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v23
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v23
 ; GFX8-NEXT:    v_max_f32_e32 v7, v7, v23
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v24
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v24
 ; GFX8-NEXT:    v_max_f32_e32 v8, v8, v24
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v25
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v25
 ; GFX8-NEXT:    v_max_f32_e32 v9, v9, v25
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v26
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v26
 ; GFX8-NEXT:    v_max_f32_e32 v10, v10, v26
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v27
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v27
 ; GFX8-NEXT:    v_max_f32_e32 v11, v11, v27
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v28
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[26:27], v12, v28
 ; GFX8-NEXT:    v_max_f32_e32 v12, v12, v28
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[26:27], v13, v29
-; GFX8-NEXT:    v_max_f32_e32 v13, v13, v29
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v18, v1, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v14, v18, v16, s[40:41]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v18, v19, s[28:29]
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v18, v3, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v18, v5, s[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v18, v6, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v18, v7, s[14:15]
-; GFX8-NEXT:    v_cndmask_b32_e64 v8, v18, v8, s[16:17]
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, v18, v9, s[18:19]
-; GFX8-NEXT:    v_cndmask_b32_e64 v10, v18, v10, s[20:21]
-; GFX8-NEXT:    v_cndmask_b32_e64 v11, v18, v11, s[22:23]
-; GFX8-NEXT:    v_cndmask_b32_e64 v12, v18, v12, s[24:25]
-; GFX8-NEXT:    v_cndmask_b32_e64 v13, v18, v13, s[26:27]
+; GFX8-NEXT:    v_max_f32_e32 v19, v14, v30
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v17, v0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, v17, v18, s[28:29]
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v17, v1, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v17, v2, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v17, v3, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v17, v4, s[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v17, v6, s[14:15]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v17, v7, s[16:17]
+; GFX8-NEXT:    v_cndmask_b32_e64 v8, v17, v8, s[18:19]
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, v17, v9, s[20:21]
+; GFX8-NEXT:    v_cndmask_b32_e64 v10, v17, v10, s[22:23]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v17, v11, s[24:25]
+; GFX8-NEXT:    v_cndmask_b32_e64 v12, v17, v12, s[26:27]
+; GFX8-NEXT:    v_cndmask_b32_e64 v14, v17, v19, s[40:41]
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_max_f32_e32 v16, v15, v17
-; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v18, v16, vcc
+; GFX8-NEXT:    v_max_f32_e32 v18, v15, v16
+; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v15, v16
+; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX900-LABEL: v_maximum_v16f32:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
+; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX900-NEXT:    v_max_f32_e32 v0, v0, v16
+; GFX900-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v17
 ; GFX900-NEXT:    v_max_f32_e32 v1, v1, v17
-; GFX900-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v18
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v18
 ; GFX900-NEXT:    v_max_f32_e32 v2, v2, v18
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v19
+; GFX900-NEXT:    v_mov_b32_e32 v17, 0x7fc00000
+; GFX900-NEXT:    v_max_f32_e32 v18, v13, v29
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v29
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v19
 ; GFX900-NEXT:    v_max_f32_e32 v3, v3, v19
-; GFX900-NEXT:    v_mov_b32_e32 v18, 0x7fc00000
-; GFX900-NEXT:    v_max_f32_e32 v19, v0, v16
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[28:29], v0, v16
-; GFX900-NEXT:    v_max_f32_e32 v16, v14, v30
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v20
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v20
 ; GFX900-NEXT:    v_max_f32_e32 v4, v4, v20
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v21
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v21
 ; GFX900-NEXT:    v_max_f32_e32 v5, v5, v21
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v22
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v22
 ; GFX900-NEXT:    v_max_f32_e32 v6, v6, v22
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v23
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v23
 ; GFX900-NEXT:    v_max_f32_e32 v7, v7, v23
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v24
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v24
 ; GFX900-NEXT:    v_max_f32_e32 v8, v8, v24
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v25
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v25
 ; GFX900-NEXT:    v_max_f32_e32 v9, v9, v25
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v26
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v26
 ; GFX900-NEXT:    v_max_f32_e32 v10, v10, v26
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v27
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v27
 ; GFX900-NEXT:    v_max_f32_e32 v11, v11, v27
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v28
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[26:27], v12, v28
 ; GFX900-NEXT:    v_max_f32_e32 v12, v12, v28
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[26:27], v13, v29
-; GFX900-NEXT:    v_max_f32_e32 v13, v13, v29
-; GFX900-NEXT:    v_cndmask_b32_e32 v1, v18, v1, vcc
-; GFX900-NEXT:    v_cndmask_b32_e64 v14, v18, v16, s[40:41]
-; GFX900-NEXT:    v_cndmask_b32_e64 v0, v18, v19, s[28:29]
-; GFX900-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v3, v18, v3, s[6:7]
-; GFX900-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s[8:9]
-; GFX900-NEXT:    v_cndmask_b32_e64 v5, v18, v5, s[10:11]
-; GFX900-NEXT:    v_cndmask_b32_e64 v6, v18, v6, s[12:13]
-; GFX900-NEXT:    v_cndmask_b32_e64 v7, v18, v7, s[14:15]
-; GFX900-NEXT:    v_cndmask_b32_e64 v8, v18, v8, s[16:17]
-; GFX900-NEXT:    v_cndmask_b32_e64 v9, v18, v9, s[18:19]
-; GFX900-NEXT:    v_cndmask_b32_e64 v10, v18, v10, s[20:21]
-; GFX900-NEXT:    v_cndmask_b32_e64 v11, v18, v11, s[22:23]
-; GFX900-NEXT:    v_cndmask_b32_e64 v12, v18, v12, s[24:25]
-; GFX900-NEXT:    v_cndmask_b32_e64 v13, v18, v13, s[26:27]
+; GFX900-NEXT:    v_max_f32_e32 v19, v14, v30
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
+; GFX900-NEXT:    v_cndmask_b32_e32 v0, v17, v0, vcc
+; GFX900-NEXT:    v_cndmask_b32_e64 v13, v17, v18, s[28:29]
+; GFX900-NEXT:    v_cndmask_b32_e64 v1, v17, v1, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v2, v17, v2, s[6:7]
+; GFX900-NEXT:    v_cndmask_b32_e64 v3, v17, v3, s[8:9]
+; GFX900-NEXT:    v_cndmask_b32_e64 v4, v17, v4, s[10:11]
+; GFX900-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s[12:13]
+; GFX900-NEXT:    v_cndmask_b32_e64 v6, v17, v6, s[14:15]
+; GFX900-NEXT:    v_cndmask_b32_e64 v7, v17, v7, s[16:17]
+; GFX900-NEXT:    v_cndmask_b32_e64 v8, v17, v8, s[18:19]
+; GFX900-NEXT:    v_cndmask_b32_e64 v9, v17, v9, s[20:21]
+; GFX900-NEXT:    v_cndmask_b32_e64 v10, v17, v10, s[22:23]
+; GFX900-NEXT:    v_cndmask_b32_e64 v11, v17, v11, s[24:25]
+; GFX900-NEXT:    v_cndmask_b32_e64 v12, v17, v12, s[26:27]
+; GFX900-NEXT:    v_cndmask_b32_e64 v14, v17, v19, s[40:41]
 ; GFX900-NEXT:    s_waitcnt vmcnt(0)
-; GFX900-NEXT:    v_max_f32_e32 v16, v15, v17
-; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
-; GFX900-NEXT:    v_cndmask_b32_e32 v15, v18, v16, vcc
+; GFX900-NEXT:    v_max_f32_e32 v18, v15, v16
+; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v15, v16
+; GFX900-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX900-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: v_maximum_v16f32:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f64.ll
index 3344c73f9eb6f..a18e5ace18704 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f64.ll
@@ -820,18 +820,18 @@ define void @s_maximum_v2f64(<2 x double> inreg %src0, <2 x double> inreg %src1)
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mov_b32_e32 v0, s22
-; GFX7-NEXT:    v_mov_b32_e32 v4, s20
 ; GFX7-NEXT:    v_mov_b32_e32 v1, s23
-; GFX7-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX7-NEXT:    v_max_f64 v[2:3], s[18:19], v[0:1]
 ; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, s[18:19], v[0:1]
-; GFX7-NEXT:    v_max_f64 v[0:1], s[16:17], v[4:5]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, s20
+; GFX7-NEXT:    v_mov_b32_e32 v1, s21
+; GFX7-NEXT:    v_max_f64 v[4:5], s[16:17], v[0:1]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[0:1]
 ; GFX7-NEXT:    v_mov_b32_e32 v6, 0x7ff80000
 ; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX7-NEXT:    ;;#ASMSTART
 ; GFX7-NEXT:    ; use v[0:3]
 ; GFX7-NEXT:    ;;#ASMEND
@@ -841,18 +841,18 @@ define void @s_maximum_v2f64(<2 x double> inreg %src0, <2 x double> inreg %src1)
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s22
-; GFX8-NEXT:    v_mov_b32_e32 v4, s20
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s23
-; GFX8-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX8-NEXT:    v_max_f64 v[2:3], s[18:19], v[0:1]
 ; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, s[18:19], v[0:1]
-; GFX8-NEXT:    v_max_f64 v[0:1], s[16:17], v[4:5]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s20
+; GFX8-NEXT:    v_mov_b32_e32 v1, s21
+; GFX8-NEXT:    v_max_f64 v[4:5], s[16:17], v[0:1]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[0:1]
 ; GFX8-NEXT:    v_mov_b32_e32 v6, 0x7ff80000
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX8-NEXT:    ;;#ASMSTART
 ; GFX8-NEXT:    ; use v[0:3]
 ; GFX8-NEXT:    ;;#ASMEND
@@ -862,18 +862,18 @@ define void @s_maximum_v2f64(<2 x double> inreg %src0, <2 x double> inreg %src1)
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX900-NEXT:    v_mov_b32_e32 v0, s22
-; GFX900-NEXT:    v_mov_b32_e32 v4, s20
 ; GFX900-NEXT:    v_mov_b32_e32 v1, s23
-; GFX900-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX900-NEXT:    v_max_f64 v[2:3], s[18:19], v[0:1]
 ; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, s[18:19], v[0:1]
-; GFX900-NEXT:    v_max_f64 v[0:1], s[16:17], v[4:5]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[4:5]
+; GFX900-NEXT:    v_mov_b32_e32 v0, s20
+; GFX900-NEXT:    v_mov_b32_e32 v1, s21
+; GFX900-NEXT:    v_max_f64 v[4:5], s[16:17], v[0:1]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[0:1]
 ; GFX900-NEXT:    v_mov_b32_e32 v6, 0x7ff80000
 ; GFX900-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GFX900-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX900-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX900-NEXT:    ;;#ASMSTART
 ; GFX900-NEXT:    ; use v[0:3]
 ; GFX900-NEXT:    ;;#ASMEND
@@ -1743,120 +1743,120 @@ define <8 x double> @v_maximum_v8f64(<8 x double> %src0, <8 x double> %src1) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX7-NEXT:    v_max_f64 v[32:33], v[2:3], v[18:19]
-; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[18:19]
-; GFX7-NEXT:    v_max_f64 v[18:19], v[4:5], v[20:21]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], v[4:5], v[20:21]
-; GFX7-NEXT:    v_max_f64 v[2:3], v[0:1], v[16:17]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[8:9], v[0:1], v[16:17]
+; GFX7-NEXT:    v_max_f64 v[32:33], v[0:1], v[16:17]
+; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[16:17]
+; GFX7-NEXT:    v_max_f64 v[16:17], v[2:3], v[18:19]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], v[2:3], v[18:19]
 ; GFX7-NEXT:    v_mov_b32_e32 v34, 0x7ff80000
+; GFX7-NEXT:    v_max_f64 v[18:19], v[4:5], v[20:21]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[6:7], v[4:5], v[20:21]
 ; GFX7-NEXT:    v_max_f64 v[20:21], v[6:7], v[22:23]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[6:7], v[6:7], v[22:23]
-; GFX7-NEXT:    v_max_f64 v[16:17], v[8:9], v[24:25]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[8:9], v[6:7], v[22:23]
+; GFX7-NEXT:    v_max_f64 v[22:23], v[8:9], v[24:25]
 ; GFX7-NEXT:    v_cmp_u_f64_e64 s[10:11], v[8:9], v[24:25]
-; GFX7-NEXT:    v_max_f64 v[22:23], v[10:11], v[26:27]
+; GFX7-NEXT:    v_max_f64 v[24:25], v[10:11], v[26:27]
 ; GFX7-NEXT:    v_cmp_u_f64_e64 s[12:13], v[10:11], v[26:27]
-; GFX7-NEXT:    v_max_f64 v[24:25], v[12:13], v[28:29]
+; GFX7-NEXT:    v_max_f64 v[26:27], v[12:13], v[28:29]
 ; GFX7-NEXT:    v_cmp_u_f64_e64 s[14:15], v[12:13], v[28:29]
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[8:9]
-; GFX7-NEXT:    v_cndmask_b32_e64 v1, v3, v34, s[8:9]
-; GFX7-NEXT:    v_cndmask_b32_e64 v2, v32, 0, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v33, v34, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v8, v16, 0, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v9, v17, v34, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v10, v22, 0, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v11, v23, v34, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v12, v24, 0, s[14:15]
-; GFX7-NEXT:    v_cndmask_b32_e64 v13, v25, v34, s[14:15]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v33, v34, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v16, 0, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, v17, v34, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v8, v22, 0, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v9, v23, v34, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v10, v24, 0, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v25, v34, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v26, 0, s[14:15]
+; GFX7-NEXT:    v_cndmask_b32_e64 v13, v27, v34, s[14:15]
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_max_f64 v[18:19], v[14:15], v[30:31]
+; GFX7-NEXT:    v_max_f64 v[16:17], v[14:15], v[30:31]
 ; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[30:31]
-; GFX7-NEXT:    v_cndmask_b32_e64 v14, v18, 0, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v15, v19, v34, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v14, v16, 0, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v15, v17, v34, vcc
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_maximum_v8f64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX8-NEXT:    v_max_f64 v[32:33], v[2:3], v[18:19]
-; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[18:19]
-; GFX8-NEXT:    v_max_f64 v[18:19], v[4:5], v[20:21]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], v[4:5], v[20:21]
-; GFX8-NEXT:    v_max_f64 v[2:3], v[0:1], v[16:17]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[8:9], v[0:1], v[16:17]
+; GFX8-NEXT:    v_max_f64 v[32:33], v[0:1], v[16:17]
+; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[16:17]
+; GFX8-NEXT:    v_max_f64 v[16:17], v[2:3], v[18:19]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], v[2:3], v[18:19]
 ; GFX8-NEXT:    v_mov_b32_e32 v34, 0x7ff80000
+; GFX8-NEXT:    v_max_f64 v[18:19], v[4:5], v[20:21]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[6:7], v[4:5], v[20:21]
 ; GFX8-NEXT:    v_max_f64 v[20:21], v[6:7], v[22:23]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[6:7], v[6:7], v[22:23]
-; GFX8-NEXT:    v_max_f64 v[16:17], v[8:9], v[24:25]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[8:9], v[6:7], v[22:23]
+; GFX8-NEXT:    v_max_f64 v[22:23], v[8:9], v[24:25]
 ; GFX8-NEXT:    v_cmp_u_f64_e64 s[10:11], v[8:9], v[24:25]
-; GFX8-NEXT:    v_max_f64 v[22:23], v[10:11], v[26:27]
+; GFX8-NEXT:    v_max_f64 v[24:25], v[10:11], v[26:27]
 ; GFX8-NEXT:    v_cmp_u_f64_e64 s[12:13], v[10:11], v[26:27]
-; GFX8-NEXT:    v_max_f64 v[24:25], v[12:13], v[28:29]
+; GFX8-NEXT:    v_max_f64 v[26:27], v[12:13], v[28:29]
 ; GFX8-NEXT:    v_cmp_u_f64_e64 s[14:15], v[12:13], v[28:29]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, v34, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v32, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v33, v34, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v8, v16, 0, s[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, v17, v34, s[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v10, v22, 0, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v11, v23, v34, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v12, v24, 0, s[14:15]
-; GFX8-NEXT:    v_cndmask_b32_e64 v13, v25, v34, s[14:15]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v33, v34, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v16, 0, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v17, v34, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v8, v22, 0, s[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, v23, v34, s[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v10, v24, 0, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v25, v34, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v12, v26, 0, s[14:15]
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, v27, v34, s[14:15]
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_max_f64 v[18:19], v[14:15], v[30:31]
+; GFX8-NEXT:    v_max_f64 v[16:17], v[14:15], v[30:31]
 ; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[30:31]
-; GFX8-NEXT:    v_cndmask_b32_e64 v14, v18, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v19, v34, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v14, v16, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v34, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX900-LABEL: v_maximum_v8f64:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX900-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX900-NEXT:    v_max_f64 v[32:33], v[2:3], v[18:19]
-; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[18:19]
-; GFX900-NEXT:    v_max_f64 v[18:19], v[4:5], v[20:21]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], v[4:5], v[20:21]
-; GFX900-NEXT:    v_max_f64 v[2:3], v[0:1], v[16:17]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[8:9], v[0:1], v[16:17]
+; GFX900-NEXT:    v_max_f64 v[32:33], v[0:1], v[16:17]
+; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[16:17]
+; GFX900-NEXT:    v_max_f64 v[16:17], v[2:3], v[18:19]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], v[2:3], v[18:19]
 ; GFX900-NEXT:    v_mov_b32_e32 v34, 0x7ff80000
+; GFX900-NEXT:    v_max_f64 v[18:19], v[4:5], v[20:21]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[6:7], v[4:5], v[20:21]
 ; GFX900-NEXT:    v_max_f64 v[20:21], v[6:7], v[22:23]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[6:7], v[6:7], v[22:23]
-; GFX900-NEXT:    v_max_f64 v[16:17], v[8:9], v[24:25]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[8:9], v[6:7], v[22:23]
+; GFX900-NEXT:    v_max_f64 v[22:23], v[8:9], v[24:25]
 ; GFX900-NEXT:    v_cmp_u_f64_e64 s[10:11], v[8:9], v[24:25]
-; GFX900-NEXT:    v_max_f64 v[22:23], v[10:11], v[26:27]
+; GFX900-NEXT:    v_max_f64 v[24:25], v[10:11], v[26:27]
 ; GFX900-NEXT:    v_cmp_u_f64_e64 s[12:13], v[10:11], v[26:27]
-; GFX900-NEXT:    v_max_f64 v[24:25], v[12:13], v[28:29]
+; GFX900-NEXT:    v_max_f64 v[26:27], v[12:13], v[28:29]
 ; GFX900-NEXT:    v_cmp_u_f64_e64 s[14:15], v[12:13], v[28:29]
-; GFX900-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[8:9]
-; GFX900-NEXT:    v_cndmask_b32_e64 v1, v3, v34, s[8:9]
-; GFX900-NEXT:    v_cndmask_b32_e64 v2, v32, 0, vcc
-; GFX900-NEXT:    v_cndmask_b32_e32 v3, v33, v34, vcc
-; GFX900-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[6:7]
-; GFX900-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[6:7]
-; GFX900-NEXT:    v_cndmask_b32_e64 v8, v16, 0, s[10:11]
-; GFX900-NEXT:    v_cndmask_b32_e64 v9, v17, v34, s[10:11]
-; GFX900-NEXT:    v_cndmask_b32_e64 v10, v22, 0, s[12:13]
-; GFX900-NEXT:    v_cndmask_b32_e64 v11, v23, v34, s[12:13]
-; GFX900-NEXT:    v_cndmask_b32_e64 v12, v24, 0, s[14:15]
-; GFX900-NEXT:    v_cndmask_b32_e64 v13, v25, v34, s[14:15]
+; GFX900-NEXT:    v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX900-NEXT:    v_cndmask_b32_e32 v1, v33, v34, vcc
+; GFX900-NEXT:    v_cndmask_b32_e64 v2, v16, 0, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v3, v17, v34, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[6:7]
+; GFX900-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[6:7]
+; GFX900-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[8:9]
+; GFX900-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[8:9]
+; GFX900-NEXT:    v_cndmask_b32_e64 v8, v22, 0, s[10:11]
+; GFX900-NEXT:    v_cndmask_b32_e64 v9, v23, v34, s[10:11]
+; GFX900-NEXT:    v_cndmask_b32_e64 v10, v24, 0, s[12:13]
+; GFX900-NEXT:    v_cndmask_b32_e64 v11, v25, v34, s[12:13]
+; GFX900-NEXT:    v_cndmask_b32_e64 v12, v26, 0, s[14:15]
+; GFX900-NEXT:    v_cndmask_b32_e64 v13, v27, v34, s[14:15]
 ; GFX900-NEXT:    s_waitcnt vmcnt(0)
-; GFX900-NEXT:    v_max_f64 v[18:19], v[14:15], v[30:31]
+; GFX900-NEXT:    v_max_f64 v[16:17], v[14:15], v[30:31]
 ; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[30:31]
-; GFX900-NEXT:    v_cndmask_b32_e64 v14, v18, 0, vcc
-; GFX900-NEXT:    v_cndmask_b32_e32 v15, v19, v34, vcc
+; GFX900-NEXT:    v_cndmask_b32_e64 v14, v16, 0, vcc
+; GFX900-NEXT:    v_cndmask_b32_e32 v15, v17, v34, vcc
 ; GFX900-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: v_maximum_v8f64:
@@ -2365,24 +2365,24 @@ define <16 x double> @v_maximum_v16f64(<16 x double> %src0, <16 x double> %src1)
 ; GFX950-LABEL: v_maximum_v16f64:
 ; GFX950:       ; %bb.0:
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v40 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a2, v41 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a3, v42 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a4, v43 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a5, v44 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a6, v45 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a7, v46 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a8, v47 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a9, v56 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a10, v57 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a0, v40 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v41 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a2, v42 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a3, v43 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a4, v44 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a5, v45 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a6, v46 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a7, v47 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a8, v56 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a9, v57 ; Reload Reuse
+; GFX950-NEXT:    scratch_load_dword v33, off, s32 offset:8
+; GFX950-NEXT:    scratch_load_dword v32, off, s32 offset:4
 ; GFX950-NEXT:    scratch_load_dword v37, off, s32 offset:16
 ; GFX950-NEXT:    scratch_load_dword v36, off, s32 offset:12
 ; GFX950-NEXT:    scratch_load_dword v39, off, s32 offset:24
 ; GFX950-NEXT:    scratch_load_dword v38, off, s32 offset:20
-; GFX950-NEXT:    scratch_load_dword v49, off, s32 offset:32
-; GFX950-NEXT:    scratch_load_dword v48, off, s32 offset:28
-; GFX950-NEXT:    scratch_load_dword v57, off, s32 offset:8
-; GFX950-NEXT:    scratch_load_dword v56, off, s32 offset:4
+; GFX950-NEXT:    scratch_load_dword v57, off, s32 offset:32
+; GFX950-NEXT:    scratch_load_dword v56, off, s32 offset:28
 ; GFX950-NEXT:    scratch_load_dword v47, off, s32 offset:40
 ; GFX950-NEXT:    scratch_load_dword v46, off, s32 offset:36
 ; GFX950-NEXT:    scratch_load_dword v45, off, s32 offset:48
@@ -2397,148 +2397,149 @@ define <16 x double> @v_maximum_v16f64(<16 x double> %src0, <16 x double> %src1)
 ; GFX950-NEXT:    scratch_load_dword v52, off, s32 offset:76
 ; GFX950-NEXT:    scratch_load_dword v51, off, s32 offset:88
 ; GFX950-NEXT:    scratch_load_dword v50, off, s32 offset:84
-; GFX950-NEXT:    scratch_load_dword v35, off, s32 offset:96
-; GFX950-NEXT:    scratch_load_dword v34, off, s32 offset:92
+; GFX950-NEXT:    scratch_load_dword v49, off, s32 offset:96
+; GFX950-NEXT:    scratch_load_dword v48, off, s32 offset:92
 ; GFX950-NEXT:    scratch_load_dword v31, off, s32
-; GFX950-NEXT:    scratch_load_dword v33, off, s32 offset:104
-; GFX950-NEXT:    scratch_load_dword v32, off, s32 offset:100
-; GFX950-NEXT:    v_accvgpr_write_b32 a11, v58 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a12, v59 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a13, v60 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a14, v61 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a15, v62 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a16, v63 ; Reload Reuse
+; GFX950-NEXT:    scratch_load_dword v35, off, s32 offset:104
+; GFX950-NEXT:    scratch_load_dword v34, off, s32 offset:100
+; GFX950-NEXT:    v_accvgpr_write_b32 a10, v58 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a11, v59 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a12, v60 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a13, v61 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a14, v62 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a15, v63 ; Reload Reuse
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_max_f64 v[58:59], v[2:3], v[36:37]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[36:37]
-; GFX950-NEXT:    scratch_load_dword v37, off, s32 offset:112
-; GFX950-NEXT:    scratch_load_dword v36, off, s32 offset:108
+; GFX950-NEXT:    v_max_f64 v[58:59], v[0:1], v[32:33]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[32:33]
+; GFX950-NEXT:    scratch_load_dword v33, off, s32 offset:112
+; GFX950-NEXT:    scratch_load_dword v32, off, s32 offset:108
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_max_f64 v[60:61], v[4:5], v[38:39]
-; GFX950-NEXT:    v_cmp_u_f64_e64 s[0:1], v[4:5], v[38:39]
-; GFX950-NEXT:    scratch_load_dword v39, off, s32 offset:120
-; GFX950-NEXT:    scratch_load_dword v38, off, s32 offset:116
+; GFX950-NEXT:    v_max_f64 v[60:61], v[2:3], v[36:37]
+; GFX950-NEXT:    v_cmp_u_f64_e64 s[0:1], v[2:3], v[36:37]
+; GFX950-NEXT:    scratch_load_dword v37, off, s32 offset:120
+; GFX950-NEXT:    scratch_load_dword v36, off, s32 offset:116
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_max_f64 v[62:63], v[6:7], v[48:49]
-; GFX950-NEXT:    v_cmp_u_f64_e64 s[2:3], v[6:7], v[48:49]
-; GFX950-NEXT:    scratch_load_dword v49, off, s32 offset:128
-; GFX950-NEXT:    scratch_load_dword v48, off, s32 offset:124
+; GFX950-NEXT:    v_max_f64 v[62:63], v[4:5], v[38:39]
+; GFX950-NEXT:    v_cmp_u_f64_e64 s[2:3], v[4:5], v[38:39]
+; GFX950-NEXT:    scratch_load_dword v39, off, s32 offset:128
+; GFX950-NEXT:    scratch_load_dword v38, off, s32 offset:124
+; GFX950-NEXT:    v_mov_b32_e32 v2, 0x7ff80000
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_max_f64 v[2:3], v[0:1], v[56:57]
-; GFX950-NEXT:    v_cmp_u_f64_e64 s[4:5], v[0:1], v[56:57]
-; GFX950-NEXT:    v_mov_b32_e32 v0, 0x7ff80000
+; GFX950-NEXT:    v_max_f64 v[0:1], v[6:7], v[56:57]
+; GFX950-NEXT:    v_cmp_u_f64_e64 s[4:5], v[6:7], v[56:57]
 ; GFX950-NEXT:    s_waitcnt vmcnt(23)
 ; GFX950-NEXT:    v_max_f64 v[56:57], v[8:9], v[46:47]
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v2, 0, s[4:5]
-; GFX950-NEXT:    v_accvgpr_write_b32 a0, v1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v3, v0, s[4:5]
-; GFX950-NEXT:    v_cndmask_b32_e64 v2, v58, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v3, v59, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v58, v58, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v59, v59, v2, vcc
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[8:9], v[46:47]
-; GFX950-NEXT:    s_waitcnt vmcnt(21)
-; GFX950-NEXT:    v_max_f64 v[46:47], v[10:11], v[44:45]
-; GFX950-NEXT:    v_cndmask_b32_e64 v4, v60, 0, s[0:1]
+; GFX950-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v1, v2, s[4:5]
 ; GFX950-NEXT:    v_cndmask_b32_e64 v8, v56, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v9, v57, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v9, v57, v2, vcc
+; GFX950-NEXT:    s_waitcnt vmcnt(21)
+; GFX950-NEXT:    v_max_f64 v[0:1], v[10:11], v[44:45]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[10:11], v[44:45]
+; GFX950-NEXT:    v_cndmask_b32_e64 v60, v60, 0, s[0:1]
+; GFX950-NEXT:    v_cndmask_b32_e64 v3, v61, v2, s[0:1]
+; GFX950-NEXT:    v_cndmask_b32_e64 v10, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v11, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(19)
-; GFX950-NEXT:    v_max_f64 v[44:45], v[12:13], v[42:43]
-; GFX950-NEXT:    v_cndmask_b32_e64 v5, v61, v0, s[0:1]
-; GFX950-NEXT:    v_cndmask_b32_e64 v10, v46, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v11, v47, v0, vcc
+; GFX950-NEXT:    v_max_f64 v[0:1], v[12:13], v[42:43]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[12:13], v[42:43]
+; GFX950-NEXT:    v_cndmask_b32_e64 v4, v62, 0, s[2:3]
+; GFX950-NEXT:    v_cndmask_b32_e64 v5, v63, v2, s[2:3]
+; GFX950-NEXT:    v_cndmask_b32_e64 v12, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v13, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(17)
-; GFX950-NEXT:    v_max_f64 v[42:43], v[14:15], v[40:41]
-; GFX950-NEXT:    v_cndmask_b32_e64 v6, v62, 0, s[2:3]
-; GFX950-NEXT:    v_cndmask_b32_e64 v12, v44, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v13, v45, v0, vcc
+; GFX950-NEXT:    v_max_f64 v[0:1], v[14:15], v[40:41]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[40:41]
+; GFX950-NEXT:    v_accvgpr_read_b32 v63, a15 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v62, a14 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v14, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v15, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(15)
-; GFX950-NEXT:    v_max_f64 v[40:41], v[16:17], v[54:55]
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v63, v0, s[2:3]
-; GFX950-NEXT:    v_cndmask_b32_e64 v14, v42, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v15, v43, v0, vcc
+; GFX950-NEXT:    v_max_f64 v[0:1], v[16:17], v[54:55]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[16:17], v[54:55]
+; GFX950-NEXT:    v_accvgpr_read_b32 v61, a13 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v57, a9 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v16, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v17, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(13)
-; GFX950-NEXT:    v_max_f64 v[54:55], v[18:19], v[52:53]
-; GFX950-NEXT:    v_accvgpr_read_b32 v63, a16 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v16, v40, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v17, v41, v0, vcc
+; GFX950-NEXT:    v_max_f64 v[0:1], v[18:19], v[52:53]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[18:19], v[52:53]
+; GFX950-NEXT:    v_accvgpr_read_b32 v56, a8 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v47, a7 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v18, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v19, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(11)
-; GFX950-NEXT:    v_max_f64 v[52:53], v[20:21], v[50:51]
-; GFX950-NEXT:    v_accvgpr_read_b32 v62, a15 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v18, v54, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v19, v55, v0, vcc
+; GFX950-NEXT:    v_max_f64 v[0:1], v[20:21], v[50:51]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[20:21], v[50:51]
+; GFX950-NEXT:    v_accvgpr_read_b32 v46, a6 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v45, a5 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v20, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v21, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(9)
-; GFX950-NEXT:    v_max_f64 v[50:51], v[22:23], v[34:35]
-; GFX950-NEXT:    v_accvgpr_read_b32 v61, a14 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v20, v52, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v21, v53, v0, vcc
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[22:23], v[34:35]
+; GFX950-NEXT:    v_max_f64 v[0:1], v[22:23], v[48:49]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[22:23], v[48:49]
+; GFX950-NEXT:    v_accvgpr_read_b32 v44, a4 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v43, a3 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v22, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v23, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(6)
-; GFX950-NEXT:    v_max_f64 v[34:35], v[24:25], v[32:33]
-; GFX950-NEXT:    v_accvgpr_read_b32 v60, a13 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v22, v50, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v23, v51, v0, vcc
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[24:25], v[32:33]
-; GFX950-NEXT:    v_accvgpr_read_b32 v59, a12 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v58, a11 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v24, v34, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v25, v35, v0, vcc
-; GFX950-NEXT:    v_accvgpr_read_b32 v57, a10 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v56, a9 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v47, a8 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v46, a7 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v45, a6 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v44, a5 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v43, a4 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v42, a3 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v41, a2 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v40, a1 ; Reload Reuse
+; GFX950-NEXT:    v_max_f64 v[0:1], v[24:25], v[34:35]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[24:25], v[34:35]
+; GFX950-NEXT:    v_accvgpr_read_b32 v42, a2 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v41, a1 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v24, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v25, v1, v2, vcc
+; GFX950-NEXT:    v_accvgpr_read_b32 v40, a0 ; Reload Reuse
 ; GFX950-NEXT:    s_waitcnt vmcnt(4)
-; GFX950-NEXT:    v_max_f64 v[32:33], v[26:27], v[36:37]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[26:27], v[36:37]
+; GFX950-NEXT:    v_max_f64 v[0:1], v[26:27], v[32:33]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[26:27], v[32:33]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v26, v32, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v27, v33, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v26, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v27, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(2)
-; GFX950-NEXT:    v_max_f64 v[32:33], v[28:29], v[38:39]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[28:29], v[38:39]
+; GFX950-NEXT:    v_max_f64 v[0:1], v[28:29], v[36:37]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[28:29], v[36:37]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v28, v32, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v29, v33, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v28, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v29, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
-; GFX950-NEXT:    v_max_f64 v[32:33], v[30:31], v[48:49]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[30:31], v[48:49]
+; GFX950-NEXT:    v_max_f64 v[0:1], v[30:31], v[38:39]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[30:31], v[38:39]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v30, v32, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v31, v33, v0, vcc
-; GFX950-NEXT:    v_accvgpr_read_b32 v0, a0
+; GFX950-NEXT:    v_cndmask_b32_e64 v30, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v31, v1, v2, vcc
+; GFX950-NEXT:    v_mov_b32_e32 v0, v58
+; GFX950-NEXT:    v_mov_b32_e32 v1, v59
+; GFX950-NEXT:    v_mov_b32_e32 v2, v60
+; GFX950-NEXT:    v_accvgpr_read_b32 v60, a12 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v59, a11 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v58, a10 ; Reload Reuse
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_maximum_v16f64:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x19
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:16
-; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
-; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:24
-; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:20
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:32
-; GFX10-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:28
+; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:8
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:16
+; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:12
+; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:24
+; GFX10-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:20
 ; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:36
-; GFX10-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:68
-; GFX10-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:64
-; GFX10-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:60
-; GFX10-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:56
-; GFX10-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:52
-; GFX10-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:48
-; GFX10-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:44
+; GFX10-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:32
+; GFX10-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:28
+; GFX10-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:68
+; GFX10-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:64
+; GFX10-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:60
+; GFX10-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:56
+; GFX10-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:52
+; GFX10-NEXT:    buffer_load_dword v65, off, s[0:3], s32 offset:48
+; GFX10-NEXT:    buffer_load_dword v64, off, s[0:3], s32 offset:44
 ; GFX10-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:40
-; GFX10-NEXT:    buffer_load_dword v65, off, s[0:3], s32 offset:8
-; GFX10-NEXT:    buffer_load_dword v64, off, s[0:3], s32 offset:4
 ; GFX10-NEXT:    buffer_load_dword v66, off, s[0:3], s32 offset:100
 ; GFX10-NEXT:    buffer_load_dword v69, off, s[0:3], s32 offset:96
 ; GFX10-NEXT:    buffer_load_dword v68, off, s[0:3], s32 offset:92
@@ -2546,96 +2547,96 @@ define <16 x double> @v_maximum_v16f64(<16 x double> %src0, <16 x double> %src1)
 ; GFX10-NEXT:    buffer_load_dword v70, off, s[0:3], s32 offset:84
 ; GFX10-NEXT:    buffer_load_dword v81, off, s[0:3], s32 offset:80
 ; GFX10-NEXT:    buffer_load_dword v80, off, s[0:3], s32 offset:76
-; GFX10-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:72
+; GFX10-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:72
 ; GFX10-NEXT:    buffer_load_dword v67, off, s[0:3], s32 offset:104
 ; GFX10-NEXT:    s_waitcnt vmcnt(24)
-; GFX10-NEXT:    v_max_f64 v[82:83], v[2:3], v[31:32]
-; GFX10-NEXT:    v_cmp_u_f64_e32 vcc_lo, v[2:3], v[31:32]
+; GFX10-NEXT:    v_max_f64 v[82:83], v[0:1], v[31:32]
+; GFX10-NEXT:    v_cmp_u_f64_e32 vcc_lo, v[0:1], v[31:32]
 ; GFX10-NEXT:    s_waitcnt vmcnt(22)
-; GFX10-NEXT:    v_max_f64 v[84:85], v[4:5], v[33:34]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s4, v[4:5], v[33:34]
+; GFX10-NEXT:    v_max_f64 v[84:85], v[2:3], v[33:34]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s4, v[2:3], v[33:34]
 ; GFX10-NEXT:    s_clause 0x3
-; GFX10-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:120
-; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:116
-; GFX10-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:112
-; GFX10-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:108
+; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:120
+; GFX10-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:116
+; GFX10-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:112
+; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:108
 ; GFX10-NEXT:    s_waitcnt vmcnt(24)
-; GFX10-NEXT:    v_max_f64 v[32:33], v[6:7], v[35:36]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s5, v[6:7], v[35:36]
+; GFX10-NEXT:    v_max_f64 v[32:33], v[4:5], v[35:36]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s5, v[4:5], v[35:36]
 ; GFX10-NEXT:    s_clause 0x2
 ; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:128
-; GFX10-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:124
-; GFX10-NEXT:    s_waitcnt vmcnt(23)
-; GFX10-NEXT:    v_cmp_u_f64_e64 s10, v[14:15], v[50:51]
+; GFX10-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:128
+; GFX10-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:124
+; GFX10-NEXT:    s_waitcnt vmcnt(24)
+; GFX10-NEXT:    v_max_f64 v[34:35], v[6:7], v[48:49]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s6, v[6:7], v[48:49]
 ; GFX10-NEXT:    s_waitcnt vmcnt(21)
-; GFX10-NEXT:    v_cmp_u_f64_e64 s9, v[12:13], v[52:53]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s10, v[14:15], v[52:53]
 ; GFX10-NEXT:    s_waitcnt vmcnt(19)
-; GFX10-NEXT:    v_cmp_u_f64_e64 s7, v[10:11], v[54:55]
-; GFX10-NEXT:    s_waitcnt vmcnt(18)
-; GFX10-NEXT:    v_max_f64 v[34:35], v[8:9], v[37:38]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s6, v[8:9], v[37:38]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s9, v[12:13], v[54:55]
+; GFX10-NEXT:    s_waitcnt vmcnt(17)
+; GFX10-NEXT:    v_cmp_u_f64_e64 s8, v[10:11], v[64:65]
 ; GFX10-NEXT:    s_waitcnt vmcnt(16)
-; GFX10-NEXT:    v_max_f64 v[8:9], v[0:1], v[64:65]
-; GFX10-NEXT:    v_max_f64 v[36:37], v[10:11], v[54:55]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s8, v[0:1], v[64:65]
-; GFX10-NEXT:    v_max_f64 v[38:39], v[12:13], v[52:53]
-; GFX10-NEXT:    v_max_f64 v[52:53], v[14:15], v[50:51]
+; GFX10-NEXT:    v_max_f64 v[48:49], v[8:9], v[37:38]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s7, v[8:9], v[37:38]
+; GFX10-NEXT:    v_max_f64 v[36:37], v[10:11], v[64:65]
+; GFX10-NEXT:    v_max_f64 v[38:39], v[12:13], v[54:55]
+; GFX10-NEXT:    v_max_f64 v[54:55], v[14:15], v[52:53]
 ; GFX10-NEXT:    s_waitcnt vmcnt(11)
-; GFX10-NEXT:    v_max_f64 v[54:55], v[20:21], v[70:71]
+; GFX10-NEXT:    v_max_f64 v[64:65], v[20:21], v[70:71]
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s13, v[20:21], v[70:71]
 ; GFX10-NEXT:    s_waitcnt vmcnt(9)
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s12, v[18:19], v[80:81]
 ; GFX10-NEXT:    s_waitcnt vmcnt(8)
-; GFX10-NEXT:    v_max_f64 v[50:51], v[16:17], v[48:49]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s11, v[16:17], v[48:49]
-; GFX10-NEXT:    v_max_f64 v[48:49], v[18:19], v[80:81]
-; GFX10-NEXT:    v_max_f64 v[64:65], v[22:23], v[68:69]
+; GFX10-NEXT:    v_max_f64 v[52:53], v[16:17], v[50:51]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s11, v[16:17], v[50:51]
+; GFX10-NEXT:    v_max_f64 v[50:51], v[18:19], v[80:81]
+; GFX10-NEXT:    v_max_f64 v[70:71], v[22:23], v[68:69]
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s14, v[22:23], v[68:69]
 ; GFX10-NEXT:    s_waitcnt vmcnt(7)
 ; GFX10-NEXT:    v_max_f64 v[68:69], v[24:25], v[66:67]
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s15, v[24:25], v[66:67]
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v36, 0, s7
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v8, 0, s8
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v9, 0x7ff80000, s8
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v34, 0, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v35, 0x7ff80000, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v37, 0x7ff80000, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v34, 0, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v35, 0x7ff80000, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v48, 0, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v49, 0x7ff80000, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v36, 0, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v37, 0x7ff80000, s8
 ; GFX10-NEXT:    v_cndmask_b32_e64 v12, v38, 0, s9
 ; GFX10-NEXT:    v_cndmask_b32_e64 v13, v39, 0x7ff80000, s9
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v52, 0, s10
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v53, 0x7ff80000, s10
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, v50, 0, s11
-; GFX10-NEXT:    v_cndmask_b32_e64 v17, v51, 0x7ff80000, s11
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v48, 0, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v19, v49, 0x7ff80000, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v20, v54, 0, s13
-; GFX10-NEXT:    v_cndmask_b32_e64 v21, v55, 0x7ff80000, s13
-; GFX10-NEXT:    v_cndmask_b32_e64 v22, v64, 0, s14
-; GFX10-NEXT:    v_cndmask_b32_e64 v23, v65, 0x7ff80000, s14
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v54, 0, s10
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v55, 0x7ff80000, s10
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v52, 0, s11
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v53, 0x7ff80000, s11
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v50, 0, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v51, 0x7ff80000, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v64, 0, s13
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v65, 0x7ff80000, s13
+; GFX10-NEXT:    v_cndmask_b32_e64 v22, v70, 0, s14
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v71, 0x7ff80000, s14
 ; GFX10-NEXT:    v_cndmask_b32_e64 v24, v68, 0, s15
 ; GFX10-NEXT:    v_cndmask_b32_e64 v25, v69, 0x7ff80000, s15
 ; GFX10-NEXT:    s_waitcnt vmcnt(5)
-; GFX10-NEXT:    v_max_f64 v[70:71], v[28:29], v[2:3]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s17, v[28:29], v[2:3]
+; GFX10-NEXT:    v_max_f64 v[80:81], v[28:29], v[0:1]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s17, v[28:29], v[0:1]
 ; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    v_max_f64 v[66:67], v[26:27], v[4:5]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s16, v[26:27], v[4:5]
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v82, 0, vcc_lo
+; GFX10-NEXT:    v_max_f64 v[66:67], v[26:27], v[2:3]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s16, v[26:27], v[2:3]
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v82, 0, vcc_lo
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_max_f64 v[80:81], v[30:31], v[6:7]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s18, v[30:31], v[6:7]
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v83, 0x7ff80000, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v84, 0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v85, 0x7ff80000, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v32, 0, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v33, 0x7ff80000, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v28, v70, 0, s17
-; GFX10-NEXT:    v_cndmask_b32_e64 v29, v71, 0x7ff80000, s17
+; GFX10-NEXT:    v_max_f64 v[86:87], v[30:31], v[4:5]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s18, v[30:31], v[4:5]
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v83, 0x7ff80000, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v84, 0, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v85, 0x7ff80000, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v32, 0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v33, 0x7ff80000, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v28, v80, 0, s17
+; GFX10-NEXT:    v_cndmask_b32_e64 v29, v81, 0x7ff80000, s17
 ; GFX10-NEXT:    v_cndmask_b32_e64 v26, v66, 0, s16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v27, v67, 0x7ff80000, s16
-; GFX10-NEXT:    v_cndmask_b32_e64 v30, v80, 0, s18
-; GFX10-NEXT:    v_cndmask_b32_e64 v31, v81, 0x7ff80000, s18
+; GFX10-NEXT:    v_cndmask_b32_e64 v30, v86, 0, s18
+; GFX10-NEXT:    v_cndmask_b32_e64 v31, v87, 0x7ff80000, s18
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_maximum_v16f64:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
index b8e5be785a77d..3dcc70b0ea3b6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
@@ -1858,6 +1858,7 @@ define <16 x half> @v_minimum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[10:11], v18, v17
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v17, 16, v9
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v18, 16, v1
+; GFX8-NEXT:    v_mov_b32_e32 v19, 0x7e00
 ; GFX8-NEXT:    v_min_f16_e32 v24, v18, v17
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[12:13], v18, v17
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v17, 16, v8
@@ -1872,28 +1873,26 @@ define <16 x half> @v_minimum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[20:21], v4, v12
 ; GFX8-NEXT:    v_min_f16_e32 v4, v3, v11
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[22:23], v3, v11
+; GFX8-NEXT:    v_min_f16_e32 v3, v2, v10
 ; GFX8-NEXT:    v_min_f16_e32 v11, v7, v15
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[24:25], v7, v15
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v12, 16, v15
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GFX8-NEXT:    v_mov_b32_e32 v19, 0x7e00
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, v19, v16, vcc
+; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v2, v10
 ; GFX8-NEXT:    v_min_f16_e32 v13, v7, v12
 ; GFX8-NEXT:    v_cmp_o_f16_e64 s[26:27], v7, v12
-; GFX8-NEXT:    v_min_f16_e32 v3, v2, v10
-; GFX8-NEXT:    v_cndmask_b32_e64 v12, v19, v13, s[26:27]
-; GFX8-NEXT:    v_cndmask_b32_e32 v13, v19, v16, vcc
-; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v2, v10
-; GFX8-NEXT:    v_min_f16_e32 v14, v1, v9
+; GFX8-NEXT:    v_min_f16_e32 v7, v1, v9
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v19, v3, vcc
 ; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v1, v9
-; GFX8-NEXT:    v_min_f16_e32 v7, v0, v8
+; GFX8-NEXT:    v_min_f16_e32 v12, v0, v8
 ; GFX8-NEXT:    v_cndmask_b32_e64 v18, v19, v22, s[8:9]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v22, v19, v25, s[14:15]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v19, v14, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v19, v7, vcc
 ; GFX8-NEXT:    v_cmp_o_f16_e32 vcc, v0, v8
 ; GFX8-NEXT:    v_cndmask_b32_e64 v16, v19, v21, s[6:7]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v21, v19, v24, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v19, v7, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v19, v12, vcc
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v22
 ; GFX8-NEXT:    v_cndmask_b32_e64 v15, v19, v20, s[4:5]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v20, v19, v23, s[10:11]
@@ -1907,14 +1906,15 @@ define <16 x half> @v_minimum_v16f16(<16 x half> %src0, <16 x half> %src1) {
 ; GFX8-NEXT:    v_cndmask_b32_e64 v5, v19, v5, s[20:21]
 ; GFX8-NEXT:    v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v16
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, v19, v13, s[26:27]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v6, v19, v6, s[18:19]
 ; GFX8-NEXT:    v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v15
 ; GFX8-NEXT:    v_cndmask_b32_e64 v11, v19, v11, s[24:25]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v17, v19, v17, s[16:17]
 ; GFX8-NEXT:    v_or_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v13
-; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v12
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v14
+; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v13
 ; GFX8-NEXT:    v_or_b32_sdwa v6, v17, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_sdwa v7, v11, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f32.ll
index 7b2998cbd242f..0215795467323 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f32.ll
@@ -1684,7 +1684,7 @@ define <8 x float> @v_minimum_v8f32(<8 x float> %src0, <8 x float> %src1) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, 0x7fc00000, v16, vcc_lo
 ; GFX11-NEXT:    v_cmp_o_f32_e32 vcc_lo, v1, v9
-; GFX11-NEXT:    v_dual_min_f32 v9, v3, v11 :: v_dual_min_f32 v8, v2, v10
+; GFX11-NEXT:    v_dual_min_f32 v8, v2, v10 :: v_dual_min_f32 v9, v3, v11
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0x7fc00000, v17, vcc_lo
 ; GFX11-NEXT:    v_cmp_o_f32_e32 vcc_lo, v2, v10
 ; GFX11-NEXT:    v_min_f32_e32 v10, v7, v15
@@ -1727,169 +1727,169 @@ define <16 x float> @v_minimum_v16f32(<16 x float> %src0, <16 x float> %src1) {
 ; GFX7-LABEL: v_minimum_v16f32:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX7-NEXT:    v_min_f32_e32 v0, v0, v16
+; GFX7-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v17
 ; GFX7-NEXT:    v_min_f32_e32 v1, v1, v17
-; GFX7-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v18
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v18
 ; GFX7-NEXT:    v_min_f32_e32 v2, v2, v18
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v19
+; GFX7-NEXT:    v_mov_b32_e32 v17, 0x7fc00000
+; GFX7-NEXT:    v_min_f32_e32 v18, v13, v29
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v29
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v19
 ; GFX7-NEXT:    v_min_f32_e32 v3, v3, v19
-; GFX7-NEXT:    v_mov_b32_e32 v18, 0x7fc00000
-; GFX7-NEXT:    v_min_f32_e32 v19, v0, v16
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[28:29], v0, v16
-; GFX7-NEXT:    v_min_f32_e32 v16, v14, v30
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v20
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v20
 ; GFX7-NEXT:    v_min_f32_e32 v4, v4, v20
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v21
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v21
 ; GFX7-NEXT:    v_min_f32_e32 v5, v5, v21
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v22
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v22
 ; GFX7-NEXT:    v_min_f32_e32 v6, v6, v22
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v23
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v23
 ; GFX7-NEXT:    v_min_f32_e32 v7, v7, v23
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v24
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v24
 ; GFX7-NEXT:    v_min_f32_e32 v8, v8, v24
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v25
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v25
 ; GFX7-NEXT:    v_min_f32_e32 v9, v9, v25
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v26
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v26
 ; GFX7-NEXT:    v_min_f32_e32 v10, v10, v26
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v27
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v27
 ; GFX7-NEXT:    v_min_f32_e32 v11, v11, v27
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v28
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[26:27], v12, v28
 ; GFX7-NEXT:    v_min_f32_e32 v12, v12, v28
-; GFX7-NEXT:    v_cmp_o_f32_e64 s[26:27], v13, v29
-; GFX7-NEXT:    v_min_f32_e32 v13, v13, v29
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v18, v1, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v14, v18, v16, s[40:41]
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v18, v19, s[28:29]
-; GFX7-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v3, v18, v3, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s[8:9]
-; GFX7-NEXT:    v_cndmask_b32_e64 v5, v18, v5, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v6, v18, v6, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v7, v18, v7, s[14:15]
-; GFX7-NEXT:    v_cndmask_b32_e64 v8, v18, v8, s[16:17]
-; GFX7-NEXT:    v_cndmask_b32_e64 v9, v18, v9, s[18:19]
-; GFX7-NEXT:    v_cndmask_b32_e64 v10, v18, v10, s[20:21]
-; GFX7-NEXT:    v_cndmask_b32_e64 v11, v18, v11, s[22:23]
-; GFX7-NEXT:    v_cndmask_b32_e64 v12, v18, v12, s[24:25]
-; GFX7-NEXT:    v_cndmask_b32_e64 v13, v18, v13, s[26:27]
+; GFX7-NEXT:    v_min_f32_e32 v19, v14, v30
+; GFX7-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v17, v0, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v13, v17, v18, s[28:29]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v17, v1, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v17, v2, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, v17, v3, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v4, v17, v4, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, v17, v6, s[14:15]
+; GFX7-NEXT:    v_cndmask_b32_e64 v7, v17, v7, s[16:17]
+; GFX7-NEXT:    v_cndmask_b32_e64 v8, v17, v8, s[18:19]
+; GFX7-NEXT:    v_cndmask_b32_e64 v9, v17, v9, s[20:21]
+; GFX7-NEXT:    v_cndmask_b32_e64 v10, v17, v10, s[22:23]
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v17, v11, s[24:25]
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v17, v12, s[26:27]
+; GFX7-NEXT:    v_cndmask_b32_e64 v14, v17, v19, s[40:41]
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_min_f32_e32 v16, v15, v17
-; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
-; GFX7-NEXT:    v_cndmask_b32_e32 v15, v18, v16, vcc
+; GFX7-NEXT:    v_min_f32_e32 v18, v15, v16
+; GFX7-NEXT:    v_cmp_o_f32_e32 vcc, v15, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_minimum_v16f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
+; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX8-NEXT:    v_min_f32_e32 v0, v0, v16
+; GFX8-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v17
 ; GFX8-NEXT:    v_min_f32_e32 v1, v1, v17
-; GFX8-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v18
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v18
 ; GFX8-NEXT:    v_min_f32_e32 v2, v2, v18
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v19
+; GFX8-NEXT:    v_mov_b32_e32 v17, 0x7fc00000
+; GFX8-NEXT:    v_min_f32_e32 v18, v13, v29
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v29
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v19
 ; GFX8-NEXT:    v_min_f32_e32 v3, v3, v19
-; GFX8-NEXT:    v_mov_b32_e32 v18, 0x7fc00000
-; GFX8-NEXT:    v_min_f32_e32 v19, v0, v16
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[28:29], v0, v16
-; GFX8-NEXT:    v_min_f32_e32 v16, v14, v30
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v20
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v20
 ; GFX8-NEXT:    v_min_f32_e32 v4, v4, v20
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v21
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v21
 ; GFX8-NEXT:    v_min_f32_e32 v5, v5, v21
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v22
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v22
 ; GFX8-NEXT:    v_min_f32_e32 v6, v6, v22
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v23
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v23
 ; GFX8-NEXT:    v_min_f32_e32 v7, v7, v23
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v24
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v24
 ; GFX8-NEXT:    v_min_f32_e32 v8, v8, v24
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v25
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v25
 ; GFX8-NEXT:    v_min_f32_e32 v9, v9, v25
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v26
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v26
 ; GFX8-NEXT:    v_min_f32_e32 v10, v10, v26
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v27
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v27
 ; GFX8-NEXT:    v_min_f32_e32 v11, v11, v27
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v28
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[26:27], v12, v28
 ; GFX8-NEXT:    v_min_f32_e32 v12, v12, v28
-; GFX8-NEXT:    v_cmp_o_f32_e64 s[26:27], v13, v29
-; GFX8-NEXT:    v_min_f32_e32 v13, v13, v29
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v18, v1, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v14, v18, v16, s[40:41]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v18, v19, s[28:29]
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v18, v3, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v18, v5, s[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v18, v6, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v18, v7, s[14:15]
-; GFX8-NEXT:    v_cndmask_b32_e64 v8, v18, v8, s[16:17]
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, v18, v9, s[18:19]
-; GFX8-NEXT:    v_cndmask_b32_e64 v10, v18, v10, s[20:21]
-; GFX8-NEXT:    v_cndmask_b32_e64 v11, v18, v11, s[22:23]
-; GFX8-NEXT:    v_cndmask_b32_e64 v12, v18, v12, s[24:25]
-; GFX8-NEXT:    v_cndmask_b32_e64 v13, v18, v13, s[26:27]
+; GFX8-NEXT:    v_min_f32_e32 v19, v14, v30
+; GFX8-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v17, v0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, v17, v18, s[28:29]
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v17, v1, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v17, v2, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v17, v3, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v17, v4, s[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v17, v6, s[14:15]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v17, v7, s[16:17]
+; GFX8-NEXT:    v_cndmask_b32_e64 v8, v17, v8, s[18:19]
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, v17, v9, s[20:21]
+; GFX8-NEXT:    v_cndmask_b32_e64 v10, v17, v10, s[22:23]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v17, v11, s[24:25]
+; GFX8-NEXT:    v_cndmask_b32_e64 v12, v17, v12, s[26:27]
+; GFX8-NEXT:    v_cndmask_b32_e64 v14, v17, v19, s[40:41]
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_min_f32_e32 v16, v15, v17
-; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v18, v16, vcc
+; GFX8-NEXT:    v_min_f32_e32 v18, v15, v16
+; GFX8-NEXT:    v_cmp_o_f32_e32 vcc, v15, v16
+; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX900-LABEL: v_minimum_v16f32:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v1, v17
+; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v0, v16
+; GFX900-NEXT:    v_min_f32_e32 v0, v0, v16
+; GFX900-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[4:5], v1, v17
 ; GFX900-NEXT:    v_min_f32_e32 v1, v1, v17
-; GFX900-NEXT:    buffer_load_dword v17, off, s[0:3], s32
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[4:5], v2, v18
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[6:7], v2, v18
 ; GFX900-NEXT:    v_min_f32_e32 v2, v2, v18
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[6:7], v3, v19
+; GFX900-NEXT:    v_mov_b32_e32 v17, 0x7fc00000
+; GFX900-NEXT:    v_min_f32_e32 v18, v13, v29
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[28:29], v13, v29
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[8:9], v3, v19
 ; GFX900-NEXT:    v_min_f32_e32 v3, v3, v19
-; GFX900-NEXT:    v_mov_b32_e32 v18, 0x7fc00000
-; GFX900-NEXT:    v_min_f32_e32 v19, v0, v16
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[28:29], v0, v16
-; GFX900-NEXT:    v_min_f32_e32 v16, v14, v30
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[8:9], v4, v20
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[10:11], v4, v20
 ; GFX900-NEXT:    v_min_f32_e32 v4, v4, v20
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[10:11], v5, v21
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[12:13], v5, v21
 ; GFX900-NEXT:    v_min_f32_e32 v5, v5, v21
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[12:13], v6, v22
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[14:15], v6, v22
 ; GFX900-NEXT:    v_min_f32_e32 v6, v6, v22
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[14:15], v7, v23
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[16:17], v7, v23
 ; GFX900-NEXT:    v_min_f32_e32 v7, v7, v23
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[16:17], v8, v24
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[18:19], v8, v24
 ; GFX900-NEXT:    v_min_f32_e32 v8, v8, v24
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[18:19], v9, v25
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[20:21], v9, v25
 ; GFX900-NEXT:    v_min_f32_e32 v9, v9, v25
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[20:21], v10, v26
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[22:23], v10, v26
 ; GFX900-NEXT:    v_min_f32_e32 v10, v10, v26
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[22:23], v11, v27
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[24:25], v11, v27
 ; GFX900-NEXT:    v_min_f32_e32 v11, v11, v27
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[24:25], v12, v28
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[26:27], v12, v28
 ; GFX900-NEXT:    v_min_f32_e32 v12, v12, v28
-; GFX900-NEXT:    v_cmp_o_f32_e64 s[26:27], v13, v29
-; GFX900-NEXT:    v_min_f32_e32 v13, v13, v29
-; GFX900-NEXT:    v_cndmask_b32_e32 v1, v18, v1, vcc
-; GFX900-NEXT:    v_cndmask_b32_e64 v14, v18, v16, s[40:41]
-; GFX900-NEXT:    v_cndmask_b32_e64 v0, v18, v19, s[28:29]
-; GFX900-NEXT:    v_cndmask_b32_e64 v2, v18, v2, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v3, v18, v3, s[6:7]
-; GFX900-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s[8:9]
-; GFX900-NEXT:    v_cndmask_b32_e64 v5, v18, v5, s[10:11]
-; GFX900-NEXT:    v_cndmask_b32_e64 v6, v18, v6, s[12:13]
-; GFX900-NEXT:    v_cndmask_b32_e64 v7, v18, v7, s[14:15]
-; GFX900-NEXT:    v_cndmask_b32_e64 v8, v18, v8, s[16:17]
-; GFX900-NEXT:    v_cndmask_b32_e64 v9, v18, v9, s[18:19]
-; GFX900-NEXT:    v_cndmask_b32_e64 v10, v18, v10, s[20:21]
-; GFX900-NEXT:    v_cndmask_b32_e64 v11, v18, v11, s[22:23]
-; GFX900-NEXT:    v_cndmask_b32_e64 v12, v18, v12, s[24:25]
-; GFX900-NEXT:    v_cndmask_b32_e64 v13, v18, v13, s[26:27]
+; GFX900-NEXT:    v_min_f32_e32 v19, v14, v30
+; GFX900-NEXT:    v_cmp_o_f32_e64 s[40:41], v14, v30
+; GFX900-NEXT:    v_cndmask_b32_e32 v0, v17, v0, vcc
+; GFX900-NEXT:    v_cndmask_b32_e64 v13, v17, v18, s[28:29]
+; GFX900-NEXT:    v_cndmask_b32_e64 v1, v17, v1, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v2, v17, v2, s[6:7]
+; GFX900-NEXT:    v_cndmask_b32_e64 v3, v17, v3, s[8:9]
+; GFX900-NEXT:    v_cndmask_b32_e64 v4, v17, v4, s[10:11]
+; GFX900-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s[12:13]
+; GFX900-NEXT:    v_cndmask_b32_e64 v6, v17, v6, s[14:15]
+; GFX900-NEXT:    v_cndmask_b32_e64 v7, v17, v7, s[16:17]
+; GFX900-NEXT:    v_cndmask_b32_e64 v8, v17, v8, s[18:19]
+; GFX900-NEXT:    v_cndmask_b32_e64 v9, v17, v9, s[20:21]
+; GFX900-NEXT:    v_cndmask_b32_e64 v10, v17, v10, s[22:23]
+; GFX900-NEXT:    v_cndmask_b32_e64 v11, v17, v11, s[24:25]
+; GFX900-NEXT:    v_cndmask_b32_e64 v12, v17, v12, s[26:27]
+; GFX900-NEXT:    v_cndmask_b32_e64 v14, v17, v19, s[40:41]
 ; GFX900-NEXT:    s_waitcnt vmcnt(0)
-; GFX900-NEXT:    v_min_f32_e32 v16, v15, v17
-; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v15, v17
-; GFX900-NEXT:    v_cndmask_b32_e32 v15, v18, v16, vcc
+; GFX900-NEXT:    v_min_f32_e32 v18, v15, v16
+; GFX900-NEXT:    v_cmp_o_f32_e32 vcc, v15, v16
+; GFX900-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX900-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: v_minimum_v16f32:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f64.ll
index 1d1673315f6ff..4c413af878462 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f64.ll
@@ -820,18 +820,18 @@ define void @s_minimum_v2f64(<2 x double> inreg %src0, <2 x double> inreg %src1)
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mov_b32_e32 v0, s22
-; GFX7-NEXT:    v_mov_b32_e32 v4, s20
 ; GFX7-NEXT:    v_mov_b32_e32 v1, s23
-; GFX7-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX7-NEXT:    v_min_f64 v[2:3], s[18:19], v[0:1]
 ; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, s[18:19], v[0:1]
-; GFX7-NEXT:    v_min_f64 v[0:1], s[16:17], v[4:5]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, s20
+; GFX7-NEXT:    v_mov_b32_e32 v1, s21
+; GFX7-NEXT:    v_min_f64 v[4:5], s[16:17], v[0:1]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[0:1]
 ; GFX7-NEXT:    v_mov_b32_e32 v6, 0x7ff80000
 ; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX7-NEXT:    ;;#ASMSTART
 ; GFX7-NEXT:    ; use v[0:3]
 ; GFX7-NEXT:    ;;#ASMEND
@@ -841,18 +841,18 @@ define void @s_minimum_v2f64(<2 x double> inreg %src0, <2 x double> inreg %src1)
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s22
-; GFX8-NEXT:    v_mov_b32_e32 v4, s20
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s23
-; GFX8-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX8-NEXT:    v_min_f64 v[2:3], s[18:19], v[0:1]
 ; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, s[18:19], v[0:1]
-; GFX8-NEXT:    v_min_f64 v[0:1], s[16:17], v[4:5]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s20
+; GFX8-NEXT:    v_mov_b32_e32 v1, s21
+; GFX8-NEXT:    v_min_f64 v[4:5], s[16:17], v[0:1]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[0:1]
 ; GFX8-NEXT:    v_mov_b32_e32 v6, 0x7ff80000
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX8-NEXT:    ;;#ASMSTART
 ; GFX8-NEXT:    ; use v[0:3]
 ; GFX8-NEXT:    ;;#ASMEND
@@ -862,18 +862,18 @@ define void @s_minimum_v2f64(<2 x double> inreg %src0, <2 x double> inreg %src1)
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX900-NEXT:    v_mov_b32_e32 v0, s22
-; GFX900-NEXT:    v_mov_b32_e32 v4, s20
 ; GFX900-NEXT:    v_mov_b32_e32 v1, s23
-; GFX900-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX900-NEXT:    v_min_f64 v[2:3], s[18:19], v[0:1]
 ; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, s[18:19], v[0:1]
-; GFX900-NEXT:    v_min_f64 v[0:1], s[16:17], v[4:5]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[4:5]
+; GFX900-NEXT:    v_mov_b32_e32 v0, s20
+; GFX900-NEXT:    v_mov_b32_e32 v1, s21
+; GFX900-NEXT:    v_min_f64 v[4:5], s[16:17], v[0:1]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], s[16:17], v[0:1]
 ; GFX900-NEXT:    v_mov_b32_e32 v6, 0x7ff80000
 ; GFX900-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GFX900-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX900-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX900-NEXT:    ;;#ASMSTART
 ; GFX900-NEXT:    ; use v[0:3]
 ; GFX900-NEXT:    ;;#ASMEND
@@ -1743,120 +1743,120 @@ define <8 x double> @v_minimum_v8f64(<8 x double> %src0, <8 x double> %src1) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX7-NEXT:    v_min_f64 v[32:33], v[2:3], v[18:19]
-; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[18:19]
-; GFX7-NEXT:    v_min_f64 v[18:19], v[4:5], v[20:21]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], v[4:5], v[20:21]
-; GFX7-NEXT:    v_min_f64 v[2:3], v[0:1], v[16:17]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[8:9], v[0:1], v[16:17]
+; GFX7-NEXT:    v_min_f64 v[32:33], v[0:1], v[16:17]
+; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[16:17]
+; GFX7-NEXT:    v_min_f64 v[16:17], v[2:3], v[18:19]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[4:5], v[2:3], v[18:19]
 ; GFX7-NEXT:    v_mov_b32_e32 v34, 0x7ff80000
+; GFX7-NEXT:    v_min_f64 v[18:19], v[4:5], v[20:21]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[6:7], v[4:5], v[20:21]
 ; GFX7-NEXT:    v_min_f64 v[20:21], v[6:7], v[22:23]
-; GFX7-NEXT:    v_cmp_u_f64_e64 s[6:7], v[6:7], v[22:23]
-; GFX7-NEXT:    v_min_f64 v[16:17], v[8:9], v[24:25]
+; GFX7-NEXT:    v_cmp_u_f64_e64 s[8:9], v[6:7], v[22:23]
+; GFX7-NEXT:    v_min_f64 v[22:23], v[8:9], v[24:25]
 ; GFX7-NEXT:    v_cmp_u_f64_e64 s[10:11], v[8:9], v[24:25]
-; GFX7-NEXT:    v_min_f64 v[22:23], v[10:11], v[26:27]
+; GFX7-NEXT:    v_min_f64 v[24:25], v[10:11], v[26:27]
 ; GFX7-NEXT:    v_cmp_u_f64_e64 s[12:13], v[10:11], v[26:27]
-; GFX7-NEXT:    v_min_f64 v[24:25], v[12:13], v[28:29]
+; GFX7-NEXT:    v_min_f64 v[26:27], v[12:13], v[28:29]
 ; GFX7-NEXT:    v_cmp_u_f64_e64 s[14:15], v[12:13], v[28:29]
-; GFX7-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[8:9]
-; GFX7-NEXT:    v_cndmask_b32_e64 v1, v3, v34, s[8:9]
-; GFX7-NEXT:    v_cndmask_b32_e64 v2, v32, 0, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v33, v34, vcc
-; GFX7-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[4:5]
-; GFX7-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[6:7]
-; GFX7-NEXT:    v_cndmask_b32_e64 v8, v16, 0, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v9, v17, v34, s[10:11]
-; GFX7-NEXT:    v_cndmask_b32_e64 v10, v22, 0, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v11, v23, v34, s[12:13]
-; GFX7-NEXT:    v_cndmask_b32_e64 v12, v24, 0, s[14:15]
-; GFX7-NEXT:    v_cndmask_b32_e64 v13, v25, v34, s[14:15]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v33, v34, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v16, 0, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, v17, v34, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[8:9]
+; GFX7-NEXT:    v_cndmask_b32_e64 v8, v22, 0, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v9, v23, v34, s[10:11]
+; GFX7-NEXT:    v_cndmask_b32_e64 v10, v24, 0, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v25, v34, s[12:13]
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v26, 0, s[14:15]
+; GFX7-NEXT:    v_cndmask_b32_e64 v13, v27, v34, s[14:15]
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_min_f64 v[18:19], v[14:15], v[30:31]
+; GFX7-NEXT:    v_min_f64 v[16:17], v[14:15], v[30:31]
 ; GFX7-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[30:31]
-; GFX7-NEXT:    v_cndmask_b32_e64 v14, v18, 0, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v15, v19, v34, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v14, v16, 0, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v15, v17, v34, vcc
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_minimum_v8f64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX8-NEXT:    v_min_f64 v[32:33], v[2:3], v[18:19]
-; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[18:19]
-; GFX8-NEXT:    v_min_f64 v[18:19], v[4:5], v[20:21]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], v[4:5], v[20:21]
-; GFX8-NEXT:    v_min_f64 v[2:3], v[0:1], v[16:17]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[8:9], v[0:1], v[16:17]
+; GFX8-NEXT:    v_min_f64 v[32:33], v[0:1], v[16:17]
+; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[16:17]
+; GFX8-NEXT:    v_min_f64 v[16:17], v[2:3], v[18:19]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[4:5], v[2:3], v[18:19]
 ; GFX8-NEXT:    v_mov_b32_e32 v34, 0x7ff80000
+; GFX8-NEXT:    v_min_f64 v[18:19], v[4:5], v[20:21]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[6:7], v[4:5], v[20:21]
 ; GFX8-NEXT:    v_min_f64 v[20:21], v[6:7], v[22:23]
-; GFX8-NEXT:    v_cmp_u_f64_e64 s[6:7], v[6:7], v[22:23]
-; GFX8-NEXT:    v_min_f64 v[16:17], v[8:9], v[24:25]
+; GFX8-NEXT:    v_cmp_u_f64_e64 s[8:9], v[6:7], v[22:23]
+; GFX8-NEXT:    v_min_f64 v[22:23], v[8:9], v[24:25]
 ; GFX8-NEXT:    v_cmp_u_f64_e64 s[10:11], v[8:9], v[24:25]
-; GFX8-NEXT:    v_min_f64 v[22:23], v[10:11], v[26:27]
+; GFX8-NEXT:    v_min_f64 v[24:25], v[10:11], v[26:27]
 ; GFX8-NEXT:    v_cmp_u_f64_e64 s[12:13], v[10:11], v[26:27]
-; GFX8-NEXT:    v_min_f64 v[24:25], v[12:13], v[28:29]
+; GFX8-NEXT:    v_min_f64 v[26:27], v[12:13], v[28:29]
 ; GFX8-NEXT:    v_cmp_u_f64_e64 s[14:15], v[12:13], v[28:29]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, v34, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v32, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v33, v34, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v8, v16, 0, s[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, v17, v34, s[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v10, v22, 0, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v11, v23, v34, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v12, v24, 0, s[14:15]
-; GFX8-NEXT:    v_cndmask_b32_e64 v13, v25, v34, s[14:15]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v33, v34, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v16, 0, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v17, v34, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v8, v22, 0, s[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, v23, v34, s[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v10, v24, 0, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v25, v34, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v12, v26, 0, s[14:15]
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, v27, v34, s[14:15]
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_min_f64 v[18:19], v[14:15], v[30:31]
+; GFX8-NEXT:    v_min_f64 v[16:17], v[14:15], v[30:31]
 ; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[30:31]
-; GFX8-NEXT:    v_cndmask_b32_e64 v14, v18, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v19, v34, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v14, v16, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v34, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX900-LABEL: v_minimum_v8f64:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX900-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX900-NEXT:    v_min_f64 v[32:33], v[2:3], v[18:19]
-; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[18:19]
-; GFX900-NEXT:    v_min_f64 v[18:19], v[4:5], v[20:21]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], v[4:5], v[20:21]
-; GFX900-NEXT:    v_min_f64 v[2:3], v[0:1], v[16:17]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[8:9], v[0:1], v[16:17]
+; GFX900-NEXT:    v_min_f64 v[32:33], v[0:1], v[16:17]
+; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[16:17]
+; GFX900-NEXT:    v_min_f64 v[16:17], v[2:3], v[18:19]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[4:5], v[2:3], v[18:19]
 ; GFX900-NEXT:    v_mov_b32_e32 v34, 0x7ff80000
+; GFX900-NEXT:    v_min_f64 v[18:19], v[4:5], v[20:21]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[6:7], v[4:5], v[20:21]
 ; GFX900-NEXT:    v_min_f64 v[20:21], v[6:7], v[22:23]
-; GFX900-NEXT:    v_cmp_u_f64_e64 s[6:7], v[6:7], v[22:23]
-; GFX900-NEXT:    v_min_f64 v[16:17], v[8:9], v[24:25]
+; GFX900-NEXT:    v_cmp_u_f64_e64 s[8:9], v[6:7], v[22:23]
+; GFX900-NEXT:    v_min_f64 v[22:23], v[8:9], v[24:25]
 ; GFX900-NEXT:    v_cmp_u_f64_e64 s[10:11], v[8:9], v[24:25]
-; GFX900-NEXT:    v_min_f64 v[22:23], v[10:11], v[26:27]
+; GFX900-NEXT:    v_min_f64 v[24:25], v[10:11], v[26:27]
 ; GFX900-NEXT:    v_cmp_u_f64_e64 s[12:13], v[10:11], v[26:27]
-; GFX900-NEXT:    v_min_f64 v[24:25], v[12:13], v[28:29]
+; GFX900-NEXT:    v_min_f64 v[26:27], v[12:13], v[28:29]
 ; GFX900-NEXT:    v_cmp_u_f64_e64 s[14:15], v[12:13], v[28:29]
-; GFX900-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[8:9]
-; GFX900-NEXT:    v_cndmask_b32_e64 v1, v3, v34, s[8:9]
-; GFX900-NEXT:    v_cndmask_b32_e64 v2, v32, 0, vcc
-; GFX900-NEXT:    v_cndmask_b32_e32 v3, v33, v34, vcc
-; GFX900-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[4:5]
-; GFX900-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[6:7]
-; GFX900-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[6:7]
-; GFX900-NEXT:    v_cndmask_b32_e64 v8, v16, 0, s[10:11]
-; GFX900-NEXT:    v_cndmask_b32_e64 v9, v17, v34, s[10:11]
-; GFX900-NEXT:    v_cndmask_b32_e64 v10, v22, 0, s[12:13]
-; GFX900-NEXT:    v_cndmask_b32_e64 v11, v23, v34, s[12:13]
-; GFX900-NEXT:    v_cndmask_b32_e64 v12, v24, 0, s[14:15]
-; GFX900-NEXT:    v_cndmask_b32_e64 v13, v25, v34, s[14:15]
+; GFX900-NEXT:    v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX900-NEXT:    v_cndmask_b32_e32 v1, v33, v34, vcc
+; GFX900-NEXT:    v_cndmask_b32_e64 v2, v16, 0, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v3, v17, v34, s[4:5]
+; GFX900-NEXT:    v_cndmask_b32_e64 v4, v18, 0, s[6:7]
+; GFX900-NEXT:    v_cndmask_b32_e64 v5, v19, v34, s[6:7]
+; GFX900-NEXT:    v_cndmask_b32_e64 v6, v20, 0, s[8:9]
+; GFX900-NEXT:    v_cndmask_b32_e64 v7, v21, v34, s[8:9]
+; GFX900-NEXT:    v_cndmask_b32_e64 v8, v22, 0, s[10:11]
+; GFX900-NEXT:    v_cndmask_b32_e64 v9, v23, v34, s[10:11]
+; GFX900-NEXT:    v_cndmask_b32_e64 v10, v24, 0, s[12:13]
+; GFX900-NEXT:    v_cndmask_b32_e64 v11, v25, v34, s[12:13]
+; GFX900-NEXT:    v_cndmask_b32_e64 v12, v26, 0, s[14:15]
+; GFX900-NEXT:    v_cndmask_b32_e64 v13, v27, v34, s[14:15]
 ; GFX900-NEXT:    s_waitcnt vmcnt(0)
-; GFX900-NEXT:    v_min_f64 v[18:19], v[14:15], v[30:31]
+; GFX900-NEXT:    v_min_f64 v[16:17], v[14:15], v[30:31]
 ; GFX900-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[30:31]
-; GFX900-NEXT:    v_cndmask_b32_e64 v14, v18, 0, vcc
-; GFX900-NEXT:    v_cndmask_b32_e32 v15, v19, v34, vcc
+; GFX900-NEXT:    v_cndmask_b32_e64 v14, v16, 0, vcc
+; GFX900-NEXT:    v_cndmask_b32_e32 v15, v17, v34, vcc
 ; GFX900-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX950-LABEL: v_minimum_v8f64:
@@ -2365,24 +2365,24 @@ define <16 x double> @v_minimum_v16f64(<16 x double> %src0, <16 x double> %src1)
 ; GFX950-LABEL: v_minimum_v16f64:
 ; GFX950:       ; %bb.0:
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v40 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a2, v41 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a3, v42 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a4, v43 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a5, v44 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a6, v45 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a7, v46 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a8, v47 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a9, v56 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a10, v57 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a0, v40 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v41 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a2, v42 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a3, v43 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a4, v44 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a5, v45 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a6, v46 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a7, v47 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a8, v56 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a9, v57 ; Reload Reuse
+; GFX950-NEXT:    scratch_load_dword v33, off, s32 offset:8
+; GFX950-NEXT:    scratch_load_dword v32, off, s32 offset:4
 ; GFX950-NEXT:    scratch_load_dword v37, off, s32 offset:16
 ; GFX950-NEXT:    scratch_load_dword v36, off, s32 offset:12
 ; GFX950-NEXT:    scratch_load_dword v39, off, s32 offset:24
 ; GFX950-NEXT:    scratch_load_dword v38, off, s32 offset:20
-; GFX950-NEXT:    scratch_load_dword v49, off, s32 offset:32
-; GFX950-NEXT:    scratch_load_dword v48, off, s32 offset:28
-; GFX950-NEXT:    scratch_load_dword v57, off, s32 offset:8
-; GFX950-NEXT:    scratch_load_dword v56, off, s32 offset:4
+; GFX950-NEXT:    scratch_load_dword v57, off, s32 offset:32
+; GFX950-NEXT:    scratch_load_dword v56, off, s32 offset:28
 ; GFX950-NEXT:    scratch_load_dword v47, off, s32 offset:40
 ; GFX950-NEXT:    scratch_load_dword v46, off, s32 offset:36
 ; GFX950-NEXT:    scratch_load_dword v45, off, s32 offset:48
@@ -2397,148 +2397,149 @@ define <16 x double> @v_minimum_v16f64(<16 x double> %src0, <16 x double> %src1)
 ; GFX950-NEXT:    scratch_load_dword v52, off, s32 offset:76
 ; GFX950-NEXT:    scratch_load_dword v51, off, s32 offset:88
 ; GFX950-NEXT:    scratch_load_dword v50, off, s32 offset:84
-; GFX950-NEXT:    scratch_load_dword v35, off, s32 offset:96
-; GFX950-NEXT:    scratch_load_dword v34, off, s32 offset:92
+; GFX950-NEXT:    scratch_load_dword v49, off, s32 offset:96
+; GFX950-NEXT:    scratch_load_dword v48, off, s32 offset:92
 ; GFX950-NEXT:    scratch_load_dword v31, off, s32
-; GFX950-NEXT:    scratch_load_dword v33, off, s32 offset:104
-; GFX950-NEXT:    scratch_load_dword v32, off, s32 offset:100
-; GFX950-NEXT:    v_accvgpr_write_b32 a11, v58 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a12, v59 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a13, v60 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a14, v61 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a15, v62 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_write_b32 a16, v63 ; Reload Reuse
+; GFX950-NEXT:    scratch_load_dword v35, off, s32 offset:104
+; GFX950-NEXT:    scratch_load_dword v34, off, s32 offset:100
+; GFX950-NEXT:    v_accvgpr_write_b32 a10, v58 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a11, v59 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a12, v60 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a13, v61 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a14, v62 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_write_b32 a15, v63 ; Reload Reuse
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_min_f64 v[58:59], v[2:3], v[36:37]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[36:37]
-; GFX950-NEXT:    scratch_load_dword v37, off, s32 offset:112
-; GFX950-NEXT:    scratch_load_dword v36, off, s32 offset:108
+; GFX950-NEXT:    v_min_f64 v[58:59], v[0:1], v[32:33]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[32:33]
+; GFX950-NEXT:    scratch_load_dword v33, off, s32 offset:112
+; GFX950-NEXT:    scratch_load_dword v32, off, s32 offset:108
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_min_f64 v[60:61], v[4:5], v[38:39]
-; GFX950-NEXT:    v_cmp_u_f64_e64 s[0:1], v[4:5], v[38:39]
-; GFX950-NEXT:    scratch_load_dword v39, off, s32 offset:120
-; GFX950-NEXT:    scratch_load_dword v38, off, s32 offset:116
+; GFX950-NEXT:    v_min_f64 v[60:61], v[2:3], v[36:37]
+; GFX950-NEXT:    v_cmp_u_f64_e64 s[0:1], v[2:3], v[36:37]
+; GFX950-NEXT:    scratch_load_dword v37, off, s32 offset:120
+; GFX950-NEXT:    scratch_load_dword v36, off, s32 offset:116
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_min_f64 v[62:63], v[6:7], v[48:49]
-; GFX950-NEXT:    v_cmp_u_f64_e64 s[2:3], v[6:7], v[48:49]
-; GFX950-NEXT:    scratch_load_dword v49, off, s32 offset:128
-; GFX950-NEXT:    scratch_load_dword v48, off, s32 offset:124
+; GFX950-NEXT:    v_min_f64 v[62:63], v[4:5], v[38:39]
+; GFX950-NEXT:    v_cmp_u_f64_e64 s[2:3], v[4:5], v[38:39]
+; GFX950-NEXT:    scratch_load_dword v39, off, s32 offset:128
+; GFX950-NEXT:    scratch_load_dword v38, off, s32 offset:124
+; GFX950-NEXT:    v_mov_b32_e32 v2, 0x7ff80000
 ; GFX950-NEXT:    s_waitcnt vmcnt(25)
-; GFX950-NEXT:    v_min_f64 v[2:3], v[0:1], v[56:57]
-; GFX950-NEXT:    v_cmp_u_f64_e64 s[4:5], v[0:1], v[56:57]
-; GFX950-NEXT:    v_mov_b32_e32 v0, 0x7ff80000
+; GFX950-NEXT:    v_min_f64 v[0:1], v[6:7], v[56:57]
+; GFX950-NEXT:    v_cmp_u_f64_e64 s[4:5], v[6:7], v[56:57]
 ; GFX950-NEXT:    s_waitcnt vmcnt(23)
 ; GFX950-NEXT:    v_min_f64 v[56:57], v[8:9], v[46:47]
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v2, 0, s[4:5]
-; GFX950-NEXT:    v_accvgpr_write_b32 a0, v1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v3, v0, s[4:5]
-; GFX950-NEXT:    v_cndmask_b32_e64 v2, v58, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v3, v59, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v58, v58, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v59, v59, v2, vcc
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[8:9], v[46:47]
-; GFX950-NEXT:    s_waitcnt vmcnt(21)
-; GFX950-NEXT:    v_min_f64 v[46:47], v[10:11], v[44:45]
-; GFX950-NEXT:    v_cndmask_b32_e64 v4, v60, 0, s[0:1]
+; GFX950-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v1, v2, s[4:5]
 ; GFX950-NEXT:    v_cndmask_b32_e64 v8, v56, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v9, v57, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v9, v57, v2, vcc
+; GFX950-NEXT:    s_waitcnt vmcnt(21)
+; GFX950-NEXT:    v_min_f64 v[0:1], v[10:11], v[44:45]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[10:11], v[44:45]
+; GFX950-NEXT:    v_cndmask_b32_e64 v60, v60, 0, s[0:1]
+; GFX950-NEXT:    v_cndmask_b32_e64 v3, v61, v2, s[0:1]
+; GFX950-NEXT:    v_cndmask_b32_e64 v10, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v11, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(19)
-; GFX950-NEXT:    v_min_f64 v[44:45], v[12:13], v[42:43]
-; GFX950-NEXT:    v_cndmask_b32_e64 v5, v61, v0, s[0:1]
-; GFX950-NEXT:    v_cndmask_b32_e64 v10, v46, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v11, v47, v0, vcc
+; GFX950-NEXT:    v_min_f64 v[0:1], v[12:13], v[42:43]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[12:13], v[42:43]
+; GFX950-NEXT:    v_cndmask_b32_e64 v4, v62, 0, s[2:3]
+; GFX950-NEXT:    v_cndmask_b32_e64 v5, v63, v2, s[2:3]
+; GFX950-NEXT:    v_cndmask_b32_e64 v12, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v13, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(17)
-; GFX950-NEXT:    v_min_f64 v[42:43], v[14:15], v[40:41]
-; GFX950-NEXT:    v_cndmask_b32_e64 v6, v62, 0, s[2:3]
-; GFX950-NEXT:    v_cndmask_b32_e64 v12, v44, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v13, v45, v0, vcc
+; GFX950-NEXT:    v_min_f64 v[0:1], v[14:15], v[40:41]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[14:15], v[40:41]
+; GFX950-NEXT:    v_accvgpr_read_b32 v63, a15 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v62, a14 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v14, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v15, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(15)
-; GFX950-NEXT:    v_min_f64 v[40:41], v[16:17], v[54:55]
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v63, v0, s[2:3]
-; GFX950-NEXT:    v_cndmask_b32_e64 v14, v42, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v15, v43, v0, vcc
+; GFX950-NEXT:    v_min_f64 v[0:1], v[16:17], v[54:55]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[16:17], v[54:55]
+; GFX950-NEXT:    v_accvgpr_read_b32 v61, a13 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v57, a9 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v16, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v17, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(13)
-; GFX950-NEXT:    v_min_f64 v[54:55], v[18:19], v[52:53]
-; GFX950-NEXT:    v_accvgpr_read_b32 v63, a16 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v16, v40, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v17, v41, v0, vcc
+; GFX950-NEXT:    v_min_f64 v[0:1], v[18:19], v[52:53]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[18:19], v[52:53]
+; GFX950-NEXT:    v_accvgpr_read_b32 v56, a8 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v47, a7 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v18, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v19, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(11)
-; GFX950-NEXT:    v_min_f64 v[52:53], v[20:21], v[50:51]
-; GFX950-NEXT:    v_accvgpr_read_b32 v62, a15 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v18, v54, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v19, v55, v0, vcc
+; GFX950-NEXT:    v_min_f64 v[0:1], v[20:21], v[50:51]
 ; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[20:21], v[50:51]
+; GFX950-NEXT:    v_accvgpr_read_b32 v46, a6 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v45, a5 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v20, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v21, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(9)
-; GFX950-NEXT:    v_min_f64 v[50:51], v[22:23], v[34:35]
-; GFX950-NEXT:    v_accvgpr_read_b32 v61, a14 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v20, v52, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v21, v53, v0, vcc
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[22:23], v[34:35]
+; GFX950-NEXT:    v_min_f64 v[0:1], v[22:23], v[48:49]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[22:23], v[48:49]
+; GFX950-NEXT:    v_accvgpr_read_b32 v44, a4 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v43, a3 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v22, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v23, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(6)
-; GFX950-NEXT:    v_min_f64 v[34:35], v[24:25], v[32:33]
-; GFX950-NEXT:    v_accvgpr_read_b32 v60, a13 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v22, v50, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v23, v51, v0, vcc
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[24:25], v[32:33]
-; GFX950-NEXT:    v_accvgpr_read_b32 v59, a12 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v58, a11 ; Reload Reuse
-; GFX950-NEXT:    v_cndmask_b32_e64 v24, v34, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v25, v35, v0, vcc
-; GFX950-NEXT:    v_accvgpr_read_b32 v57, a10 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v56, a9 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v47, a8 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v46, a7 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v45, a6 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v44, a5 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v43, a4 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v42, a3 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v41, a2 ; Reload Reuse
-; GFX950-NEXT:    v_accvgpr_read_b32 v40, a1 ; Reload Reuse
+; GFX950-NEXT:    v_min_f64 v[0:1], v[24:25], v[34:35]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[24:25], v[34:35]
+; GFX950-NEXT:    v_accvgpr_read_b32 v42, a2 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v41, a1 ; Reload Reuse
+; GFX950-NEXT:    v_cndmask_b32_e64 v24, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v25, v1, v2, vcc
+; GFX950-NEXT:    v_accvgpr_read_b32 v40, a0 ; Reload Reuse
 ; GFX950-NEXT:    s_waitcnt vmcnt(4)
-; GFX950-NEXT:    v_min_f64 v[32:33], v[26:27], v[36:37]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[26:27], v[36:37]
+; GFX950-NEXT:    v_min_f64 v[0:1], v[26:27], v[32:33]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[26:27], v[32:33]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v26, v32, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v27, v33, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v26, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v27, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(2)
-; GFX950-NEXT:    v_min_f64 v[32:33], v[28:29], v[38:39]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[28:29], v[38:39]
+; GFX950-NEXT:    v_min_f64 v[0:1], v[28:29], v[36:37]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[28:29], v[36:37]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v28, v32, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v29, v33, v0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v28, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v29, v1, v2, vcc
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
-; GFX950-NEXT:    v_min_f64 v[32:33], v[30:31], v[48:49]
-; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[30:31], v[48:49]
+; GFX950-NEXT:    v_min_f64 v[0:1], v[30:31], v[38:39]
+; GFX950-NEXT:    v_cmp_u_f64_e32 vcc, v[30:31], v[38:39]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v30, v32, 0, vcc
-; GFX950-NEXT:    v_cndmask_b32_e32 v31, v33, v0, vcc
-; GFX950-NEXT:    v_accvgpr_read_b32 v0, a0
+; GFX950-NEXT:    v_cndmask_b32_e64 v30, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e32 v31, v1, v2, vcc
+; GFX950-NEXT:    v_mov_b32_e32 v0, v58
+; GFX950-NEXT:    v_mov_b32_e32 v1, v59
+; GFX950-NEXT:    v_mov_b32_e32 v2, v60
+; GFX950-NEXT:    v_accvgpr_read_b32 v60, a12 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v59, a11 ; Reload Reuse
+; GFX950-NEXT:    v_accvgpr_read_b32 v58, a10 ; Reload Reuse
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_minimum_v16f64:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x19
-; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:16
-; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
-; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:24
-; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:20
-; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:32
-; GFX10-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:28
+; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:8
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; GFX10-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:16
+; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:12
+; GFX10-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:24
+; GFX10-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:20
 ; GFX10-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:36
-; GFX10-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:68
-; GFX10-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:64
-; GFX10-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:60
-; GFX10-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:56
-; GFX10-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:52
-; GFX10-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:48
-; GFX10-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:44
+; GFX10-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:32
+; GFX10-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:28
+; GFX10-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:68
+; GFX10-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:64
+; GFX10-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:60
+; GFX10-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:56
+; GFX10-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:52
+; GFX10-NEXT:    buffer_load_dword v65, off, s[0:3], s32 offset:48
+; GFX10-NEXT:    buffer_load_dword v64, off, s[0:3], s32 offset:44
 ; GFX10-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:40
-; GFX10-NEXT:    buffer_load_dword v65, off, s[0:3], s32 offset:8
-; GFX10-NEXT:    buffer_load_dword v64, off, s[0:3], s32 offset:4
 ; GFX10-NEXT:    buffer_load_dword v66, off, s[0:3], s32 offset:100
 ; GFX10-NEXT:    buffer_load_dword v69, off, s[0:3], s32 offset:96
 ; GFX10-NEXT:    buffer_load_dword v68, off, s[0:3], s32 offset:92
@@ -2546,96 +2547,96 @@ define <16 x double> @v_minimum_v16f64(<16 x double> %src0, <16 x double> %src1)
 ; GFX10-NEXT:    buffer_load_dword v70, off, s[0:3], s32 offset:84
 ; GFX10-NEXT:    buffer_load_dword v81, off, s[0:3], s32 offset:80
 ; GFX10-NEXT:    buffer_load_dword v80, off, s[0:3], s32 offset:76
-; GFX10-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:72
+; GFX10-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:72
 ; GFX10-NEXT:    buffer_load_dword v67, off, s[0:3], s32 offset:104
 ; GFX10-NEXT:    s_waitcnt vmcnt(24)
-; GFX10-NEXT:    v_min_f64 v[82:83], v[2:3], v[31:32]
-; GFX10-NEXT:    v_cmp_u_f64_e32 vcc_lo, v[2:3], v[31:32]
+; GFX10-NEXT:    v_min_f64 v[82:83], v[0:1], v[31:32]
+; GFX10-NEXT:    v_cmp_u_f64_e32 vcc_lo, v[0:1], v[31:32]
 ; GFX10-NEXT:    s_waitcnt vmcnt(22)
-; GFX10-NEXT:    v_min_f64 v[84:85], v[4:5], v[33:34]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s4, v[4:5], v[33:34]
+; GFX10-NEXT:    v_min_f64 v[84:85], v[2:3], v[33:34]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s4, v[2:3], v[33:34]
 ; GFX10-NEXT:    s_clause 0x3
-; GFX10-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:120
-; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:116
-; GFX10-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:112
-; GFX10-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:108
+; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:120
+; GFX10-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:116
+; GFX10-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:112
+; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:108
 ; GFX10-NEXT:    s_waitcnt vmcnt(24)
-; GFX10-NEXT:    v_min_f64 v[32:33], v[6:7], v[35:36]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s5, v[6:7], v[35:36]
+; GFX10-NEXT:    v_min_f64 v[32:33], v[4:5], v[35:36]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s5, v[4:5], v[35:36]
 ; GFX10-NEXT:    s_clause 0x2
 ; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:128
-; GFX10-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:124
-; GFX10-NEXT:    s_waitcnt vmcnt(23)
-; GFX10-NEXT:    v_cmp_u_f64_e64 s10, v[14:15], v[50:51]
+; GFX10-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:128
+; GFX10-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:124
+; GFX10-NEXT:    s_waitcnt vmcnt(24)
+; GFX10-NEXT:    v_min_f64 v[34:35], v[6:7], v[48:49]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s6, v[6:7], v[48:49]
 ; GFX10-NEXT:    s_waitcnt vmcnt(21)
-; GFX10-NEXT:    v_cmp_u_f64_e64 s9, v[12:13], v[52:53]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s10, v[14:15], v[52:53]
 ; GFX10-NEXT:    s_waitcnt vmcnt(19)
-; GFX10-NEXT:    v_cmp_u_f64_e64 s7, v[10:11], v[54:55]
-; GFX10-NEXT:    s_waitcnt vmcnt(18)
-; GFX10-NEXT:    v_min_f64 v[34:35], v[8:9], v[37:38]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s6, v[8:9], v[37:38]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s9, v[12:13], v[54:55]
+; GFX10-NEXT:    s_waitcnt vmcnt(17)
+; GFX10-NEXT:    v_cmp_u_f64_e64 s8, v[10:11], v[64:65]
 ; GFX10-NEXT:    s_waitcnt vmcnt(16)
-; GFX10-NEXT:    v_min_f64 v[8:9], v[0:1], v[64:65]
-; GFX10-NEXT:    v_min_f64 v[36:37], v[10:11], v[54:55]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s8, v[0:1], v[64:65]
-; GFX10-NEXT:    v_min_f64 v[38:39], v[12:13], v[52:53]
-; GFX10-NEXT:    v_min_f64 v[52:53], v[14:15], v[50:51]
+; GFX10-NEXT:    v_min_f64 v[48:49], v[8:9], v[37:38]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s7, v[8:9], v[37:38]
+; GFX10-NEXT:    v_min_f64 v[36:37], v[10:11], v[64:65]
+; GFX10-NEXT:    v_min_f64 v[38:39], v[12:13], v[54:55]
+; GFX10-NEXT:    v_min_f64 v[54:55], v[14:15], v[52:53]
 ; GFX10-NEXT:    s_waitcnt vmcnt(11)
-; GFX10-NEXT:    v_min_f64 v[54:55], v[20:21], v[70:71]
+; GFX10-NEXT:    v_min_f64 v[64:65], v[20:21], v[70:71]
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s13, v[20:21], v[70:71]
 ; GFX10-NEXT:    s_waitcnt vmcnt(9)
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s12, v[18:19], v[80:81]
 ; GFX10-NEXT:    s_waitcnt vmcnt(8)
-; GFX10-NEXT:    v_min_f64 v[50:51], v[16:17], v[48:49]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s11, v[16:17], v[48:49]
-; GFX10-NEXT:    v_min_f64 v[48:49], v[18:19], v[80:81]
-; GFX10-NEXT:    v_min_f64 v[64:65], v[22:23], v[68:69]
+; GFX10-NEXT:    v_min_f64 v[52:53], v[16:17], v[50:51]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s11, v[16:17], v[50:51]
+; GFX10-NEXT:    v_min_f64 v[50:51], v[18:19], v[80:81]
+; GFX10-NEXT:    v_min_f64 v[70:71], v[22:23], v[68:69]
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s14, v[22:23], v[68:69]
 ; GFX10-NEXT:    s_waitcnt vmcnt(7)
 ; GFX10-NEXT:    v_min_f64 v[68:69], v[24:25], v[66:67]
 ; GFX10-NEXT:    v_cmp_u_f64_e64 s15, v[24:25], v[66:67]
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v36, 0, s7
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v8, 0, s8
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v9, 0x7ff80000, s8
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v34, 0, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v35, 0x7ff80000, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v37, 0x7ff80000, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v34, 0, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v35, 0x7ff80000, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v48, 0, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v49, 0x7ff80000, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v36, 0, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v37, 0x7ff80000, s8
 ; GFX10-NEXT:    v_cndmask_b32_e64 v12, v38, 0, s9
 ; GFX10-NEXT:    v_cndmask_b32_e64 v13, v39, 0x7ff80000, s9
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v52, 0, s10
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v53, 0x7ff80000, s10
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, v50, 0, s11
-; GFX10-NEXT:    v_cndmask_b32_e64 v17, v51, 0x7ff80000, s11
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v48, 0, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v19, v49, 0x7ff80000, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v20, v54, 0, s13
-; GFX10-NEXT:    v_cndmask_b32_e64 v21, v55, 0x7ff80000, s13
-; GFX10-NEXT:    v_cndmask_b32_e64 v22, v64, 0, s14
-; GFX10-NEXT:    v_cndmask_b32_e64 v23, v65, 0x7ff80000, s14
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v54, 0, s10
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v55, 0x7ff80000, s10
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v52, 0, s11
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v53, 0x7ff80000, s11
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v50, 0, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v51, 0x7ff80000, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v20, v64, 0, s13
+; GFX10-NEXT:    v_cndmask_b32_e64 v21, v65, 0x7ff80000, s13
+; GFX10-NEXT:    v_cndmask_b32_e64 v22, v70, 0, s14
+; GFX10-NEXT:    v_cndmask_b32_e64 v23, v71, 0x7ff80000, s14
 ; GFX10-NEXT:    v_cndmask_b32_e64 v24, v68, 0, s15
 ; GFX10-NEXT:    v_cndmask_b32_e64 v25, v69, 0x7ff80000, s15
 ; GFX10-NEXT:    s_waitcnt vmcnt(5)
-; GFX10-NEXT:    v_min_f64 v[70:71], v[28:29], v[2:3]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s17, v[28:29], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[80:81], v[28:29], v[0:1]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s17, v[28:29], v[0:1]
 ; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    v_min_f64 v[66:67], v[26:27], v[4:5]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s16, v[26:27], v[4:5]
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v82, 0, vcc_lo
+; GFX10-NEXT:    v_min_f64 v[66:67], v[26:27], v[2:3]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s16, v[26:27], v[2:3]
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v82, 0, vcc_lo
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_min_f64 v[80:81], v[30:31], v[6:7]
-; GFX10-NEXT:    v_cmp_u_f64_e64 s18, v[30:31], v[6:7]
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v83, 0x7ff80000, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v84, 0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v85, 0x7ff80000, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v32, 0, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v33, 0x7ff80000, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v28, v70, 0, s17
-; GFX10-NEXT:    v_cndmask_b32_e64 v29, v71, 0x7ff80000, s17
+; GFX10-NEXT:    v_min_f64 v[86:87], v[30:31], v[4:5]
+; GFX10-NEXT:    v_cmp_u_f64_e64 s18, v[30:31], v[4:5]
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v83, 0x7ff80000, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v84, 0, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v85, 0x7ff80000, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v32, 0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v33, 0x7ff80000, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v28, v80, 0, s17
+; GFX10-NEXT:    v_cndmask_b32_e64 v29, v81, 0x7ff80000, s17
 ; GFX10-NEXT:    v_cndmask_b32_e64 v26, v66, 0, s16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v27, v67, 0x7ff80000, s16
-; GFX10-NEXT:    v_cndmask_b32_e64 v30, v80, 0, s18
-; GFX10-NEXT:    v_cndmask_b32_e64 v31, v81, 0x7ff80000, s18
+; GFX10-NEXT:    v_cndmask_b32_e64 v30, v86, 0, s18
+; GFX10-NEXT:    v_cndmask_b32_e64 v31, v87, 0x7ff80000, s18
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_minimum_v16f64:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.ll
index c29362898f40e..42671f9dd6747 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.ll
@@ -651,10 +651,10 @@ define amdgpu_kernel void @round_v8f32(ptr addrspace(1) %out, <8 x float> %in) #
 ; GFX11-NEXT:    v_dual_sub_f32 v2, s11, v0 :: v_dual_sub_f32 v3, s10, v1
 ; GFX11-NEXT:    v_sub_f32_e32 v7, s9, v4
 ; GFX11-NEXT:    v_trunc_f32_e32 v9, s13
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_sub_f32_e32 v12, s15, v5
-; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v2|, 0.5
 ; GFX11-NEXT:    v_sub_f32_e32 v11, s8, v8
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v2|, 0.5
+; GFX11-NEXT:    v_sub_f32_e32 v12, s15, v5
 ; GFX11-NEXT:    v_trunc_f32_e32 v6, s14
 ; GFX11-NEXT:    v_sub_f32_e32 v14, s13, v9
 ; GFX11-NEXT:    v_trunc_f32_e32 v10, s12
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index a9240eff8e691..67c2ee6403558 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -1697,7 +1697,6 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_bfe_u32 s7, s2, 0x10009
 ; GFX12-NEXT:    s_bfe_u32 s8, s2, 0x1000d
 ; GFX12-NEXT:    s_and_b32 s9, s2, 1
-; GFX12-NEXT:    v_mov_b32_e32 v1, s8
 ; GFX12-NEXT:    s_bfe_u32 s10, s2, 0x1000a
 ; GFX12-NEXT:    s_bfe_u32 s2, s2, 0x1000c
 ; GFX12-NEXT:    s_bfe_u32 s11, s6, 0x10005
@@ -1709,6 +1708,7 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_bfe_u32 s17, s6, 0x10008
 ; GFX12-NEXT:    s_bfe_u32 s6, s6, 0x1000e
 ; GFX12-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v5, s7
+; GFX12-NEXT:    v_mov_b32_e32 v1, s8
 ; GFX12-NEXT:    v_dual_mov_b32 v15, s3 :: v_dual_mov_b32 v2, s6
 ; GFX12-NEXT:    v_dual_mov_b32 v3, s13 :: v_dual_mov_b32 v4, s17
 ; GFX12-NEXT:    v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v11, s5
@@ -2266,8 +2266,8 @@ define amdgpu_kernel void @constant_zextload_v32i1_to_v32i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s12 :: v_dual_mov_b32 v0, s27
-; GFX12-NEXT:    v_dual_mov_b32 v3, s11 :: v_dual_mov_b32 v2, s26
+; GFX12-NEXT:    v_dual_mov_b32 v0, s27 :: v_dual_mov_b32 v3, s11
+; GFX12-NEXT:    v_dual_mov_b32 v1, s12 :: v_dual_mov_b32 v2, s26
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s10 :: v_dual_mov_b32 v4, s25
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s9 :: v_dual_mov_b32 v6, s24
 ; GFX12-NEXT:    v_dual_mov_b32 v13, s8 :: v_dual_mov_b32 v12, s23
@@ -2668,8 +2668,8 @@ define amdgpu_kernel void @constant_sextload_v32i1_to_v32i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s21 :: v_dual_mov_b32 v0, s22
-; GFX12-NEXT:    v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v2, s20
+; GFX12-NEXT:    v_dual_mov_b32 v0, s22 :: v_dual_mov_b32 v3, s19
+; GFX12-NEXT:    v_dual_mov_b32 v1, s21 :: v_dual_mov_b32 v2, s20
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s18
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s15 :: v_dual_mov_b32 v6, s16
 ; GFX12-NEXT:    v_dual_mov_b32 v13, s13 :: v_dual_mov_b32 v12, s14
@@ -3314,8 +3314,8 @@ define amdgpu_kernel void @constant_zextload_v64i1_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v0, s3
-; GFX12-NEXT:    v_dual_mov_b32 v3, s25 :: v_dual_mov_b32 v2, s57
+; GFX12-NEXT:    v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v3, s25
+; GFX12-NEXT:    v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v2, s57
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s24 :: v_dual_mov_b32 v4, s56
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s23 :: v_dual_mov_b32 v6, s55
 ; GFX12-NEXT:    v_mov_b32_e32 v9, s22
@@ -3367,8 +3367,8 @@ define amdgpu_kernel void @constant_zextload_v64i1_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v0, s47
-; GFX12-NEXT:    v_dual_mov_b32 v3, s14 :: v_dual_mov_b32 v2, s45
+; GFX12-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v3, s14
+; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s45
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s13 :: v_dual_mov_b32 v4, s44
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s12 :: v_dual_mov_b32 v6, s43
 ; GFX12-NEXT:    v_dual_mov_b32 v9, s11 :: v_dual_mov_b32 v8, s42
@@ -4075,8 +4075,8 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s50 :: v_dual_mov_b32 v0, s3
-; GFX12-NEXT:    v_dual_mov_b32 v3, s48 :: v_dual_mov_b32 v2, s49
+; GFX12-NEXT:    v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v3, s48
+; GFX12-NEXT:    v_dual_mov_b32 v1, s50 :: v_dual_mov_b32 v2, s49
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s46 :: v_dual_mov_b32 v4, s47
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s44 :: v_dual_mov_b32 v6, s45
 ; GFX12-NEXT:    v_mov_b32_e32 v9, s42
@@ -4128,8 +4128,8 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v0, s27
-; GFX12-NEXT:    v_dual_mov_b32 v3, s24 :: v_dual_mov_b32 v2, s25
+; GFX12-NEXT:    v_dual_mov_b32 v0, s27 :: v_dual_mov_b32 v3, s24
+; GFX12-NEXT:    v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v2, s25
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s22 :: v_dual_mov_b32 v4, s23
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s20 :: v_dual_mov_b32 v6, s21
 ; GFX12-NEXT:    v_dual_mov_b32 v9, s18 :: v_dual_mov_b32 v8, s19
@@ -5653,14 +5653,13 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    global_load_u16 v0, v1, s[2:3]
 ; GFX12-NEXT:    s_wait_loadcnt 0x0
-; GFX12-NEXT:    v_and_b32_e32 v4, 0xffff, v0
 ; GFX12-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX12-NEXT:    v_mov_b32_e32 v7, v1
+; GFX12-NEXT:    v_dual_mov_b32 v7, v1 :: v_dual_and_b32 v4, 0xffff, v0
 ; GFX12-NEXT:    v_mov_b32_e32 v11, v1
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT:    v_bfe_u32 v2, v4, 11, 1
 ; GFX12-NEXT:    s_bfe_u32 s3, s2, 0x1000a
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-NEXT:    v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v0, s3
+; GFX12-NEXT:    v_bfe_u32 v2, v4, 11, 1
 ; GFX12-NEXT:    s_bfe_u32 s3, s2, 0x1000d
 ; GFX12-NEXT:    s_bfe_u32 s4, s2, 0x1000c
 ; GFX12-NEXT:    v_mov_b32_e32 v5, v1
@@ -7229,8 +7228,8 @@ define amdgpu_kernel void @constant_sextload_v32i1_to_v32i64(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s27 :: v_dual_mov_b32 v0, s26
-; GFX12-NEXT:    v_dual_mov_b32 v3, s51 :: v_dual_mov_b32 v2, s50
+; GFX12-NEXT:    v_dual_mov_b32 v0, s26 :: v_dual_mov_b32 v3, s51
+; GFX12-NEXT:    v_dual_mov_b32 v1, s27 :: v_dual_mov_b32 v2, s50
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s53
 ; GFX12-NEXT:    s_lshr_b32 s30, s2, 12
 ; GFX12-NEXT:    s_lshr_b32 s28, s2, 13
@@ -7273,8 +7272,8 @@ define amdgpu_kernel void @constant_sextload_v32i1_to_v32i64(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s25 :: v_dual_mov_b32 v0, s24
-; GFX12-NEXT:    v_dual_mov_b32 v3, s23 :: v_dual_mov_b32 v2, s22
+; GFX12-NEXT:    v_dual_mov_b32 v0, s24 :: v_dual_mov_b32 v3, s23
+; GFX12-NEXT:    v_dual_mov_b32 v1, s25 :: v_dual_mov_b32 v2, s22
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX12-NEXT:    s_lshr_b32 s68, s2, 1
 ; GFX12-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x10000
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index 817c5def5614f..32c86c094aaa3 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -661,10 +661,10 @@ define amdgpu_kernel void @constant_load_v16i16_align2(ptr addrspace(4) %ptr0) #
 ; GCN-NOHSA-VI-NEXT:    flat_load_ushort v17, v[2:3]
 ; GCN-NOHSA-VI-NEXT:    flat_load_ushort v18, v[4:5]
 ; GCN-NOHSA-VI-NEXT:    flat_load_ushort v19, v[6:7]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v20, v[8:9]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v21, v[10:11]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v12, v[12:13]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v13, v[14:15]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v8, v[8:9]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v9, v[10:11]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v10, v[12:13]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v11, v[14:15]
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s3
@@ -681,27 +681,27 @@ define amdgpu_kernel void @constant_load_v16i16_align2(ptr addrspace(4) %ptr0) #
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s3
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s2
 ; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 18
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v12, v[0:1]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v13, v[2:3]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v14, v[4:5]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v15, v[6:7]
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, s3
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v8, s2
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 16
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, s3
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v10, s2
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s3
 ; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 2
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v14, v[0:1]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v15, v[2:3]
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v4, v[4:5]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v5, v[6:7]
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v8, v[8:9]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v9, v[10:11]
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s1
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s0
 ; GCN-NOHSA-VI-NEXT:    flat_load_ushort v0, v[0:1]
-; GCN-NOHSA-VI-NEXT:    flat_load_ushort v10, v[2:3]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v20, v[2:3]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v21, v[4:5]
+; GCN-NOHSA-VI-NEXT:    flat_load_ushort v22, v[6:7]
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(14)
 ; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v16
 ; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v3, v17, v1
@@ -710,29 +710,29 @@ define amdgpu_kernel void @constant_load_v16i16_align2(ptr addrspace(4) %ptr0) #
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(12)
 ; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v2, v19, v1
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(11)
-; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v20
+; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v8
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(10)
-; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v1, v21, v1
+; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v1, v9, v1
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(9)
-; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v6, 16, v12
+; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v4, 16, v10
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(8)
-; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v7, v13, v6
+; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v7, v11, v4
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(7)
-; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v6, 16, v14
+; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v4, 16, v12
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(6)
-; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v6, v15, v6
+; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v6, v13, v4
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(5)
-; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v4, 16, v14
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(4)
-; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v5, v5, v4
+; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v5, v15, v4
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v4, 16, v8
+; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v4, v9, v4
+; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v4, v20, v0
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NOHSA-VI-NEXT:    v_lshlrev_b32_e32 v0, 16, v21
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v0, v10, v0
+; GCN-NOHSA-VI-NEXT:    v_or_b32_e32 v0, v22, v0
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[0:1], v[0:3]
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
@@ -3051,8 +3051,8 @@ define amdgpu_kernel void @constant_zextload_v32i16_to_v32i32(ptr addrspace(1) %
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[16:17] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[16:17] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s27 :: v_dual_mov_b32 v0, s8
-; GFX12-NEXT:    v_dual_mov_b32 v3, s26 :: v_dual_mov_b32 v2, s9
+; GFX12-NEXT:    v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v3, s26
+; GFX12-NEXT:    v_dual_mov_b32 v1, s27 :: v_dual_mov_b32 v2, s9
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s25
 ; GFX12-NEXT:    s_lshr_b32 s20, s3, 16
 ; GFX12-NEXT:    s_and_b32 s3, s3, 0xffff
@@ -3545,8 +3545,8 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i32(ptr addrspace(1) %
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[16:17] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[16:17] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s27 :: v_dual_mov_b32 v0, s8
-; GFX12-NEXT:    v_dual_mov_b32 v3, s26 :: v_dual_mov_b32 v2, s9
+; GFX12-NEXT:    v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v3, s26
+; GFX12-NEXT:    v_dual_mov_b32 v1, s27 :: v_dual_mov_b32 v2, s9
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s25
 ; GFX12-NEXT:    s_ashr_i32 s20, s3, 16
 ; GFX12-NEXT:    s_ashr_i32 s21, s2, 16
@@ -4415,8 +4415,8 @@ define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[36:37] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[36:37] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[36:37] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s58 :: v_dual_mov_b32 v0, s6
-; GFX12-NEXT:    v_dual_mov_b32 v3, s57 :: v_dual_mov_b32 v2, s7
+; GFX12-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v3, s57
+; GFX12-NEXT:    v_dual_mov_b32 v1, s58 :: v_dual_mov_b32 v2, s7
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s56
 ; GFX12-NEXT:    s_lshr_b32 s51, s1, 16
 ; GFX12-NEXT:    s_lshr_b32 s52, s0, 16
@@ -4458,8 +4458,8 @@ define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[36:37] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[36:37] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[36:37] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v0, s26
-; GFX12-NEXT:    v_dual_mov_b32 v3, s45 :: v_dual_mov_b32 v2, s27
+; GFX12-NEXT:    v_dual_mov_b32 v0, s26 :: v_dual_mov_b32 v3, s45
+; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s27
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s44
 ; GFX12-NEXT:    s_lshr_b32 s39, s21, 16
 ; GFX12-NEXT:    s_lshr_b32 s40, s20, 16
@@ -5353,8 +5353,8 @@ define amdgpu_kernel void @constant_sextload_v64i16_to_v64i32(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[36:37] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[36:37] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[36:37] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s58 :: v_dual_mov_b32 v0, s6
-; GFX12-NEXT:    v_dual_mov_b32 v3, s57 :: v_dual_mov_b32 v2, s7
+; GFX12-NEXT:    v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v3, s57
+; GFX12-NEXT:    v_dual_mov_b32 v1, s58 :: v_dual_mov_b32 v2, s7
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s56
 ; GFX12-NEXT:    s_ashr_i32 s51, s1, 16
 ; GFX12-NEXT:    s_ashr_i32 s52, s0, 16
@@ -5397,8 +5397,8 @@ define amdgpu_kernel void @constant_sextload_v64i16_to_v64i32(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[36:37] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[36:37] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[36:37] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v0, s26
-; GFX12-NEXT:    v_dual_mov_b32 v3, s45 :: v_dual_mov_b32 v2, s27
+; GFX12-NEXT:    v_dual_mov_b32 v0, s26 :: v_dual_mov_b32 v3, s45
+; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s27
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s44
 ; GFX12-NEXT:    s_ashr_i32 s39, s21, 16
 ; GFX12-NEXT:    s_ashr_i32 s40, s20, 16
@@ -7610,8 +7610,8 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:80
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:64
-; GFX12-NEXT:    v_dual_mov_b32 v1, s17 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT:    v_dual_mov_b32 v3, s7 :: v_dual_mov_b32 v2, s6
+; GFX12-NEXT:    v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v3, s7
+; GFX12-NEXT:    v_dual_mov_b32 v1, s17 :: v_dual_mov_b32 v2, s6
 ; GFX12-NEXT:    v_dual_mov_b32 v9, s13 :: v_dual_mov_b32 v8, s12
 ; GFX12-NEXT:    v_dual_mov_b32 v11, s15 :: v_dual_mov_b32 v10, s14
 ; GFX12-NEXT:    v_dual_mov_b32 v21, s3 :: v_dual_mov_b32 v20, s2
@@ -9128,9 +9128,9 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[16:17] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[16:17] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[16:17] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s51 :: v_dual_mov_b32 v0, s50
 ; GFX12-NEXT:    s_wait_alu 0xfffe
-; GFX12-NEXT:    v_dual_mov_b32 v3, s13 :: v_dual_mov_b32 v2, s12
+; GFX12-NEXT:    v_dual_mov_b32 v0, s50 :: v_dual_mov_b32 v3, s13
+; GFX12-NEXT:    v_dual_mov_b32 v1, s51 :: v_dual_mov_b32 v2, s12
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s57 :: v_dual_mov_b32 v4, s56
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s49 :: v_dual_mov_b32 v6, s48
 ; GFX12-NEXT:    v_dual_mov_b32 v9, s45 :: v_dual_mov_b32 v8, s44
@@ -9148,8 +9148,8 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[16:17] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[16:17] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[16:17] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s31 :: v_dual_mov_b32 v0, s30
-; GFX12-NEXT:    v_dual_mov_b32 v3, s29 :: v_dual_mov_b32 v2, s28
+; GFX12-NEXT:    v_dual_mov_b32 v0, s30 :: v_dual_mov_b32 v3, s29
+; GFX12-NEXT:    v_dual_mov_b32 v1, s31 :: v_dual_mov_b32 v2, s28
 ; GFX12-NEXT:    v_dual_mov_b32 v5, s3 :: v_dual_mov_b32 v4, s2
 ; GFX12-NEXT:    v_dual_mov_b32 v7, s27 :: v_dual_mov_b32 v6, s26
 ; GFX12-NEXT:    v_dual_mov_b32 v9, s25 :: v_dual_mov_b32 v8, s24
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
index 68a6a148819e8..d86402a6fb62e 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
@@ -3197,8 +3197,8 @@ define amdgpu_kernel void @constant_sextload_v16i32_to_v16i64(ptr addrspace(1) %
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v28, v[0:3], s[16:17] offset:112
 ; GFX12-NEXT:    global_store_b128 v28, v[4:7], s[16:17] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s19 :: v_dual_mov_b32 v0, s0
-; GFX12-NEXT:    v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v2, s1
+; GFX12-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s18
+; GFX12-NEXT:    v_dual_mov_b32 v1, s19 :: v_dual_mov_b32 v2, s1
 ; GFX12-NEXT:    s_clause 0x5
 ; GFX12-NEXT:    global_store_b128 v28, v[8:11], s[16:17] offset:80
 ; GFX12-NEXT:    global_store_b128 v28, v[12:15], s[16:17] offset:64
@@ -4401,9 +4401,9 @@ define amdgpu_kernel void @constant_sextload_v32i32_to_v32i64(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[36:37] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[36:37] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[36:37] offset:192
+; GFX12-NEXT:    v_dual_mov_b32 v0, s22 :: v_dual_mov_b32 v3, s57
 ; GFX12-NEXT:    s_wait_alu 0xfffe
-; GFX12-NEXT:    v_dual_mov_b32 v1, s24 :: v_dual_mov_b32 v0, s22
-; GFX12-NEXT:    v_dual_mov_b32 v3, s57 :: v_dual_mov_b32 v2, s23
+; GFX12-NEXT:    v_dual_mov_b32 v1, s24 :: v_dual_mov_b32 v2, s23
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s56
 ; GFX12-NEXT:    s_ashr_i32 s51, s17, 31
 ; GFX12-NEXT:    s_ashr_i32 s52, s16, 31
@@ -4433,8 +4433,8 @@ define amdgpu_kernel void @constant_sextload_v32i32_to_v32i64(ptr addrspace(1) %
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[36:37] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[36:37] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[36:37] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v0, s10
-; GFX12-NEXT:    v_dual_mov_b32 v3, s45 :: v_dual_mov_b32 v2, s11
+; GFX12-NEXT:    v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v3, s45
+; GFX12-NEXT:    v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s11
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s44
 ; GFX12-NEXT:    s_ashr_i32 s39, s5, 31
 ; GFX12-NEXT:    s_ashr_i32 s40, s4, 31
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index 3b0f8523e1b52..88beb0683f8e0 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -2827,8 +2827,8 @@ define amdgpu_kernel void @constant_zextload_v32i8_to_v32i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s19 :: v_dual_mov_b32 v0, s30
-; GFX12-NEXT:    v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v2, s8
+; GFX12-NEXT:    v_dual_mov_b32 v0, s30 :: v_dual_mov_b32 v3, s18
+; GFX12-NEXT:    v_dual_mov_b32 v1, s19 :: v_dual_mov_b32 v2, s8
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s17
 ; GFX12-NEXT:    s_lshr_b32 s12, s5, 24
 ; GFX12-NEXT:    s_bfe_u32 s13, s5, 0x80008
@@ -3335,8 +3335,8 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s24 :: v_dual_mov_b32 v0, s8
-; GFX12-NEXT:    v_dual_mov_b32 v3, s22 :: v_dual_mov_b32 v2, s23
+; GFX12-NEXT:    v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v3, s22
+; GFX12-NEXT:    v_dual_mov_b32 v1, s24 :: v_dual_mov_b32 v2, s23
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX12-NEXT:    s_ashr_i32 s13, s5, 24
 ; GFX12-NEXT:    s_bfe_i32 s14, s5, 0x80010
@@ -4203,8 +4203,8 @@ define amdgpu_kernel void @constant_zextload_v64i8_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[16:17] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[16:17] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[16:17] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s42 :: v_dual_mov_b32 v0, s62
-; GFX12-NEXT:    v_dual_mov_b32 v3, s41 :: v_dual_mov_b32 v2, s11
+; GFX12-NEXT:    v_dual_mov_b32 v0, s62 :: v_dual_mov_b32 v3, s41
+; GFX12-NEXT:    v_dual_mov_b32 v1, s42 :: v_dual_mov_b32 v2, s11
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s40
 ; GFX12-NEXT:    s_lshr_b32 s35, s8, 24
 ; GFX12-NEXT:    s_bfe_u32 s36, s8, 0x80008
@@ -4247,8 +4247,8 @@ define amdgpu_kernel void @constant_zextload_v64i8_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[16:17] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[16:17] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[16:17] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v0, s56
-; GFX12-NEXT:    v_dual_mov_b32 v3, s28 :: v_dual_mov_b32 v2, s5
+; GFX12-NEXT:    v_dual_mov_b32 v0, s56 :: v_dual_mov_b32 v3, s28
+; GFX12-NEXT:    v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v2, s5
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s27
 ; GFX12-NEXT:    s_lshr_b32 s22, s2, 24
 ; GFX12-NEXT:    s_bfe_u32 s23, s2, 0x80008
@@ -5163,8 +5163,8 @@ define amdgpu_kernel void @constant_sextload_v64i8_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[16:17] offset:224
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[16:17] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[16:17] offset:192
-; GFX12-NEXT:    v_dual_mov_b32 v1, s54 :: v_dual_mov_b32 v0, s11
-; GFX12-NEXT:    v_dual_mov_b32 v3, s52 :: v_dual_mov_b32 v2, s53
+; GFX12-NEXT:    v_dual_mov_b32 v0, s11 :: v_dual_mov_b32 v3, s52
+; GFX12-NEXT:    v_dual_mov_b32 v1, s54 :: v_dual_mov_b32 v2, s53
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s51
 ; GFX12-NEXT:    s_ashr_i32 s43, s8, 24
 ; GFX12-NEXT:    s_bfe_i32 s44, s8, 0x80010
@@ -5207,8 +5207,8 @@ define amdgpu_kernel void @constant_sextload_v64i8_to_v64i32(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[16:17] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[16:17] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[16:17] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s36 :: v_dual_mov_b32 v0, s5
-; GFX12-NEXT:    v_dual_mov_b32 v3, s34 :: v_dual_mov_b32 v2, s35
+; GFX12-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s34
+; GFX12-NEXT:    v_dual_mov_b32 v1, s36 :: v_dual_mov_b32 v2, s35
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s33
 ; GFX12-NEXT:    s_ashr_i32 s24, s2, 24
 ; GFX12-NEXT:    s_bfe_i32 s25, s2, 0x80010
@@ -7467,8 +7467,8 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX12-NEXT:    s_clause 0x1
 ; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s23 :: v_dual_mov_b32 v0, s22
-; GFX12-NEXT:    v_dual_mov_b32 v3, s21 :: v_dual_mov_b32 v2, s20
+; GFX12-NEXT:    v_dual_mov_b32 v0, s22 :: v_dual_mov_b32 v3, s21
+; GFX12-NEXT:    v_dual_mov_b32 v1, s23 :: v_dual_mov_b32 v2, s20
 ; GFX12-NEXT:    v_dual_mov_b32 v9, s25 :: v_dual_mov_b32 v8, s24
 ; GFX12-NEXT:    v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v10, s26
 ; GFX12-NEXT:    v_dual_mov_b32 v21, s31 :: v_dual_mov_b32 v20, s30
@@ -9002,8 +9002,8 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[8:9] offset:208
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9] offset:192
 ; GFX12-NEXT:    s_wait_alu 0xfffe
-; GFX12-NEXT:    v_dual_mov_b32 v1, s37 :: v_dual_mov_b32 v0, s36
-; GFX12-NEXT:    v_dual_mov_b32 v3, s71 :: v_dual_mov_b32 v2, s70
+; GFX12-NEXT:    v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v3, s71
+; GFX12-NEXT:    v_dual_mov_b32 v1, s37 :: v_dual_mov_b32 v2, s70
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s53
 ; GFX12-NEXT:    s_lshr_b32 s34, s3, 8
 ; GFX12-NEXT:    s_mov_b32 s30, s3
@@ -9044,8 +9044,8 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9] offset:128
 ; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[8:9] offset:112
 ; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[8:9] offset:96
-; GFX12-NEXT:    v_dual_mov_b32 v1, s25 :: v_dual_mov_b32 v0, s24
-; GFX12-NEXT:    v_dual_mov_b32 v3, s23 :: v_dual_mov_b32 v2, s22
+; GFX12-NEXT:    v_dual_mov_b32 v0, s24 :: v_dual_mov_b32 v3, s23
+; GFX12-NEXT:    v_dual_mov_b32 v1, s25 :: v_dual_mov_b32 v2, s22
 ; GFX12-NEXT:    v_mov_b32_e32 v5, s17
 ; GFX12-NEXT:    s_lshr_b32 s68, s0, 8
 ; GFX12-NEXT:    s_bfe_i64 s[6:7], s[62:63], 0x80000
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index b19486b0e7671..94dc980e6b5cc 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -3658,8 +3658,8 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s9
 ; GCN-HSA-NEXT:    s_add_u32 s10, s2, 64
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s8
-; GCN-HSA-NEXT:    flat_load_dwordx4 v[20:23], v[16:17]
 ; GCN-HSA-NEXT:    s_addc_u32 s11, s3, 0
+; GCN-HSA-NEXT:    flat_load_dwordx4 v[20:23], v[16:17]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s11
 ; GCN-HSA-NEXT:    s_add_u32 s10, s2, 0x50
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
index e8c862a3cb93c..e55fb2cac0985 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
@@ -3661,66 +3661,57 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
 ; GCN-GFX900-HSA-NEXT:    s_mov_b64 s[22:23], s[2:3]
 ; GCN-GFX900-HSA-NEXT:    s_mov_b64 s[20:21], s[0:1]
 ; GCN-GFX900-HSA-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-GFX900-HSA-NEXT:    s_add_u32 s20, s20, s17
 ; GCN-GFX900-HSA-NEXT:    s_addc_u32 s21, s21, 0
 ; GCN-GFX900-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[0:3], v8, s[2:3] offset:96
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[4:7], v8, s[2:3] offset:112
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[9:12], v8, s[2:3] offset:80
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[13:16], v8, s[2:3] offset:64
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[17:20], v8, s[2:3] offset:48
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[21:24], v8, s[2:3] offset:32
-; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(5)
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v28, 31, v3
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v2
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v25, v2
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v27, v3
-; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(4)
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[4:7], v12, s[2:3] offset:96
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[8:11], v12, s[2:3] offset:112
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[25:28], v12, s[2:3] offset:80
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[13:16], v12, s[2:3] offset:64
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[17:20], v12, s[2:3] offset:48
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[21:24], v12, s[2:3] offset:32
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[0:3], v12, s[2:3] offset:16
+; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(6)
 ; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v32, 31, v7
 ; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v30, 31, v6
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v36, 31, v5
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v34, 31, v4
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v33, v4
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v35, v5
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v29, v6
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v31, v7
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v0
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v4, v0
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v6, v1
-; GCN-GFX900-HSA-NEXT:    buffer_store_dword v25, off, s[20:23], 0 ; 4-byte Folded Spill
+; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(5)
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v36, 31, v11
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v34, 31, v10
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v40, 31, v9
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v38, 31, v8
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v37, v8
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v39, v9
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v33, v10
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v35, v11
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v5
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v4
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v10, v5
+; GCN-GFX900-HSA-NEXT:    buffer_store_dword v29, off, s[20:23], 0 ; 4-byte Folded Spill
 ; GCN-GFX900-HSA-NEXT:    s_nop 0
-; GCN-GFX900-HSA-NEXT:    buffer_store_dword v26, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill
-; GCN-GFX900-HSA-NEXT:    buffer_store_dword v27, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill
-; GCN-GFX900-HSA-NEXT:    buffer_store_dword v28, off, s[20:23], 0 offset:12 ; 4-byte Folded Spill
+; GCN-GFX900-HSA-NEXT:    buffer_store_dword v30, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill
+; GCN-GFX900-HSA-NEXT:    buffer_store_dword v31, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill
+; GCN-GFX900-HSA-NEXT:    buffer_store_dword v32, off, s[20:23], 0 offset:12 ; 4-byte Folded Spill
 ; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(7)
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v28, 31, v12
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v11
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v40, 31, v10
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v38, 31, v9
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v37, v9
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v39, v10
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v25, v11
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v27, v12
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v16
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v15
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v48, 31, v14
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v46, 31, v13
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v45, v13
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v47, v14
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v4, v15
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v6, v16
 ; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(6)
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v16
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v15
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v44, 31, v14
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v42, 31, v13
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v41, v13
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v43, v14
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v9, v15
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v11, v16
-; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(5)
 ; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v20
 ; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v19
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v48, 31, v18
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v46, 31, v17
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v45, v17
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v47, v18
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v52, 31, v18
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v50, 31, v17
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v49, v17
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v51, v18
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v13, v19
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[49:52], v8, s[2:3] offset:16
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v15, v20
 ; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(5)
 ; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v20, 31, v24
@@ -3731,96 +3722,104 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v55, v22
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v17, v23
 ; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v19, v24
-; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[21:24], v8, s[2:3]
+; GCN-GFX900-HSA-NEXT:    global_load_dwordx4 v[21:24], v12, s[2:3]
 ; GCN-GFX900-HSA-NEXT:    s_nop 0
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[33:36], s[0:1] offset:224
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[29:32], s[0:1] offset:240
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:192
-; GCN-GFX900-HSA-NEXT:    buffer_load_dword v32, off, s[20:23], 0 ; 4-byte Folded Reload
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[37:40], s[0:1] offset:224
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[33:36], s[0:1] offset:240
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[8:11], s[0:1] offset:192
+; GCN-GFX900-HSA-NEXT:    buffer_load_dword v33, off, s[20:23], 0 ; 4-byte Folded Reload
 ; GCN-GFX900-HSA-NEXT:    s_nop 0
-; GCN-GFX900-HSA-NEXT:    buffer_load_dword v33, off, s[20:23], 0 offset:4 ; 4-byte Folded Reload
-; GCN-GFX900-HSA-NEXT:    buffer_load_dword v34, off, s[20:23], 0 offset:8 ; 4-byte Folded Reload
-; GCN-GFX900-HSA-NEXT:    buffer_load_dword v35, off, s[20:23], 0 offset:12 ; 4-byte Folded Reload
-; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(8)
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v60, 31, v52
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v58, 31, v51
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v50
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v49
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v0, v49
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v2, v50
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v57, v51
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v59, v52
+; GCN-GFX900-HSA-NEXT:    buffer_load_dword v34, off, s[20:23], 0 offset:4 ; 4-byte Folded Reload
+; GCN-GFX900-HSA-NEXT:    buffer_load_dword v35, off, s[20:23], 0 offset:8 ; 4-byte Folded Reload
+; GCN-GFX900-HSA-NEXT:    buffer_load_dword v36, off, s[20:23], 0 offset:12 ; 4-byte Folded Reload
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v32, 31, v28
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v30, 31, v27
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v44, 31, v26
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v42, 31, v25
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v41, v25
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v43, v26
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v29, v27
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v31, v28
+; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(12)
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v60, 31, v3
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v58, 31, v2
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v28, 31, v1
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v0
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v25, v0
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v27, v1
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v57, v2
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v59, v3
 ; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(7)
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v31, 31, v24
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v29, 31, v23
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v22
-; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v21
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v4, v21
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v6, v22
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v24
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v23
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v0, v23
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v2, v24
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v22
+; GCN-GFX900-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v21
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v8, v21
+; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v10, v22
 ; GCN-GFX900-HSA-NEXT:    s_waitcnt vmcnt(0)
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[32:35], s[0:1] offset:208
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[37:40], s[0:1] offset:160
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[25:28], s[0:1] offset:176
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[41:44], s[0:1] offset:128
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[9:12], s[0:1] offset:144
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[45:48], s[0:1] offset:96
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[13:16], s[0:1] offset:112
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[53:56], s[0:1] offset:64
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[17:20], s[0:1] offset:80
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1] offset:32
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[57:60], s[0:1] offset:48
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1]
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v28, v23
-; GCN-GFX900-HSA-NEXT:    v_mov_b32_e32 v30, v24
-; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v8, v[28:31], s[0:1] offset:16
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[33:36], s[0:1] offset:208
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[41:44], s[0:1] offset:160
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[29:32], s[0:1] offset:176
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[45:48], s[0:1] offset:128
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[4:7], s[0:1] offset:144
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[49:52], s[0:1] offset:96
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[13:16], s[0:1] offset:112
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[53:56], s[0:1] offset:64
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[17:20], s[0:1] offset:80
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[25:28], s[0:1] offset:32
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[57:60], s[0:1] offset:48
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[8:11], s[0:1]
+; GCN-GFX900-HSA-NEXT:    global_store_dwordx4 v12, v[0:3], s[0:1] offset:16
 ; GCN-GFX900-HSA-NEXT:    s_endpgm
 ;
 ; GCN-GFX908-HSA-LABEL: global_sextload_v32i32_to_v32i64:
 ; GCN-GFX908-HSA:       ; %bb.0:
 ; GCN-GFX908-HSA-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[0:3], v8, s[2:3] offset:96
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[4:7], v8, s[2:3] offset:112
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[9:12], v8, s[2:3] offset:80
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[13:16], v8, s[2:3] offset:64
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[17:20], v8, s[2:3] offset:48
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[21:24], v8, s[2:3] offset:32
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[49:52], v8, s[2:3] offset:16
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[4:7], v12, s[2:3] offset:96
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[8:11], v12, s[2:3] offset:112
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[25:28], v12, s[2:3] offset:80
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[13:16], v12, s[2:3] offset:64
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[17:20], v12, s[2:3] offset:48
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[21:24], v12, s[2:3] offset:32
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[0:3], v12, s[2:3] offset:16
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(6)
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v25, v2
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v28, 31, v3
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v2
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v27, v3
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a0, v25
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a1, v26
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a2, v27
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a3, v28
-; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(4)
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v28, 31, v12
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v11
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v40, 31, v10
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v38, 31, v9
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v37, v9
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v39, v10
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v25, v11
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v27, v12
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v32, 31, v7
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v30, 31, v6
+; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(5)
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v36, 31, v11
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v34, 31, v10
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v40, 31, v9
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v38, 31, v8
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v37, v8
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v39, v9
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v33, v10
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v35, v11
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v5
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v4
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v10, v5
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v29, v6
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v31, v7
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(3)
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v16
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v15
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v44, 31, v14
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v42, 31, v13
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v41, v13
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v43, v14
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v9, v15
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v11, v16
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v16
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v15
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v48, 31, v14
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v46, 31, v13
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v45, v13
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v47, v14
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v4, v15
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v6, v16
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v20
 ; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v19
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v48, 31, v18
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v46, 31, v17
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v45, v17
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v47, v18
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v52, 31, v18
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v50, 31, v17
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v49, v17
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v51, v18
 ; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v13, v19
 ; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v15, v20
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(1)
@@ -3832,57 +3831,57 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
 ; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v55, v22
 ; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v17, v23
 ; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v19, v24
-; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[21:24], v8, s[2:3]
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v32, 31, v7
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v36, 31, v5
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v34, 31, v4
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v33, v4
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v35, v5
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v30, 31, v6
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v29, v6
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v31, v7
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[33:36], s[0:1] offset:224
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[29:32], s[0:1] offset:240
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v35, a3
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v0
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v4, v0
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v6, v1
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v34, a2
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v33, a1
-; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v32, a0
+; GCN-GFX908-HSA-NEXT:    global_load_dwordx4 v[21:24], v12, s[2:3]
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a0, v29
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a3, v32
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a1, v30
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_write_b32 a2, v31
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[37:40], s[0:1] offset:224
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[33:36], s[0:1] offset:240
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v32, 31, v28
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v36, a3
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v30, 31, v27
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v44, 31, v26
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v42, 31, v25
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v41, v25
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v43, v26
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v29, v27
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v31, v28
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(3)
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v60, 31, v52
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v58, 31, v51
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v50
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v49
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v0, v49
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v2, v50
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v57, v51
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v59, v52
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:192
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v60, 31, v3
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v58, 31, v2
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v28, 31, v1
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v0
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v25, v0
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v27, v1
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v57, v2
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v59, v3
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v35, a2
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v34, a1
+; GCN-GFX908-HSA-NEXT:    v_accvgpr_read_b32 v33, a0
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[8:11], s[0:1] offset:192
 ; GCN-GFX908-HSA-NEXT:    s_waitcnt vmcnt(3)
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v31, 31, v24
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v29, 31, v23
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v22
-; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v21
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v4, v21
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v6, v22
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[32:35], s[0:1] offset:208
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[37:40], s[0:1] offset:160
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[25:28], s[0:1] offset:176
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[41:44], s[0:1] offset:128
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[9:12], s[0:1] offset:144
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[45:48], s[0:1] offset:96
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[13:16], s[0:1] offset:112
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[53:56], s[0:1] offset:64
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[17:20], s[0:1] offset:80
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1] offset:32
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[57:60], s[0:1] offset:48
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1]
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v28, v23
-; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v30, v24
-; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v8, v[28:31], s[0:1] offset:16
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v24
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v23
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v0, v23
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v2, v24
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v22
+; GCN-GFX908-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v21
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v8, v21
+; GCN-GFX908-HSA-NEXT:    v_mov_b32_e32 v10, v22
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[33:36], s[0:1] offset:208
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[41:44], s[0:1] offset:160
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[29:32], s[0:1] offset:176
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[45:48], s[0:1] offset:128
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[4:7], s[0:1] offset:144
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[49:52], s[0:1] offset:96
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[13:16], s[0:1] offset:112
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[53:56], s[0:1] offset:64
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[17:20], s[0:1] offset:80
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[25:28], s[0:1] offset:32
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[57:60], s[0:1] offset:48
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[8:11], s[0:1]
+; GCN-GFX908-HSA-NEXT:    global_store_dwordx4 v12, v[0:3], s[0:1] offset:16
 ; GCN-GFX908-HSA-NEXT:    s_endpgm
   %ld = load <32 x i32>, ptr addrspace(1) %in
   %ext = sext <32 x i32> %ld to <32 x i64>
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll
index 2b10d469acf5c..97db15ba637a5 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll
@@ -27,12 +27,12 @@ define amdgpu_kernel void @buffer_last_use_load_0(ptr addrspace(7) %in, ptr addr
 ; GFX12-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX12-NEXT:    s_mov_b32 s5, s12
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_mov_b32 s4, s3
-; GFX12-NEXT:    s_mov_b32 s3, s12
+; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX12-NEXT:    s_mov_b32 s13, s2
 ; GFX12-NEXT:    s_mov_b32 s2, s1
+; GFX12-NEXT:    s_mov_b32 s3, s12
 ; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX12-NEXT:    s_wait_loadcnt 0x0
@@ -69,12 +69,12 @@ define amdgpu_kernel void @buffer_last_use_load_1(ptr addrspace(7) %in, ptr addr
 ; GFX12-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX12-NEXT:    s_mov_b32 s5, s12
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_mov_b32 s4, s3
-; GFX12-NEXT:    s_mov_b32 s3, s12
+; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX12-NEXT:    s_mov_b32 s13, s2
 ; GFX12-NEXT:    s_mov_b32 s2, s1
+; GFX12-NEXT:    s_mov_b32 s3, s12
 ; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX12-NEXT:    s_wait_loadcnt 0x0
@@ -112,12 +112,12 @@ define amdgpu_kernel void @buffer_last_use_and_volatile_load(ptr addrspace(7) %i
 ; GFX12-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX12-NEXT:    s_mov_b32 s5, s12
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_mov_b32 s4, s3
-; GFX12-NEXT:    s_mov_b32 s3, s12
+; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX12-NEXT:    s_mov_b32 s13, s2
 ; GFX12-NEXT:    s_mov_b32 s2, s1
+; GFX12-NEXT:    s_mov_b32 s3, s12
 ; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX12-NEXT:    s_wait_loadcnt 0x0
@@ -153,12 +153,12 @@ define amdgpu_kernel void @buffer_last_use_and_nontemporal_load(ptr addrspace(7)
 ; GFX12-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX12-NEXT:    s_mov_b32 s5, s12
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_mov_b32 s4, s3
-; GFX12-NEXT:    s_mov_b32 s3, s12
+; GFX12-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX12-NEXT:    s_mov_b32 s13, s2
 ; GFX12-NEXT:    s_mov_b32 s2, s1
+; GFX12-NEXT:    s_mov_b32 s3, s12
 ; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX12-NEXT:    s_wait_loadcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
index b4bbe849c08b9..10225bbeb7172 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
@@ -218,12 +218,12 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
 ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX11-SDAG-NEXT:    s_mov_b32 s5, s12
 ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-SDAG-NEXT:    s_mov_b32 s4, s3
-; GFX11-SDAG-NEXT:    s_mov_b32 s3, s12
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-SDAG-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX11-SDAG-NEXT:    s_mov_b32 s13, s2
 ; GFX11-SDAG-NEXT:    s_mov_b32 s2, s1
+; GFX11-SDAG-NEXT:    s_mov_b32 s3, s12
 ; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-SDAG-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX11-SDAG-NEXT:    s_waitcnt vmcnt(0)
@@ -253,12 +253,12 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
 ; GFX11-GISEL-NEXT:    s_load_b32 s7, s[4:5], 0x30
 ; GFX11-GISEL-NEXT:    s_mov_b32 s4, s9
 ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-GISEL-NEXT:    s_mov_b32 s8, s1
 ; GFX11-GISEL-NEXT:    s_mov_b32 s5, s2
-; GFX11-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-GISEL-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
 ; GFX11-GISEL-NEXT:    s_mov_b32 s8, s3
+; GFX11-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-GISEL-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
 ; GFX11-GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-GISEL-NEXT:    buffer_store_b32 v0, v1, s[4:7], 0 offen glc slc dlc
@@ -287,12 +287,12 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
 ; GFX12-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX12-SDAG-NEXT:    s_mov_b32 s5, s12
 ; GFX12-SDAG-NEXT:    s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-SDAG-NEXT:    s_mov_b32 s4, s3
-; GFX12-SDAG-NEXT:    s_mov_b32 s3, s12
+; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-SDAG-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX12-SDAG-NEXT:    s_mov_b32 s13, s2
 ; GFX12-SDAG-NEXT:    s_mov_b32 s2, s1
+; GFX12-SDAG-NEXT:    s_mov_b32 s3, s12
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-SDAG-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX12-SDAG-NEXT:    s_wait_loadcnt 0x0
@@ -322,12 +322,12 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
 ; GFX12-GISEL-NEXT:    s_load_b32 s7, s[4:5], 0x30
 ; GFX12-GISEL-NEXT:    s_mov_b32 s4, s9
 ; GFX12-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-GISEL-NEXT:    s_mov_b32 s8, s1
 ; GFX12-GISEL-NEXT:    s_mov_b32 s5, s2
-; GFX12-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-GISEL-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
 ; GFX12-GISEL-NEXT:    s_mov_b32 s8, s3
+; GFX12-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-GISEL-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
 ; GFX12-GISEL-NEXT:    s_wait_loadcnt 0x0
 ; GFX12-GISEL-NEXT:    buffer_store_b32 v0, v1, s[4:7], null offen th:TH_STORE_NT
@@ -546,12 +546,12 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
 ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX11-SDAG-NEXT:    s_mov_b32 s5, s12
 ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-SDAG-NEXT:    s_mov_b32 s4, s3
-; GFX11-SDAG-NEXT:    s_mov_b32 s3, s12
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-SDAG-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX11-SDAG-NEXT:    s_mov_b32 s13, s2
 ; GFX11-SDAG-NEXT:    s_mov_b32 s2, s1
+; GFX11-SDAG-NEXT:    s_mov_b32 s3, s12
 ; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-SDAG-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX11-SDAG-NEXT:    s_waitcnt vmcnt(0)
@@ -581,12 +581,12 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
 ; GFX11-GISEL-NEXT:    s_load_b32 s7, s[4:5], 0x30
 ; GFX11-GISEL-NEXT:    s_mov_b32 s4, s9
 ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-GISEL-NEXT:    s_mov_b32 s8, s1
 ; GFX11-GISEL-NEXT:    s_mov_b32 s5, s2
-; GFX11-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX11-GISEL-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
 ; GFX11-GISEL-NEXT:    s_mov_b32 s8, s3
+; GFX11-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-GISEL-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
 ; GFX11-GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-GISEL-NEXT:    buffer_store_b32 v0, v1, s[4:7], 0 offen dlc
@@ -615,12 +615,12 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
 ; GFX12-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x20
 ; GFX12-SDAG-NEXT:    s_mov_b32 s5, s12
 ; GFX12-SDAG-NEXT:    s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-SDAG-NEXT:    s_mov_b32 s4, s3
-; GFX12-SDAG-NEXT:    s_mov_b32 s3, s12
+; GFX12-SDAG-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-SDAG-NEXT:    s_or_b64 s[6:7], s[4:5], s[12:13]
 ; GFX12-SDAG-NEXT:    s_mov_b32 s13, s2
 ; GFX12-SDAG-NEXT:    s_mov_b32 s2, s1
+; GFX12-SDAG-NEXT:    s_mov_b32 s3, s12
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-SDAG-NEXT:    s_or_b64 s[4:5], s[2:3], s[12:13]
 ; GFX12-SDAG-NEXT:    s_wait_loadcnt 0x0
@@ -650,12 +650,12 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
 ; GFX12-GISEL-NEXT:    s_load_b32 s7, s[4:5], 0x30
 ; GFX12-GISEL-NEXT:    s_mov_b32 s4, s9
 ; GFX12-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-GISEL-NEXT:    s_mov_b32 s8, s1
 ; GFX12-GISEL-NEXT:    s_mov_b32 s5, s2
-; GFX12-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX12-GISEL-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
 ; GFX12-GISEL-NEXT:    s_mov_b32 s8, s3
+; GFX12-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-GISEL-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
 ; GFX12-GISEL-NEXT:    s_wait_loadcnt 0x0
 ; GFX12-GISEL-NEXT:    buffer_store_b32 v0, v1, s[4:7], null offen th:TH_STORE_NT scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/maximumnum.bf16.ll b/llvm/test/CodeGen/AMDGPU/maximumnum.bf16.ll
index 9009ec54f174d..dbc3a07cf8793 100644
--- a/llvm/test/CodeGen/AMDGPU/maximumnum.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/maximumnum.bf16.ll
@@ -1893,9 +1893,9 @@ define <3 x bfloat> @v_maximumnum_v3bf16(<3 x bfloat> %x, <3 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v10, 16, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v5
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
@@ -2108,10 +2108,10 @@ define <3 x bfloat> @v_maximumnum_v3bf16(<3 x bfloat> %x, <3 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v5
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v0
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
@@ -5041,8 +5041,8 @@ define <6 x bfloat> @v_maximumnum_v6bf16(<6 x bfloat> %x, <6 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v15, v8, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v8, v10, v13, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v1
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v1, v1, v4 :: v_dual_lshlrev_b32 v16, 16, v2
@@ -5437,8 +5437,8 @@ define <6 x bfloat> @v_maximumnum_v6bf16(<6 x bfloat> %x, <6 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v15, v8, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v8, v10, v13, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v1
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -10166,9 +10166,9 @@ define <16 x bfloat> @v_maximumnum_v16bf16(<16 x bfloat> %x, <16 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v12
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc_lo
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v25, v25
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v5, v18, v5, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v2 :: v_dual_lshlrev_b32 v25, 16, v1
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -11244,9 +11244,9 @@ define <16 x bfloat> @v_maximumnum_v16bf16(<16 x bfloat> %x, <16 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v12
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc_lo
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v25, v25
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v5, v18, v5, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v2 :: v_dual_lshlrev_b32 v25, 16, v1
@@ -15188,255 +15188,254 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v134, 16, v134
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v7
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v22
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s0, v15, v15
-; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s44, v112, v134
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s3, v33, v33
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s44, v112, v134
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v13.h, v29.h, s2
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.l, v10.h, v26.h, s8
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v71, 0xffff0000, v3
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s14, v55, v55
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s17, v66, v66
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v55.l, v29.h, v15.l, s3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v9
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v83, 0xffff0000, v1
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s9, v39, v39
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v39.l, v4.h, v20.h, s20
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v71, v71
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v85.l, v15.l
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s9, v39, v39
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v39.l, v4.h, v20.h, s20
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v66.l, v26.h, v33.l, s9
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v80.l, v20.h, v39.l, s21
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v102.l, v39.l
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v118.l, v66.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v132.l, v80.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v48.l, v3.h, v19.h, s22
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v115, 16, v115
-; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s42, v102, v132
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v118, 16, v118
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s42, v102, v132
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v81.l, v19.h, v48.l, s23
-; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v102.l, v80.l, v39.l, s42
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s23, v85, v115
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v102.l, v80.l, v39.l, s42
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v34, 0xffff0000, v12
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
+; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v8
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
-; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s41, v101, v131
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v102, v102, v102
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v102, v102, v102 :: v_dual_lshlrev_b32 v101, 16, v101
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s12, v53, v53
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v53.l, v1.h, v17.h, s26
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s26, v96, v118
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
 ; GFX11-TRUE16-NEXT:    v_max_f32_e32 v85, v85, v85
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.l, v49.h, v30.h, s0
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v84, 0xffff0000, v17
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s41, v101, v131
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v30
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s4, v34, v34
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_dual_max_f32 v96, v96, v96 :: v_dual_lshlrev_b32 v101, 16, v101
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
+; GFX11-TRUE16-NEXT:    v_max_f32_e32 v96, v96, v96
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s1, v31, v31
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v64, 0xffff0000, v23
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v54.l, v30.h, v14.l, s1
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v101, v101, v101
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v114.l, v54.l
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s15, v64, v64
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v114, 16, v114
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s16, v65, v65
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
-; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s22, v84, v114
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
-; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s24, v86, v116
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v97.l, v34.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v68.l, v24.h, v35.l, s13
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v119.l, v67.l
-; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s25, v87, v117
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v84, v84, v84
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s22, v84, v114
+; GFX11-TRUE16-NEXT:    v_max_f32_e32 v101, v101, v101
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v37.l, v6.h, v22.h, s16
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v98.l, v35.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v69.l, v23.h, v36.l, s15
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v128.l, v68.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v119, 16, v119
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v99.l, v36.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v70.l, v22.h, v37.l, s17
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v83.l, v17.h, v53.l, s27
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v129.l, v69.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v128, 16, v128
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s27, v97, v119
-; GFX11-TRUE16-NEXT:    v_dual_max_f32 v86, v86, v86 :: v_dual_lshlrev_b32 v87, 16, v87
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v100.l, v37.l
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v99, 16, v99
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v70.l
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v129, 16, v129
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s28, v98, v128
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v97.l, v67.l, v34.l, s27
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v87, v87, v87
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v85, v85
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v84, v84, v84 :: v_dual_lshlrev_b32 v87, 16, v87
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v130
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s24, v86, v116
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s29, v99, v129
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v98.l, v68.l, v35.l, s28
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s22
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v86, v86
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v103.l, v48.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v133.l, v81.l
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s25, v87, v117
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s40, v100, v130
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v99.l, v69.l, v36.l, s29
 ; GFX11-TRUE16-NEXT:    v_dual_max_f32 v97, v97, v97 :: v_dual_lshlrev_b32 v98, 16, v98
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v130, v96, 16, 1
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s22
-; GFX11-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v87, v87
+; GFX11-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v133, 16, v133
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v100.l, v70.l, v37.l, s40
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-TRUE16-NEXT:    v_dual_max_f32 v98, v98, v98 :: v_dual_lshlrev_b32 v99, 16, v99
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v131, 0x400000, v96
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v132, v97, 16, 1
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v130, v130, v96, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v96, v96
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s43, v103, v133
-; GFX11-TRUE16-NEXT:    v_dual_max_f32 v99, v99, v99 :: v_dual_lshlrev_b32 v100, 16, v100
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v86, v86, v86 :: v_dual_max_f32 v99, v99, v99
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v133, 0x400000, v97
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v134, v98, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v132, v132, v97, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v96, v130, v131, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v97, v97
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v100, v100, v100
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v87, v87, v87 :: v_dual_max_f32 v100, v100, v100
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v144, 0x400000, v98
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v145, v99, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v134, v134, v98, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v97, v132, v133, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v98, v98
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v146, 0x400000, v99
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v147, v100, 16, 1
+; GFX11-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s23, v85, v85
+; GFX11-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s24, v86, v86
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v145, v145, v99, 0x7fff
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v98, v134, v144, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v99, v99
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v115, v101, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s1, 0, v15.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s5, 0, v31.l
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
+; GFX11-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s25, v87, v87
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s23
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s24
+; GFX11-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v99, v145, v146, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v100, v100
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v85, 0x400000, v101
-; GFX11-TRUE16-NEXT:    v_add3_u32 v115, v115, v101, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s2, 0, v32.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s25
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.h, v116.h, v15.l, s1
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v118.h, v31.l, s5
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s7, 0, v34.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s6, 0, v33.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s3, 0, v35.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s8, 0, v36.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.h, v97.h, v34.l, s7
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0, v37.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.h, v96.h, v33.l, s6
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v85, v101, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.l, v98.h, v35.l, s3
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.h, v99.h, v36.l, s8
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0, v37.l
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v101
+; GFX11-TRUE16-NEXT:    v_add3_u32 v85, v85, v101, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s0, 0, v14.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s9, 0, v38.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v115, v85, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s11, 0, v54.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s12, 0, v55.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v85, v86, s22
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.l, v114.h, v14.l, s0
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v117, v102, 16, 1
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v87, v102, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s13, 0, v64.l
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v102
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v102
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v102, v102
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.l, v14.l, v54.l, s11
-; GFX11-TRUE16-NEXT:    v_add3_u32 v117, v117, v102, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add3_u32 v87, v87, v102, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0, v39.l
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v114
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v15.l, v64.l, s13
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s14, 0, v65.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v117, v86, s22
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v128
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v128
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v87, v115, s22
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v87, 0xffff0000, v114
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v113.l, v53.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s15, 0, v66.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v135.l, v83.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v103.l, v81.l, v48.l, s43
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v116
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v116
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.h, v15.h, v65.l, s14
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v115
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v102
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s20, 0, v71.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v113, 16, v113
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v102
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v101
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s16, 0, v67.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s17, 0, v68.l
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v131, 0xffff0000, v98
@@ -15445,30 +15444,30 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v132, 0xffff0000, v84
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.l, v33.l, v68.l, s17
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s5, 0, v131
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v119, v103, 16, 1
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v87, 0x400000, v103
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v117, v103, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v103
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s7, 0, v132
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v96
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v96
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s21, 0, v80.l
-; GFX11-TRUE16-NEXT:    v_add3_u32 v119, v119, v103, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add3_u32 v117, v117, v103, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v129, 0xffff0000, v97
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s18, 0, v69.l
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v117
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v115
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v85
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s8, 0, v133
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v51
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v51
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.l, v50.h, v51.h, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v101, v101
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v34.l, v31.l
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v118
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v118
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.l, v51.h, v31.l, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v35, 16, v34
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v84.h, v37.l, s4
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.h, v85.h, v38.l, s9
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v101
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v100
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v32.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s4, 0, v129
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v34.l, v70.l, s19
@@ -15481,7 +15480,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v86
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0, v31.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.l, v32.l, v31.l, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v87
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.h, v118.h, v15.l, s1
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s9, 0, v39
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v35.l, v35.l, v80.l, s21
@@ -15526,7 +15525,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v135
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v103, v103
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f32_e64 s0, v113, v54
-; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v119, v87 :: v_dual_and_b32 v54, 0xffff0000, v0
+; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v117, v119 :: v_dual_and_b32 v54, 0xffff0000, v0
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v48.l
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v83.l, v53.l, s0
@@ -16037,13 +16036,13 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v80, 16, v9
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v13
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v15
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v38, 16, v29
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v39, 16, v13
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v35, v33, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v84, 16, v8
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v49, 0xffff0000, v12
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v34, v35, v33 :: v_dual_and_b32 v49, 0xffff0000, v12
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v29
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v11
@@ -16054,42 +16053,41 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v67, 16, v26
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v68, 16, v10
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v10
-; GFX11-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v52, v51, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v30
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v82, 0xffff0000, v8
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v96, 16, v7
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v39, v64, v55 :: v_dual_and_b32 v70, 0xffff0000, v9
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v6
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v103, 16, v21
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v112, 16, v5
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v68, v67, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v86, 0xffff0000, v7
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v4
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v19
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v80, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v82, v82
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v6
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v18
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v84, v83, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v28
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v5
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v1
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v96, v87, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v26
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v3
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v100, v99, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v4
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v102, v102
@@ -16176,516 +16174,517 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v48, v48, v48
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v53, v117
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e64 s1, v48, v48
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v55, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v65, v118
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v66
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v118, v48, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v67, v49, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v69, v119
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v70
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_lshlrev_b32 v102, 16, v80
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v119, 0x400000, v48
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v71, v52, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v81, v128
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v68
-; GFX11-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v128, v53, 16, 1
+; GFX11-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v83, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v85, v129
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v82
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v53
-; GFX11-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v69, v69, v69
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v81, 16, v81
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v87, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e64 s2, v53, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s1
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v81, v81, v81
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s2
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v144, 0x400000, v81
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v97
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v85, v85, v85
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v96, v130
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v146, v85, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v147, 0x400000, v85
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v96, v97, v66, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v98, v131
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v146, v146, v85, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v101
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v98, v99, v68, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v100, v132
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v96, v96, v96
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v100, v101, v70, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v102, v133
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
+; GFX11-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v98, v98, v98
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v102, v103, v80, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v112, v134
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v134, v81, 16, 1
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v100, v100, v100
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v98, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v98
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v112, v113, v82, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v86, v96, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v134, v134, v81, 0x7fff
+; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v98, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v114, v114, v115, vcc_lo
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v115, 0x400000, v96
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v86, v86, v96, 0x7fff
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v69, v69, v69
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
-; GFX11-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v36, v36, v36
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v36, v98, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v116, v116, v117, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v48, v48
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v98
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v100, 16, 1
-; GFX11-FAKE16-NEXT:    v_add3_u32 v36, v36, v98, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v118, v118, v119 :: v_dual_max_f32 v65, v65, v65
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v119, 0x400000, v100
-; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v100, 0x7fff
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v65, v65, v65
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v130, v65, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v131, 0x400000, v65
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v128, v128, v129, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v130, v130, v65, 0x7fff
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v130, v131, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v132, v133, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v132, v133 :: v_dual_lshlrev_b32 v36, 16, v36
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v36, v36, v36
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v134, v144, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e64 s0, v36, v36
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v130, 16, v81
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX11-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v96
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v146, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v146, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v100, v100, v100
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s0
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v85
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v86, v86, v115, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v86, v36, vcc_lo
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v115, v100, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v118
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v114
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v86
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v117, vcc_lo
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v100
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v86, 16, v114
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v116
+; GFX11-FAKE16-NEXT:    v_add3_u32 v115, v115, v100, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v48, v53, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v116
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v117, 16, v128
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v133, 16, v36
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v96, v48, v119, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v48, 16, v114
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v36
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v116
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v128
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v36
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v115, v117, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v34
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v65
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v134, 0xffff0000, v36
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v116
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v116, 0xffff0000, v118
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v48, v34, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v118
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v134, 16, v48
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v146, 16, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v86, v34, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v35
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v128
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v96
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v100, v35, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v96, 0xffff0000, v114
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v114, 16, v118
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v118, 16, v65
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v98, v35, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v37
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v115, v37, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v128
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v144, 0xffff0000, v48
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v114, v37, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v39
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v117, v39, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v116, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v49
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v119, v49, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v118, v49, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v52
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v128, v52, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v128, v52, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v54
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v130, v54 :: v_dual_and_b32 v65, 0xffff0000, v65
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v52, 16, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v130, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v64
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v131, v64, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v131, v64, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v147, v32, v54, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v147, v32, v52 :: v_dual_and_b32 v64, 0xffff0000, v50
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v66
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v102, v102, v102
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v132, v66, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v68
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v32, v133, v68 :: v_dual_and_b32 v81, 0xffff0000, v81
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v146, v146
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v54, v147 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v65
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v119, v102, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v134, v68, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
+; GFX11-FAKE16-NEXT:    v_add3_u32 v119, v119, v102, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v52, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v70
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v54
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v144, v70, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v81, 0xffff0000, v81
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v52
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v146, v70, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v33
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v0
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v34, v33, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v38
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v147
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v102, v102, v102
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v35, v38, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v35, v35, v38 :: v_dual_lshlrev_b32 v34, 16, v147
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v51
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v102, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v51, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v34, v68
-; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v102, 0x7fff
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v54, v147, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v36, v36, v51 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v34, v66
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v52, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v55
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v37, v37, v55 :: v_dual_lshlrev_b32 v34, 16, v34
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v37, v55, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v67
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v34, v34, v34
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v39, v67, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v71
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v55, v34, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v49, v71, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v48, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v83
-; GFX11-FAKE16-NEXT:    v_add3_u32 v55, v55, v34, 0x7fff
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v52, v83, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v49, v83, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v87
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v64, v87, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v49, v54, v87 :: v_dual_lshlrev_b32 v34, 16, v34
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v97
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v31, v97, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v31, v97, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v99
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v32, v99, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v98
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v48, v33, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v114
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v48, 0x400000, v34
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v100, v35, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v116
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v33, v115, v36 :: v_dual_and_b32 v86, 0xffff0000, v86
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v34, v34, v34 :: v_dual_cndmask_b32 v55, v32, v99
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v96
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v34, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v34
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v86, v33, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v54, v54, v34, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v98, v35, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v115
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v114, v36, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v55, v48, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v118
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v48
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v117, v37, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v66, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v117
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v116, v37, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v96
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v119, v38, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v54
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v118, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v69
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v69, 16, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v128, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v81
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v49, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v48, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e64 vcc_lo, 0, v147
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v147, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v85
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v51, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v54
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v51, 0xffff0000, v48
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v54, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v86
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v132, v52, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v134
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v133, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v51
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v51, 16, v112
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v39, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v49, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v52
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v133
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v48, v132, v51 :: v_dual_lshlrev_b32 v51, 16, v112
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v144
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v134, v55, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v84
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v145, v145
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v67, v51, v51
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v135, v84, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v102, v102
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v53, v129 :: v_dual_lshlrev_b32 v55, 16, v54
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v84
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v119, v129, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v101
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v101, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v53, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v54
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v67, v51, v51
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v66, 16, v52
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v101, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v55, v65
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v52, 0xffff0000, v52
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v54, v84, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v84, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v80
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v80, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_lshlrev_b32 v80, 16, v15
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v144, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v64, v67, 16, 1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v66, v80, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v67, 16, 1
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v51, v146, v64 :: v_dual_lshlrev_b32 v80, 16, v15
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v103
-; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v64, v67, 0x7fff
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v103, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v67, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v65, v103, vcc_lo
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v65, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v53, v65 :: v_dual_and_b32 v68, 0xffff0000, v0
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v68, v53, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v67, v70, v69, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v52
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v66, v55, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v66, v64 :: v_dual_lshlrev_b32 v55, 16, v55
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v64
-; GFX11-FAKE16-NEXT:    v_add3_u32 v66, v68, v53, 0x7fff
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v69, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v69, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v55, v55, v55
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v82
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v71, v65, v82 :: v_dual_lshlrev_b32 v70, 16, v55
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v66, v68, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v68, v55, 16, 1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v65, v82, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11-FAKE16-NEXT:    v_add3_u32 v66, v68, v55, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v68, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v69, v70
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v55, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v64, v67, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v15, v50 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v15, v50, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v15, 16, v50
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v113
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v55
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v66, v66, v66
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v71, v113 :: v_dual_lshlrev_b32 v80, 16, v68
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v15, 0xffff0000, v64
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v68, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v68 :: v_dual_and_b32 v15, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v84
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v70, v84, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v70, v84, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v15
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v15, v65, v69, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v54
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v14
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v64, v54, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v80, v71
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v50, v68 :: v_dual_lshlrev_b32 v80, 16, v30
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v66, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v80, 16, v30
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v55
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v50, v68, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
+; GFX11-FAKE16-NEXT:    v_add3_u32 v54, v54, v66, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v14, v30, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v65, v65, v65 :: v_dual_max_f32 v66, v66, v66
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v64, v66, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v64, v66, 0x7fff
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v64, v71 :: v_dual_lshlrev_b32 v71, 16, v13
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v13
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v69, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v64
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v54, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v67
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v30
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v53, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v54
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v69
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v65, 16, 1
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_lshlrev_b32 v66, 16, v30
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v54, v54, v65, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v67
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_and_b32 v54, 0xffff0000, v54
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v70, v66
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v30, v69, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v71, v71
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v65, v65, v65
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v13, v29, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v55, v65, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v80, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v55, v55, v65, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v80, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v71
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v29, v71, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v55
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v67, v55, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v66, v13, v13 :: v_dual_cndmask_b32 v29, v67, v64
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v55
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v65
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v64
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v54
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v66, v13, v13
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v13, v53, v29 :: v_dual_lshlrev_b32 v70, 16, v71
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v13, v53, v29, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v68
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v68, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v70, v67
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v65, v71, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v50
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v55
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v29, v50, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v50, v53, v66, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v66
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v11
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_max_f32 v53, v55, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_and_b32 v55, 0xffff0000, v55
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v28
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v54, v54 :: v_dual_lshlrev_b32 v54, 16, v28
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v29, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v50
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v29, v64, v29 :: v_dual_and_b32 v50, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v64, v53, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v28, v28, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v69
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v64, v53, 0x7fff
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v69, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v28
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v55, v69, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v27
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v11, v27, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v12
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v64, v67 :: v_dual_lshlrev_b32 v68, 16, v28
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v64, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v69, v68
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v28, v12 :: v_dual_lshlrev_b32 v67, 16, v11
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v27, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v30
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v55, v30, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v30, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v53
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v71
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v30, v71 :: v_dual_lshlrev_b32 v55, 16, v64
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v64
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v66
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v71
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v54, v54, v54
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v30, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v67, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v27, vcc_lo
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v50, v54, 16, 1
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v50, v50, v54, 0x7fff
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v64, v64, v64 :: v_dual_cndmask_b32 v27, v55, v27
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v65
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v68, v65 :: v_dual_max_f32 v55, v55, v55
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v50, v55, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v55
-; GFX11-FAKE16-NEXT:    v_add3_u32 v50, v50, v55, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v68, v65, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v10
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v10, v26, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v64, 16, 1
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v67 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v64, 16, v64
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v64, v64, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v68, 16, v9
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v55, v64, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v54, vcc_lo
+; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v54, v64, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v55, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v50
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v12
-; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v55, v64, 0x7fff
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v65
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v12, v30, v12 :: v_dual_lshlrev_b32 v67, 16, v10
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v9
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v67, v55
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v65, v10 :: v_dual_lshlrev_b32 v55, 16, v25
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v10, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v9, v25, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v9, v25 :: v_dual_lshlrev_b32 v54, 16, v54
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v25
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v54, v54, v54
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v53
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v25, v9, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v11
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v25
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v28, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v30, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v66
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v12, v33, v12, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v11, v66, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v64, v55
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v24
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v54, v54, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v25, v9, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v50, 16, v50
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v50, v50, v50 :: v_dual_lshlrev_b32 v55, 16, v8
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v64, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v30, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v24, v24, v8, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v11, v28, v11 :: v_dual_lshlrev_b32 v54, 16, v24
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v30
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v10
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v11, v34, v11, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v55, v54
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v24, v8 :: v_dual_lshlrev_b32 v55, 16, v23
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v7, v7, v23 :: v_dual_lshlrev_b32 v50, 16, v50
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v50, v50, v50
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v7, v23, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v53, v66, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v54
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v23, v23, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v65
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_and_b32 v50, 0xffff0000, v50
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v55, 16, v23
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v23
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v53, 16, v54
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v30
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_cndmask_b32 v10, v28, v10
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v9
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v28, v53, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v54, v9, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v64, v55
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v23, v7 :: v_dual_lshlrev_b32 v55, 16, v6
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v23, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v25
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v9, v25 :: v_dual_lshlrev_b32 v30, 16, v30
@@ -16700,16 +16699,17 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v50, 16, v25
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v28, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v54, v9 :: v_dual_lshlrev_b32 v64, 16, v6
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v28, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v28
+; GFX11-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v9, v36, v9, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v22, v22, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v50, v8 :: v_dual_lshlrev_b32 v55, 16, v22
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v21
@@ -16723,12 +16723,10 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v21, v21, v5, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v24
-; GFX11-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v8, v24 :: v_dual_lshlrev_b32 v53, 16, v21
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v24, 0xffff0000, v25
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v28
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v24
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v24, v30, v30
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v50, v8, vcc_lo
@@ -16746,14 +16744,15 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v20, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v23, v23, v24, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v4
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v20, v20, v4, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v24, v24
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v24, v30, 16, 1
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v23, v23, v50 :: v_dual_lshlrev_b32 v50, 16, v20
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v28
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v24, v24, v30, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v28, 0x400000, v30
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v25, v7, vcc_lo
@@ -16764,6 +16763,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v7, v38, v7, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v20, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v25, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v19
@@ -16783,19 +16783,18 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v22
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v5, v28, v5 :: v_dual_lshlrev_b32 v50, 16, v19
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v21
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v30, v30, v22, 0x7fff
-; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v5, v5, v21, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v53, v50
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v17
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v21, v19, v3, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v22, v30, v54 :: v_dual_lshlrev_b32 v21, 16, v21
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v23
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_max_f32 v21, v21, v21 :: v_dual_lshlrev_b32 v30, 16, v0
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v23, 16, v22
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v25, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v24
@@ -16843,6 +16842,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v30, v25
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v21
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v17, v1, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v24, v22
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
@@ -16876,11 +16876,10 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v19, v50, v53, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v21
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v21, 16, v24
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v19
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v30, v3, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v2
-; GFX11-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v3, v52, v3, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v54, v2, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v1
@@ -16953,277 +16952,273 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v134, 16, v134
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v7
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v22
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s0, v15, v15
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s44, v112, v134
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s3, v33, v33
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s44, v112, v134
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v13.h, v29.h, s2
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.l, v10.h, v26.h, s8
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v71, 0xffff0000, v3
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s14, v55, v55
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s17, v66, v66
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v55.l, v29.h, v15.l, s3
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v9
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v83, 0xffff0000, v1
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v71, v71
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v85.l, v15.l
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s9, v39, v39
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v39.l, v4.h, v20.h, s20
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v71, v71
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v85.l, v15.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v66.l, v26.h, v33.l, s9
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v80.l, v20.h, v39.l, s21
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v102.l, v39.l
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v118.l, v66.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v132.l, v80.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v48.l, v3.h, v19.h, s22
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v115, 16, v115
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s42, v102, v132
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v118, 16, v118
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s42, v102, v132
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v81.l, v19.h, v48.l, s23
-; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s23, v85, v115
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v102.l, v80.l, v39.l, s42
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s23, v85, v115
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v34, 0xffff0000, v12
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
+; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v8
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s41, v101, v131
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v102, v102, v102
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
+; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v102, v102, v102 :: v_dual_lshlrev_b32 v101, 16, v101
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s12, v53, v53
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v53.l, v1.h, v17.h, s26
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s26, v96, v118
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
 ; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v85, v85, v85
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.l, v49.h, v30.h, s0
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v84, 0xffff0000, v17
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s41, v101, v131
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v30
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s4, v34, v34
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v96, v96, v96 :: v_dual_lshlrev_b32 v101, 16, v101
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
+; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v96, v96, v96
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s1, v31, v31
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v64, 0xffff0000, v23
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v64, 0xffff0000, v23
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v54.l, v30.h, v14.l, s1
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v101, v101, v101
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v114.l, v54.l
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s15, v64, v64
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v114, 16, v114
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s16, v65, v65
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s22, v84, v114
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s24, v86, v116
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v97.l, v34.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v68.l, v24.h, v35.l, s13
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v119.l, v67.l
-; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s25, v87, v117
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s22, v84, v114
+; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v101, v101, v101
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v84, v84, v84
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v37.l, v6.h, v22.h, s16
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v98.l, v35.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v69.l, v23.h, v36.l, s15
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v128.l, v68.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v119, 16, v119
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v99.l, v36.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v70.l, v22.h, v37.l, s17
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v83.l, v17.h, v53.l, s27
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v129.l, v69.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v128, 16, v128
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s27, v97, v119
-; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v86, v86, v86 :: v_dual_lshlrev_b32 v87, 16, v87
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v100.l, v37.l
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v99, 16, v99
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v70.l
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v129, 16, v129
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s28, v98, v128
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v97.l, v67.l, v34.l, s27
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v87, v87, v87
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v85, v85
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v84, v84, v84 :: v_dual_lshlrev_b32 v87, 16, v87
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v130
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s24, v86, v116
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s29, v99, v129
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v98.l, v68.l, v35.l, s28
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s22
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v86, v86
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v103.l, v48.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v133.l, v81.l
+; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s25, v87, v117
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s40, v100, v130
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v99.l, v69.l, v36.l, s29
 ; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v97, v97, v97 :: v_dual_lshlrev_b32 v98, 16, v98
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v130, v96, 16, 1
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s22
-; GFX12-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v87, v87
+; GFX12-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v133, 16, v133
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v100.l, v70.l, v37.l, s40
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v98, v98, v98 :: v_dual_lshlrev_b32 v99, 16, v99
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v131, 0x400000, v96
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v132, v97, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v130, v130, v96, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s22
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
+; GFX12-TRUE16-NEXT:    v_add3_u32 v130, v130, v96, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v96, v96
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s43, v103, v133
-; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v99, v99, v99 :: v_dual_lshlrev_b32 v100, 16, v100
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v86, v86, v86 :: v_dual_max_num_f32 v99, v99, v99
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v133, 0x400000, v97
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v134, v98, 16, 1
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v132, v132, v97, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v96, v130, v131, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v97, v97
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v100, v100, v100
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v87, v87, v87 :: v_dual_max_num_f32 v100, v100, v100
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v144, 0x400000, v98
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v145, v99, 16, 1
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v134, v134, v98, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v97, v132, v133, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v98, v98
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v146, 0x400000, v99
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v147, v100, 16, 1
+; GFX12-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s23, v85, v85
+; GFX12-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s24, v86, v86
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v145, v145, v99, 0x7fff
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v98, v134, v144, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v99, v99
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v115, v101, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s1, 0, v15.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s5, 0, v31.l
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
+; GFX12-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s25, v87, v87
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s23
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s24
+; GFX12-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v99, v145, v146, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v100, v100
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v85, 0x400000, v101
-; GFX12-TRUE16-NEXT:    v_add3_u32 v115, v115, v101, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s2, 0, v32.l
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s25
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.h, v116.h, v15.l, s1
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v118.h, v31.l, s5
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s7, 0, v34.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s6, 0, v33.l
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s3, 0, v35.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s8, 0, v36.l
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.h, v97.h, v34.l, s7
-; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0, v37.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.h, v96.h, v33.l, s6
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v85, v101, 16, 1
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.l, v98.h, v35.l, s3
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.h, v99.h, v36.l, s8
+; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0, v37.l
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v101
+; GFX12-TRUE16-NEXT:    v_add3_u32 v85, v85, v101, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s0, 0, v14.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s9, 0, v38.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v115, v85, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s11, 0, v54.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s12, 0, v55.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v85, v86, s22
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.l, v114.h, v14.l, s0
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v117, v102, 16, 1
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v87, v102, 16, 1
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s13, 0, v64.l
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v102
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v102
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v102, v102
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.l, v14.l, v54.l, s11
-; GFX12-TRUE16-NEXT:    v_add3_u32 v117, v117, v102, 0x7fff
+; GFX12-TRUE16-NEXT:    v_add3_u32 v87, v87, v102, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0, v39.l
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v114
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v15.l, v64.l, s13
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s14, 0, v65.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v117, v86, s22
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v128
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v128
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v87, v115, s22
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v87, 0xffff0000, v114
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v113.l, v53.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s15, 0, v66.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v135.l, v83.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v103.l, v81.l, v48.l, s43
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v116
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v116
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.h, v15.h, v65.l, s14
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v115
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v102
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s20, 0, v71.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v113, 16, v113
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v102
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v101
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s16, 0, v67.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s17, 0, v68.l
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v131, 0xffff0000, v98
@@ -17233,32 +17228,32 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.l, v33.l, v68.l, s17
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s5, 0, v131
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v119, v103, 16, 1
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v87, 0x400000, v103
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v117, v103, 16, 1
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v103
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s7, 0, v132
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v96
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v96
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s21, 0, v80.l
-; GFX12-TRUE16-NEXT:    v_add3_u32 v119, v119, v103, 0x7fff
+; GFX12-TRUE16-NEXT:    v_add3_u32 v117, v117, v103, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v129, 0xffff0000, v97
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s18, 0, v69.l
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v117
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v115
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v85
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s8, 0, v133
 ; GFX12-TRUE16-NEXT:    s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v51
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v51
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.l, v50.h, v51.h, vcc_lo
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v101, v101
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v34.l, v31.l
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v118
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v118
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.l, v51.h, v31.l, vcc_lo
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v35, 16, v34
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v84.h, v37.l, s4
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.h, v85.h, v38.l, s9
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v101
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v100
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v32.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s4, 0, v129
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v34.l, v70.l, s19
@@ -17272,7 +17267,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0, v31.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.l, v32.l, v31.l, vcc_lo
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v87
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.h, v118.h, v15.l, s1
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s9, 0, v39
@@ -17323,7 +17318,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_cmp_gt_f32_e64 s0, v113, v54
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v119, v87 :: v_dual_and_b32 v54, 0xffff0000, v0
+; GFX12-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v117, v119 :: v_dual_and_b32 v54, 0xffff0000, v0
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v48.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v83.l, v53.l, s0
@@ -17929,14 +17924,14 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v80, 16, v9
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v13
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX12-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v15
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v38, 16, v29
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v39, 16, v13
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v83, 16, v24
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v35, v33, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v84, 16, v8
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v49, 0xffff0000, v12
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v34, v35, v33 :: v_dual_and_b32 v49, 0xffff0000, v12
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v29
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v11
@@ -17948,48 +17943,47 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v67, 16, v26
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v68, 16, v10
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v10
-; GFX12-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v52, v51, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v30
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v82, 0xffff0000, v8
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v96, 16, v7
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v39, v64, v55 :: v_dual_and_b32 v70, 0xffff0000, v9
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v6
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v103, 16, v21
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v112, 16, v5
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v68, v67, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v86, 0xffff0000, v7
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v4
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v19
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v80, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v82, v82
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v6
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v18
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v2
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v84, v83, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v28
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v5
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v1
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v96, v87, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v26
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v3
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v100, v99, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v4
@@ -18098,6 +18092,8 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v48, v48, v48
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v53, v117
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e64 s1, v48, v48
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v55, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v65, v118
@@ -18115,21 +18111,26 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v71, v52, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v81, v128
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v68
-; GFX12-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v128, v53, 16, 1
+; GFX12-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v83, v54, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v85, v129
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v82
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v53
-; GFX12-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v69, v69, v69
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v81, 16, v81
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v87, v64, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e64 s2, v53, v53
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s1
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v81, v81, v81
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s2
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v144, 0x400000, v81
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v97
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v85, v85, v85
@@ -18147,432 +18148,415 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v98, v99, v68, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v100, v132
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v96, v96, v96
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v100, v101, v70, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v102, v133
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
+; GFX12-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v98, v98, v98
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v102, v103, v80, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v112, v134
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v134, v81, 16, 1
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v100, v100, v100
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v48, v98, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v98
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v112, v113, v82, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v86, v96, 16, 1
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v134, v134, v81, 0x7fff
+; GFX12-FAKE16-NEXT:    v_add3_u32 v48, v48, v98, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v114, v114, v115, vcc_lo
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v115, 0x400000, v96
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v86, v86, v96, 0x7fff
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v69, v69, v69
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v36, v36, v36
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX12-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v36, v98, 16, 1
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v116, v116, v117, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v48, v48
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v98
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v48, v100, 16, 1
-; GFX12-FAKE16-NEXT:    v_add3_u32 v36, v36, v98, 0x7fff
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v118, v118, v119 :: v_dual_max_num_f32 v65, v65, v65
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v119, 0x400000, v100
-; GFX12-FAKE16-NEXT:    v_add3_u32 v48, v48, v100, 0x7fff
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v65, v65, v65
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v130, v65, 16, 1
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v131, 0x400000, v65
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v128, v128, v129, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v130, v130, v65, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v130, v131, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v132, v133, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v132, v133 :: v_dual_lshlrev_b32 v36, 16, v36
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v36, v36, v36
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v134, v144, vcc_lo
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e64 s0, v36, v36
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v130, 16, v81
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX12-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v96
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v146, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v146, 0xffff0000, v50
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v100, v100, v100
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s0
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v85
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v86, v86, v115, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v86, v36, vcc_lo
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v115, v100, 16, 1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v118
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v114
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v86
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v100
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v86, 16, v114
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v116
+; GFX12-FAKE16-NEXT:    v_add3_u32 v115, v115, v100, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v117, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v48, v53, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v116
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v117, 16, v128
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v133, 16, v36
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v36
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v116
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v128
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v36
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v96, v48, v119, vcc_lo
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v48, 16, v114
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v115, v117, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v34
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v65
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v134, 0xffff0000, v36
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v116
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v116, 0xffff0000, v118
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v118
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v134, 16, v48
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v146, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v48, v34, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v86, v34, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v35
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v128
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v96
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v96, 0xffff0000, v114
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v114, 16, v118
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v118, 16, v65
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v100, v35, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v98, v35, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v37
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v128
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v144, 0xffff0000, v48
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v115, v37, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v114, v37, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v39
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v117, v39, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v116, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v49
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v119, v49, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v118, v49, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v128, v52, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v128, v52, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v54
+; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v52, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v130, v54 :: v_dual_and_b32 v65, 0xffff0000, v65
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v130, v54, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v64
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v131, v64, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v131, v64, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v147, v32, v54, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v147, v32, v52 :: v_dual_and_b32 v64, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v66
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v102, v102, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v132, v66, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v68
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v65
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v119, v102, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v32, v133, v68 :: v_dual_and_b32 v81, 0xffff0000, v81
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v146, v146
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v134, v68, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
+; GFX12-FAKE16-NEXT:    v_add3_u32 v119, v119, v102, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v54, v147 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v52, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v70
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v54
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v81, 0xffff0000, v81
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v144, v70, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v146, v70, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v33
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v0
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v34, v33, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v38
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v147
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v102, v102, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v35, v38, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v35, v35, v38 :: v_dual_lshlrev_b32 v34, 16, v147
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v51
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v102, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v51, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v34, v68
-; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v102, 0x7fff
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v0
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v36, v36, v51 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v34, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v54, v147, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v52, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v37, v37, v55 :: v_dual_lshlrev_b32 v34, 16, v34
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v37, v55, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v67
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v34, v34, v34
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v39, v67, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v71
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v55, v34, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v49, v71, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v48, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v83
-; GFX12-FAKE16-NEXT:    v_add3_u32 v55, v55, v34, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v52, v83, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v49, v83, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v87
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v64, v87, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v49, v54, v87 :: v_dual_lshlrev_b32 v34, 16, v34
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v97
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v31, v97, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v31, v97, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v99
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v32, v99, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v98
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v34, v34, v34 :: v_dual_cndmask_b32 v55, v32, v99
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v96
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v34, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v34
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v48, v33, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v114
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v48, 0x400000, v34
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v86, v33, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX12-FAKE16-NEXT:    v_add3_u32 v54, v54, v34, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v100, v35, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v116
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v98, v35, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v115
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v33, v115, v36 :: v_dual_and_b32 v86, 0xffff0000, v86
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v114, v36, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v55, v48, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v118
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v48
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v66, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v117
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v117, v37, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v116, v37, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v96
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v119, v38, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v118, v38, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v69
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v69, 16, v16
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v128, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v81
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v49, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v48, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e64 vcc_lo, 0, v147
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v147, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v85
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v51, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v54
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v51, 0xffff0000, v48
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v49, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v54, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v86
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v133
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v132, v52, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v134
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v48, v132, v51 :: v_dual_lshlrev_b32 v51, 16, v112
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v144
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v133, v64, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v51
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v51, 16, v112
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v134, v55, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v84
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v39, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v145, v145
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v67, v51, v51
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v135, v84, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v102, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v53, v129 :: v_dual_lshlrev_b32 v55, 16, v54
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v84
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v119, v129, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v101
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v101, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v53, v55
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v54
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v67, v51, v51
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v66, 16, v52
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v101, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v55, v65
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v52, 0xffff0000, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v54, v84, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v84, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v80
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v80, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v53, v53 :: v_dual_lshlrev_b32 v80, 16, v15
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v66, v80, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v67, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v144, v64, vcc_lo
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v64, v67, 16, 1
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v51, v146, v64 :: v_dual_lshlrev_b32 v80, 16, v15
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v103
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v64, v64, v67, 0x7fff
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v67, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v103, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v65, v103, vcc_lo
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v65, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v53, v65 :: v_dual_and_b32 v68, 0xffff0000, v0
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v68, v53, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v67, v70, v69, vcc_lo
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v66, v55, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v66, v64 :: v_dual_lshlrev_b32 v55, 16, v55
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v64
-; GFX12-FAKE16-NEXT:    v_add3_u32 v66, v68, v53, 0x7fff
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v53
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v69, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v69, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v55, v55, v55
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v82
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v67
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v64
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v68, v55, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v71, v65, v82 :: v_dual_lshlrev_b32 v70, 16, v55
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v65, v82, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v66, v68, v55, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v66, v68, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v68, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v69, v70
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v55, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v64, v67, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v15, v50 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v15, v50, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v15, 16, v50
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v113
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v55
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v66, v66, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v71, v113 :: v_dual_lshlrev_b32 v80, 16, v68
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v15, 0xffff0000, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v68, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v68 :: v_dual_and_b32 v15, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v84
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v70, v84, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v70, v84, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v15
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v15, v65, v69, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v54
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v14
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v64, v54, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v80, v71
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v66, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v80, 16, v30
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v50, v68 :: v_dual_lshlrev_b32 v80, 16, v30
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v50, v68, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
+; GFX12-FAKE16-NEXT:    v_add3_u32 v54, v54, v66, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v14, v30, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v65, v65, v65 :: v_dual_max_num_f32 v66, v66, v66
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v64, v66, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v64, v64, v66, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v64, v71 :: v_dual_lshlrev_b32 v71, 16, v13
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v13
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v69, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v64
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v54, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v67
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v53, vcc_lo
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v54
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v69
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v65, 16, 1
+; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v67
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_lshlrev_b32 v66, 16, v30
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v54, v54, v65, 0x7fff
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_and_b32 v54, 0xffff0000, v54
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v70, v66
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v30, v69, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v71, v71
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v65, v65, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v13, v29, vcc_lo
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v55, v65, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v55, v55, v65, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v80, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v80, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v71
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v29, v71, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v55
+; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v67, v55, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v66, v13, v13 :: v_dual_cndmask_b32 v29, v67, v64
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v55
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v65
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v64
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v54
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v66, v13, v13
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v13, v53, v29 :: v_dual_lshlrev_b32 v70, 16, v71
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v13, v53, v29, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v68
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v68, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v70, v67
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v65, v71, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v50
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v55
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v29, v50, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v50, v53, v66, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v66
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v11
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_max_num_f32 v53, v55, v55
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_and_b32 v55, 0xffff0000, v55
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v28
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v54, v54 :: v_dual_lshlrev_b32 v54, 16, v28
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v50
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v29, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v29, v64, v29 :: v_dual_and_b32 v50, 0xffff0000, v50
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v64, v53, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v28, v28, v12, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v69
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v64, v64, v53, 0x7fff
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v28
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v69, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v55, v69, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v27
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -18580,8 +18564,8 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v12
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v64, v67 :: v_dual_lshlrev_b32 v68, 16, v28
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v64, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v69, v68
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v28, v12 :: v_dual_lshlrev_b32 v67, 16, v11
@@ -18590,70 +18574,74 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v27, v11, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v55, v30, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v30, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v53
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v64
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v66
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v71
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v54, v54, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v30, v71 :: v_dual_lshlrev_b32 v55, 16, v64
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v66
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v30, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v67, v64
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v11, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v50, v54, 16, 1
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v64
+; GFX12-FAKE16-NEXT:    v_add3_u32 v50, v50, v54, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v27, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v64, v64, v64 :: v_dual_cndmask_b32 v27, v55, v27
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v68, v65 :: v_dual_max_num_f32 v55, v55, v55
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v50, v55, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v55
-; GFX12-FAKE16-NEXT:    v_add3_u32 v50, v50, v55, 0x7fff
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v68, v65, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v10
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v10, v26, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v64, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v67 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v64, 16, v64
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v64, v64, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v68, 16, v9
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v55, v64, 16, 1
+; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v54, v64, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v54, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v55, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v50
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v12
-; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v55, v64, 0x7fff
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v65
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v12, v30, v12 :: v_dual_lshlrev_b32 v67, 16, v10
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v9
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v67, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v65, v10 :: v_dual_lshlrev_b32 v55, 16, v25
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v10, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v9, v25, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v9, v25 :: v_dual_lshlrev_b32 v54, 16, v54
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v25
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v28
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v54, v54, v54
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v53
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v25, v9, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v11
@@ -18665,82 +18653,78 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v30, v12, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v66
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v12, v33, v12, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v11, v66, vcc_lo
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v64, v55
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
+; GFX12-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v24
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v54, v54, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v25, v9, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v50, 16, v50
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v50, v50, v50 :: v_dual_lshlrev_b32 v55, 16, v8
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX12-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v64, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v30, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v24, v24, v8, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v11, v28, v11 :: v_dual_lshlrev_b32 v54, 16, v24
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v30
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v10
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
+; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v11, v34, v11, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v55, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v24, v8 :: v_dual_lshlrev_b32 v55, 16, v23
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v7, v7, v23 :: v_dual_lshlrev_b32 v50, 16, v50
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v50, v50, v50
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v7, v23, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v53, v66, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v54
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v23, v23, v7, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v65
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v53, v53 :: v_dual_and_b32 v50, 0xffff0000, v50
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v23
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v55, 16, v23
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v53, 16, v54
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v30
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v53, v53 :: v_dual_cndmask_b32 v10, v28, v10
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v9
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v28, v53, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v54, v9, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v64, v55
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v6
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v23, v7 :: v_dual_lshlrev_b32 v55, 16, v6
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v23, v7, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v25
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
@@ -18761,15 +18745,17 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v54, v9 :: v_dual_lshlrev_b32 v64, 16, v6
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v28, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v28
+; GFX12-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v9, v36, v9, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v22, v22, v6, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v8
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v50, v8 :: v_dual_lshlrev_b32 v55, 16, v22
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v21
@@ -18782,18 +18768,16 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v22, v6 :: v_dual_lshlrev_b32 v54, 16, v5
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v21, v21, v5, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v24
-; GFX12-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v8, v24 :: v_dual_lshlrev_b32 v53, 16, v21
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v24, 0xffff0000, v25
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v28
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v24
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v24, v30, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -18820,12 +18804,13 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v4
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v20, v20, v4, vcc_lo
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v24, v24
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v24, v30, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v23, v23, v50 :: v_dual_lshlrev_b32 v50, 16, v20
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v28
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v24, v24, v30, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v28, 0x400000, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -18838,6 +18823,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v20, v4, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v6
+; GFX12-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v25, v6, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
@@ -18863,8 +18849,8 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v5, v28, v5 :: v_dual_lshlrev_b32 v50, 16, v19
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v21
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v30, v30, v22, 0x7fff
-; GFX12-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v5, v5, v21, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v53, v50
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v17
@@ -18936,6 +18922,7 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v30, v25
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v21
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
+; GFX12-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v17, v1, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v24, v22
@@ -18974,12 +18961,11 @@ define <32 x bfloat> @v_maximumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v19, v50, v53, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v21
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v21, 16, v24
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v19
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v30, v3, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v2
-; GFX12-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v3, v52, v3, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v54, v2, vcc_lo
@@ -20431,9 +20417,9 @@ define <3 x bfloat> @v_maximumnum_v3bf16_no_ieee(<3 x bfloat> %x, <3 x bfloat> %
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v10, 16, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v5
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
@@ -20646,10 +20632,10 @@ define <3 x bfloat> @v_maximumnum_v3bf16_no_ieee(<3 x bfloat> %x, <3 x bfloat> %
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v5
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0, v0
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
index 0f1c1cf0d80af..ee52611fae5ca 100644
--- a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
+++ b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
@@ -13497,18 +13497,18 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
 ; ALIGNED-NEXT:    v_lshl_or_b32 v3, v18, 8, v20
 ; ALIGNED-NEXT:    v_lshl_or_b32 v4, v14, 8, v15
 ; ALIGNED-NEXT:    v_lshl_or_b32 v103, v4, 16, v3
-; ALIGNED-NEXT:    buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:7
 ; ALIGNED-NEXT:    v_lshl_or_b32 v4, v17, 8, v19
+; ALIGNED-NEXT:    buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:7
 ; ALIGNED-NEXT:    s_waitcnt vmcnt(4)
 ; ALIGNED-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:1292 ; 4-byte Folded Spill
 ; ALIGNED-NEXT:    s_waitcnt vmcnt(3)
 ; ALIGNED-NEXT:    buffer_store_dword v106, off, s[0:3], s32 offset:1296 ; 4-byte Folded Spill
 ; ALIGNED-NEXT:    s_waitcnt vmcnt(2)
 ; ALIGNED-NEXT:    buffer_store_dword v123, off, s[0:3], s32 offset:1304 ; 4-byte Folded Spill
-; ALIGNED-NEXT:    s_waitcnt vmcnt(1)
-; ALIGNED-NEXT:    buffer_store_dword v125, off, s[0:3], s32 offset:1308 ; 4-byte Folded Spill
 ; ALIGNED-NEXT:    v_lshl_or_b32 v86, v77, 16, v4
 ; ALIGNED-NEXT:    v_lshl_or_b32 v77, v11, 8, v12
+; ALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; ALIGNED-NEXT:    buffer_store_dword v125, off, s[0:3], s32 offset:1308 ; 4-byte Folded Spill
 ; ALIGNED-NEXT:    v_lshl_or_b32 v71, v91, 16, v77
 ; ALIGNED-NEXT:    v_lshl_or_b32 v77, v6, 8, v8
 ; ALIGNED-NEXT:    v_lshl_or_b32 v91, v7, 8, v5
diff --git a/llvm/test/CodeGen/AMDGPU/minimumnum.bf16.ll b/llvm/test/CodeGen/AMDGPU/minimumnum.bf16.ll
index 1d3f163c36698..b78a655c4f2e4 100644
--- a/llvm/test/CodeGen/AMDGPU/minimumnum.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/minimumnum.bf16.ll
@@ -1909,9 +1909,9 @@ define <3 x bfloat> @v_minimumnum_v3bf16(<3 x bfloat> %x, <3 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v10, 16, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v5
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
@@ -2124,10 +2124,10 @@ define <3 x bfloat> @v_minimumnum_v3bf16(<3 x bfloat> %x, <3 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v5
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v0
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
@@ -5068,8 +5068,8 @@ define <6 x bfloat> @v_minimumnum_v6bf16(<6 x bfloat> %x, <6 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v15, v8, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v8, v10, v13, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v1
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v1, v1, v4 :: v_dual_lshlrev_b32 v16, 16, v2
@@ -5464,8 +5464,8 @@ define <6 x bfloat> @v_minimumnum_v6bf16(<6 x bfloat> %x, <6 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v15, v8, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v8, v10, v13, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v1
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, 16, v5
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v10, v10
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -10200,9 +10200,9 @@ define <16 x bfloat> @v_minimumnum_v16bf16(<16 x bfloat> %x, <16 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v12
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc_lo
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v25, v25
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v5, v18, v5, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v2 :: v_dual_lshlrev_b32 v25, 16, v1
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -11278,9 +11278,9 @@ define <16 x bfloat> @v_minimumnum_v16bf16(<16 x bfloat> %x, <16 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v12
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc_lo
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v25, v25
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v5, v18, v5, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v2 :: v_dual_lshlrev_b32 v25, 16, v1
@@ -15227,255 +15227,254 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v134, 16, v134
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v7
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v22
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s0, v15, v15
-; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s44, v112, v134
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s3, v33, v33
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s44, v112, v134
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v13.h, v29.h, s2
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.l, v10.h, v26.h, s8
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v71, 0xffff0000, v3
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s14, v55, v55
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s17, v66, v66
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v55.l, v29.h, v15.l, s3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v9
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v83, 0xffff0000, v1
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s9, v39, v39
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v39.l, v4.h, v20.h, s20
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v71, v71
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v85.l, v15.l
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s9, v39, v39
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v39.l, v4.h, v20.h, s20
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v66.l, v26.h, v33.l, s9
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v80.l, v20.h, v39.l, s21
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v102.l, v39.l
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v118.l, v66.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v132.l, v80.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v48.l, v3.h, v19.h, s22
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v115, 16, v115
-; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s42, v102, v132
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v118, 16, v118
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s42, v102, v132
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v81.l, v19.h, v48.l, s23
-; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v102.l, v80.l, v39.l, s42
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s23, v85, v115
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v102.l, v80.l, v39.l, s42
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v34, 0xffff0000, v12
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
+; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v8
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
-; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s41, v101, v131
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v102, v102, v102
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v102, v102, v102 :: v_dual_lshlrev_b32 v101, 16, v101
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s12, v53, v53
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v53.l, v1.h, v17.h, s26
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s26, v96, v118
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
 ; GFX11-TRUE16-NEXT:    v_max_f32_e32 v85, v85, v85
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.l, v49.h, v30.h, s0
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v84, 0xffff0000, v17
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s41, v101, v131
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v30
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s4, v34, v34
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_dual_max_f32 v96, v96, v96 :: v_dual_lshlrev_b32 v101, 16, v101
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
+; GFX11-TRUE16-NEXT:    v_max_f32_e32 v96, v96, v96
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s1, v31, v31
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v64, 0xffff0000, v23
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v54.l, v30.h, v14.l, s1
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v101, v101, v101
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v114.l, v54.l
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s15, v64, v64
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v114, 16, v114
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s16, v65, v65
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
-; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s22, v84, v114
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
-; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s24, v86, v116
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v97.l, v34.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v68.l, v24.h, v35.l, s13
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v119.l, v67.l
-; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s25, v87, v117
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v84, v84, v84
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s22, v84, v114
+; GFX11-TRUE16-NEXT:    v_max_f32_e32 v101, v101, v101
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v37.l, v6.h, v22.h, s16
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v98.l, v35.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v69.l, v23.h, v36.l, s15
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v128.l, v68.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v119, 16, v119
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v99.l, v36.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v70.l, v22.h, v37.l, s17
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v83.l, v17.h, v53.l, s27
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v129.l, v69.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v128, 16, v128
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s27, v97, v119
-; GFX11-TRUE16-NEXT:    v_dual_max_f32 v86, v86, v86 :: v_dual_lshlrev_b32 v87, 16, v87
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v100.l, v37.l
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v99, 16, v99
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v70.l
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v129, 16, v129
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s28, v98, v128
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v97.l, v67.l, v34.l, s27
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v87, v87, v87
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v85, v85
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v84, v84, v84 :: v_dual_lshlrev_b32 v87, 16, v87
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v130
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s24, v86, v116
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s29, v99, v129
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v98.l, v68.l, v35.l, s28
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s22
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v86, v86
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v103.l, v48.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v133.l, v81.l
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s25, v87, v117
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s40, v100, v130
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v99.l, v69.l, v36.l, s29
 ; GFX11-TRUE16-NEXT:    v_dual_max_f32 v97, v97, v97 :: v_dual_lshlrev_b32 v98, 16, v98
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v130, v96, 16, 1
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s22
-; GFX11-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v87, v87
+; GFX11-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v133, 16, v133
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v100.l, v70.l, v37.l, s40
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-TRUE16-NEXT:    v_dual_max_f32 v98, v98, v98 :: v_dual_lshlrev_b32 v99, 16, v99
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v131, 0x400000, v96
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v132, v97, 16, 1
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v130, v130, v96, 0x7fff
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v96, v96
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s43, v103, v133
-; GFX11-TRUE16-NEXT:    v_dual_max_f32 v99, v99, v99 :: v_dual_lshlrev_b32 v100, 16, v100
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v86, v86, v86 :: v_dual_max_f32 v99, v99, v99
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v133, 0x400000, v97
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v134, v98, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v132, v132, v97, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v96, v130, v131, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v97, v97
-; GFX11-TRUE16-NEXT:    v_max_f32_e32 v100, v100, v100
+; GFX11-TRUE16-NEXT:    v_dual_max_f32 v87, v87, v87 :: v_dual_max_f32 v100, v100, v100
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v144, 0x400000, v98
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v145, v99, 16, 1
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v134, v134, v98, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v97, v132, v133, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v98, v98
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v146, 0x400000, v99
 ; GFX11-TRUE16-NEXT:    v_bfe_u32 v147, v100, 16, 1
+; GFX11-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s23, v85, v85
+; GFX11-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s24, v86, v86
 ; GFX11-TRUE16-NEXT:    v_add3_u32 v145, v145, v99, 0x7fff
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v98, v134, v144, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v99, v99
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v115, v101, 16, 1
-; GFX11-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s1, 0x8000, v15.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s5, 0x8000, v31.l
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
+; GFX11-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s25, v87, v87
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s23
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s24
+; GFX11-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v99, v145, v146, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v100, v100
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v85, 0x400000, v101
-; GFX11-TRUE16-NEXT:    v_add3_u32 v115, v115, v101, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s2, 0x8000, v32.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s25
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.h, v116.h, v15.l, s1
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v118.h, v31.l, s5
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s7, 0x8000, v34.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s6, 0x8000, v33.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s3, 0x8000, v35.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s8, 0x8000, v36.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.h, v97.h, v34.l, s7
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0x8000, v37.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.h, v96.h, v33.l, s6
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v85, v101, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.l, v98.h, v35.l, s3
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.h, v99.h, v36.l, s8
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0x8000, v37.l
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v101
+; GFX11-TRUE16-NEXT:    v_add3_u32 v85, v85, v101, 0x7fff
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s0, 0x8000, v14.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s9, 0x8000, v38.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v115, v85, s22
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s11, 0x8000, v54.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s12, 0x8000, v55.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v85, v86, s22
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.l, v114.h, v14.l, s0
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v117, v102, 16, 1
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v87, v102, 16, 1
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s13, 0x8000, v64.l
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v102
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v102
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v102, v102
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v14.l, v14.l, v54.l, s11
-; GFX11-TRUE16-NEXT:    v_add3_u32 v117, v117, v102, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add3_u32 v87, v87, v102, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0x8000, v39.l
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v114
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v15.l, v64.l, s13
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s14, 0x8000, v65.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v117, v86, s22
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v128
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v128
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v87, v115, s22
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v87, 0xffff0000, v114
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v113.l, v53.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s15, 0x8000, v66.l
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v135.l, v83.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v103.l, v81.l, v48.l, s43
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v116
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v116
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.h, v15.h, v65.l, s14
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v115
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v102
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s20, 0x8000, v71.l
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v113, 16, v113
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v102
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v101
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s16, 0x8000, v67.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s17, 0x8000, v68.l
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v131, 0xffff0000, v98
@@ -15484,30 +15483,30 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v132, 0xffff0000, v84
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v33.l, v33.l, v68.l, s17
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s5, 0, v131
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v119, v103, 16, 1
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v87, 0x400000, v103
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v117, v103, 16, 1
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v103
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s7, 0, v132
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v96
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v96
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s21, 0x8000, v80.l
-; GFX11-TRUE16-NEXT:    v_add3_u32 v119, v119, v103, 0x7fff
+; GFX11-TRUE16-NEXT:    v_add3_u32 v117, v117, v103, 0x7fff
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v129, 0xffff0000, v97
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s18, 0x8000, v69.l
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v117
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v115
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v85
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s8, 0, v133
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v51
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v51
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v31.l, v50.h, v51.h, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v101, v101
+; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v34.l, v31.l
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v118
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v118
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v32.l, v51.h, v31.l, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v35, 16, v34
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v84.h, v37.l, s4
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.h, v85.h, v38.l, s9
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v101
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v100
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v32.l
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s4, 0, v129
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v34.l, v34.l, v70.l, s19
@@ -15520,7 +15519,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v86
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0x8000, v31.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.l, v32.l, v31.l, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v87
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v36.h, v118.h, v15.l, s1
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_f32_e64 s9, 0, v39
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v35.l, v35.l, v80.l, s21
@@ -15565,7 +15564,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v135
 ; GFX11-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v103, v103
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f32_e64 s0, v113, v54
-; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v119, v87 :: v_dual_and_b32 v54, 0xffff0000, v0
+; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v117, v119 :: v_dual_and_b32 v54, 0xffff0000, v0
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v48.l
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT:    v_cndmask_b16 v15.l, v83.l, v53.l, s0
@@ -16076,13 +16075,13 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v80, 16, v9
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v13
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v15
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v38, 16, v29
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v39, 16, v13
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v35, v33, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v84, 16, v8
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v49, 0xffff0000, v12
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v34, v35, v33 :: v_dual_and_b32 v49, 0xffff0000, v12
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v29
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v11
@@ -16093,42 +16092,41 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v67, 16, v26
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v68, 16, v10
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v10
-; GFX11-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v52, v51, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v30
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v82, 0xffff0000, v8
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v96, 16, v7
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v39, v64, v55 :: v_dual_and_b32 v70, 0xffff0000, v9
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v6
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v103, 16, v21
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v112, 16, v5
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v68, v67, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v86, 0xffff0000, v7
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v4
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v19
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v80, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v82, v82
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v6
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v18
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v84, v83, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v28
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v5
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v1
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v96, v87, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v26
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v3
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v100, v99, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v4
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v102, v102
@@ -16215,516 +16213,517 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v48, v48, v48
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v53, v117
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e64 s1, v48, v48
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v55, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v65, v118
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v66
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v118, v48, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v67, v49, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v69, v119
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v70
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_lshlrev_b32 v102, 16, v80
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v119, 0x400000, v48
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v71, v52, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v81, v128
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v68
-; GFX11-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v128, v53, 16, 1
+; GFX11-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v83, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v85, v129
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v82
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v53
-; GFX11-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v69, v69, v69
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v81, 16, v81
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v87, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e64 s2, v53, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s1
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v81, v81, v81
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s2
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v144, 0x400000, v81
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v97
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v85, v85, v85
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v96, v130
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v146, v85, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v147, 0x400000, v85
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v96, v97, v66, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v98, v131
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v146, v146, v85, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v101
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v98, v99, v68, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v100, v132
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v96, v96, v96
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v100, v101, v70, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v102, v133
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
+; GFX11-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v98, v98, v98
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v102, v103, v80, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v112, v134
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v134, v81, 16, 1
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v100, v100, v100
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v98, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v98
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v112, v113, v82, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v86, v96, 16, 1
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v134, v134, v81, 0x7fff
+; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v98, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v114, v114, v115, vcc_lo
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v115, 0x400000, v96
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v86, v86, v96, 0x7fff
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v69, v69, v69
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
-; GFX11-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v36, v36, v36
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v36, v98, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v116, v116, v117, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v48, v48
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v98
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v48, v100, 16, 1
-; GFX11-FAKE16-NEXT:    v_add3_u32 v36, v36, v98, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v118, v118, v119 :: v_dual_max_f32 v65, v65, v65
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v119, 0x400000, v100
-; GFX11-FAKE16-NEXT:    v_add3_u32 v48, v48, v100, 0x7fff
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v65, v65, v65
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v130, v65, 16, 1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v131, 0x400000, v65
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v128, v128, v129, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v130, v130, v65, 0x7fff
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v130, v131, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v132, v133, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v132, v133 :: v_dual_lshlrev_b32 v36, 16, v36
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v36, v36, v36
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v134, v144, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e64 s0, v36, v36
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v130, 16, v81
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX11-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v96
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v146, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v146, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v100, v100, v100
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s0
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v85
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v86, v86, v115, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v86, v36, vcc_lo
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v115, v100, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v118
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v114
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v86
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v117, vcc_lo
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v100
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v86, 16, v114
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v116
+; GFX11-FAKE16-NEXT:    v_add3_u32 v115, v115, v100, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v48, v53, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v116
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v117, 16, v128
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v133, 16, v36
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v96, v48, v119, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v48, 16, v114
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v36
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v116
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v128
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v36
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v115, v117, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v34
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v65
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v134, 0xffff0000, v36
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v116
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v116, 0xffff0000, v118
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v48, v34, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v118
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v134, 16, v48
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v146, 16, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v86, v34, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v35
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v128
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v96
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v100, v35, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v96, 0xffff0000, v114
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v114, 16, v118
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v118, 16, v65
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v98, v35, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v37
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v115, v37, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v128
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v144, 0xffff0000, v48
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v114, v37, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v39
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v117, v39, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v116, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v49
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v119, v49, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v118, v49, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v52
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v128, v52, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v128, v52, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v54
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v130, v54 :: v_dual_and_b32 v65, 0xffff0000, v65
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v52, 16, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v130, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v64
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v131, v64, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v131, v64, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v147, v32, v54, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v147, v32, v52 :: v_dual_and_b32 v64, 0xffff0000, v50
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v66
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v102, v102, v102
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v132, v66, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v68
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v32, v133, v68 :: v_dual_and_b32 v81, 0xffff0000, v81
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v146, v146
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v54, v147 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v65
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v119, v102, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v134, v68, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
+; GFX11-FAKE16-NEXT:    v_add3_u32 v119, v119, v102, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v52, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v70
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v54
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v144, v70, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v81, 0xffff0000, v81
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v52
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v146, v70, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v33
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v0
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v34, v33, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v38
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v147
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v102, v102, v102
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v35, v38, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v35, v35, v38 :: v_dual_lshlrev_b32 v34, 16, v147
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v51
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v102, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v51, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v34, v68
-; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v102, 0x7fff
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v54, v147, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v36, v36, v51 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v34, v66
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v52, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v55
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v37, v37, v55 :: v_dual_lshlrev_b32 v34, 16, v34
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v37, v55, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v67
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v34, v34, v34
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v39, v67, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v71
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v55, v34, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v49, v71, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v48, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v83
-; GFX11-FAKE16-NEXT:    v_add3_u32 v55, v55, v34, 0x7fff
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v52, v83, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v49, v83, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v87
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v64, v87, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v49, v54, v87 :: v_dual_lshlrev_b32 v34, 16, v34
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v97
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v31, v97, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v31, v97, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v99
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v32, v99, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v98
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v48, v33, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v114
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v48, 0x400000, v34
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v100, v35, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v116
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v33, v115, v36 :: v_dual_and_b32 v86, 0xffff0000, v86
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v34, v34, v34 :: v_dual_cndmask_b32 v55, v32, v99
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v96
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v34, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v34
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v86, v33, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v54, v54, v34, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v98, v35, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v115
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v114, v36, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v55, v48, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v118
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v48
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v117, v37, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v66, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v117
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v116, v37, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v96
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v119, v38, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v54
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v118, v38, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v69
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v69, 16, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v128, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v81
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v49, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v48, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e64 vcc_lo, 0x8000, v147
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v147, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v85
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v51, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v54
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v51, 0xffff0000, v48
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v54, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v86
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v132, v52, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v134
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v133, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v51
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v51, 16, v112
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v39, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v49, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v52
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v133
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v48, v132, v51 :: v_dual_lshlrev_b32 v51, 16, v112
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v144
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v134, v55, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v84
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v39, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v145, v145
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v67, v51, v51
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v135, v84, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v102, v102
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v53, v129 :: v_dual_lshlrev_b32 v55, 16, v54
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v84
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v119, v129, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v101
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v101, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v53, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v54
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v67, v51, v51
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v66, 16, v52
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v101, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v55, v65
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v52, 0xffff0000, v52
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v54, v84, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v84, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v80
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v80, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_lshlrev_b32 v80, 16, v15
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v144, v64, vcc_lo
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v64, v67, 16, 1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v66, v80, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v67, 16, 1
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v51, v146, v64 :: v_dual_lshlrev_b32 v80, 16, v15
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v103
-; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v64, v67, 0x7fff
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v103, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v67, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v65, v103, vcc_lo
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v65, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v53, v65 :: v_dual_and_b32 v68, 0xffff0000, v0
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v68, v53, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v67, v70, v69, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v52
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v66, v55, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v66, v64 :: v_dual_lshlrev_b32 v55, 16, v55
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v64
-; GFX11-FAKE16-NEXT:    v_add3_u32 v66, v68, v53, 0x7fff
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v69, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v53
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v69, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v55, v55, v55
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v82
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v71, v65, v82 :: v_dual_lshlrev_b32 v70, 16, v55
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v66, v68, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v68, v55, 16, 1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v65, v82, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11-FAKE16-NEXT:    v_add3_u32 v66, v68, v55, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v68, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v69, v70
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v53
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v55, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v64, v67, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v15, v50 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v15, v50, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v15, 16, v50
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v113
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v55
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v66, v66, v66
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v71, v113 :: v_dual_lshlrev_b32 v80, 16, v68
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v15, 0xffff0000, v64
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v68, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v68 :: v_dual_and_b32 v15, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v84
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v70, v84, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v70, v84, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v15
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v15, v65, v69, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v54
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v14
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v64, v54, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v80, v71
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v50, v68 :: v_dual_lshlrev_b32 v80, 16, v30
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v66, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v80, 16, v30
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v55
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v50, v68, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
+; GFX11-FAKE16-NEXT:    v_add3_u32 v54, v54, v66, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v14, v30, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v65, v65, v65 :: v_dual_max_f32 v66, v66, v66
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v64, v66, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v64, v66, 0x7fff
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v64, v71 :: v_dual_lshlrev_b32 v71, 16, v13
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v13
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v69, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v64
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v54, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v67
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v30
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v53, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v54
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v69
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v65, 16, 1
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_lshlrev_b32 v66, 16, v30
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v54, v54, v65, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v67
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_and_b32 v54, 0xffff0000, v54
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v70, v66
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v30, v69, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v71, v71
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v65, v65, v65
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v13, v29, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v55, v65, 16, 1
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v80, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v55, v55, v65, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v80, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v71
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v29, v71, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v55
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v67, v55, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v66, v13, v13 :: v_dual_cndmask_b32 v29, v67, v64
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v55
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v65
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v64
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v54
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v66, v13, v13
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v13, v53, v29 :: v_dual_lshlrev_b32 v70, 16, v71
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v13, v53, v29, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v68
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v68, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v70, v67
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v65, v71, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v50
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v55
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v29, v50, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v50, v53, v66, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v66
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v11
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_max_f32 v53, v55, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_and_b32 v55, 0xffff0000, v55
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v28
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v54, v54 :: v_dual_lshlrev_b32 v54, 16, v28
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v29, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v50
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v29, v64, v29 :: v_dual_and_b32 v50, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v64, v53, 16, 1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v28, v28, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v69
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v64, v64, v53, 0x7fff
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v69, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v28
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v55, v69, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v27
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v11, v27, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v12
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v64, v67 :: v_dual_lshlrev_b32 v68, 16, v28
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v64, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v69, v68
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v28, v12 :: v_dual_lshlrev_b32 v67, 16, v11
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v27, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v30
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v55, v30, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v30, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v53
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v71
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v30, v71 :: v_dual_lshlrev_b32 v55, 16, v64
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v64
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v66
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v71
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v54, v54, v54
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v30, v71, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v67, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v27, vcc_lo
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v50, v54, 16, 1
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v64
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v50, v50, v54, 0x7fff
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v64, v64, v64 :: v_dual_cndmask_b32 v27, v55, v27
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v65
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v68, v65 :: v_dual_max_f32 v55, v55, v55
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v50, v55, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v55
-; GFX11-FAKE16-NEXT:    v_add3_u32 v50, v50, v55, 0x7fff
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v68, v65, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v10
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v10, v26, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v67, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v54, v64, 16, 1
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v67 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v64, 16, v64
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v64, v64, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v68, 16, v9
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v55, v64, 16, 1
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v54, vcc_lo
+; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v54, v64, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v55, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v50
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v12
-; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v55, v64, 0x7fff
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v65
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v12, v30, v12 :: v_dual_lshlrev_b32 v67, 16, v10
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v9
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v67, v55
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v65, v10 :: v_dual_lshlrev_b32 v55, 16, v25
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v10, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v9, v25, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v9, v25 :: v_dual_lshlrev_b32 v54, 16, v54
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v25
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v28
+; GFX11-FAKE16-NEXT:    v_max_f32_e32 v54, v54, v54
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v53
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v25, v9, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v11
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v25
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v28, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v30, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v66
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v12, v33, v12, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v11, v66, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v64, v55
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v24
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v54, v54, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v25, v9, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v50, 16, v50
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v50, v50, v50 :: v_dual_lshlrev_b32 v55, 16, v8
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v64, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v30, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v24, v24, v8, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v11, v28, v11 :: v_dual_lshlrev_b32 v54, 16, v24
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v30
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v10
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v11, v34, v11, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v55, v54
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v24, v8 :: v_dual_lshlrev_b32 v55, 16, v23
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v7, v7, v23 :: v_dual_lshlrev_b32 v50, 16, v50
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_max_f32_e32 v50, v50, v50
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v7, v23, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v53, v66, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v54
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v23, v23, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v65
-; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_and_b32 v50, 0xffff0000, v50
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v55, 16, v23
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v23
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v53, 16, v54
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v30
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_dual_max_f32 v53, v53, v53 :: v_dual_cndmask_b32 v10, v28, v10
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v9
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v28, v53, 16, 1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v54, v9, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v64, v55
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v23, v7 :: v_dual_lshlrev_b32 v55, 16, v6
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v23, v7, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v25
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v9, v25 :: v_dual_lshlrev_b32 v30, 16, v30
@@ -16739,16 +16738,17 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v50, 16, v25
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v53, v28, 16, 1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v54, v9 :: v_dual_lshlrev_b32 v64, 16, v6
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v53, v53, v28, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v28
+; GFX11-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v9, v36, v9, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v22, v22, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v50, v8 :: v_dual_lshlrev_b32 v55, 16, v22
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v21
@@ -16762,12 +16762,10 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v53
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v21, v21, v5, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v24
-; GFX11-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v8, v24 :: v_dual_lshlrev_b32 v53, 16, v21
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v24, 0xffff0000, v25
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v28
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v24
 ; GFX11-FAKE16-NEXT:    v_max_f32_e32 v24, v30, v30
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v50, v8, vcc_lo
@@ -16785,14 +16783,15 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v20, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v23, v23, v24, 0x7fff
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v4
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v20, v20, v4, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v24, v24
 ; GFX11-FAKE16-NEXT:    v_bfe_u32 v24, v30, 16, 1
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v23, v23, v50 :: v_dual_lshlrev_b32 v50, 16, v20
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v28
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v24, v24, v30, 0x7fff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v28, 0x400000, v30
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v25, v7, vcc_lo
@@ -16803,6 +16802,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v7, v38, v7, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v20, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v6
+; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v25, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v19
@@ -16822,19 +16822,18 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v22
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v5, v28, v5 :: v_dual_lshlrev_b32 v50, 16, v19
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v21
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_add3_u32 v30, v30, v22, 0x7fff
-; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v5, v5, v21, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v53, v50
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v17
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v21, v19, v3, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v22, v30, v54 :: v_dual_lshlrev_b32 v21, 16, v21
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v23
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_dual_max_f32 v21, v21, v21 :: v_dual_lshlrev_b32 v30, 16, v0
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v23, 16, v22
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v25, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v24
@@ -16882,6 +16881,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v30, v25
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v21
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v17, v1, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v24, v22
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
@@ -16915,11 +16915,10 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v19, v50, v53, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v21
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v21, 16, v24
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v19
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v30, v3, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v2
-; GFX11-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v3, v52, v3, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v54, v2, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v1
@@ -16992,277 +16991,273 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v134, 16, v134
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v7
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v22
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s0, v15, v15
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s44, v112, v134
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s3, v33, v33
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s44, v112, v134
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v13.h, v29.h, s2
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.l, v10.h, v26.h, s8
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v71, 0xffff0000, v3
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s14, v55, v55
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v112.l, v82.l, v52.l, s44
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s17, v66, v66
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v55.l, v29.h, v15.l, s3
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v9
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v67, 0xffff0000, v5
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v112
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v39, 0xffff0000, v26
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v83, 0xffff0000, v1
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v71, v71
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v85.l, v15.l
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s9, v39, v39
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v39.l, v4.h, v20.h, s20
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v71, v71
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v21
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v85.l, v15.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v66.l, v26.h, v33.l, s9
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v80.l, v20.h, v39.l, s21
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v102.l, v39.l
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v96.l, v33.l
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v115.l, v55.l
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s18, v67, v67
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v118.l, v66.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v132.l, v80.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s10, v48, v48
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s26, v83, v83
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v48.l, v3.h, v19.h, s22
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v132, 16, v132
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v115, 16, v115
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s42, v102, v132
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v118, 16, v118
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s19, v68, v68
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v38.l, v5.h, v21.h, s18
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s42, v102, v132
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v81.l, v19.h, v48.l, s23
-; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s23, v85, v115
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v71.l, v21.h, v38.l, s19
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v102.l, v80.l, v39.l, s42
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s23, v85, v115
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v101.l, v38.l
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v34, 0xffff0000, v12
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
+; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v131.l, v71.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v8
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v85.l, v55.l, v15.l, s23
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v24
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s41, v101, v131
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v102, v102, v102
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
+; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v102, v102, v102 :: v_dual_lshlrev_b32 v101, 16, v101
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s12, v53, v53
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v53.l, v1.h, v17.h, s26
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s26, v96, v118
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v14, 0xffff0000, v50
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v131, 16, v131
 ; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v85, v85, v85
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v14, v14
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.l, v49.h, v30.h, s0
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v96.l, v66.l, v33.l, s26
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v84, 0xffff0000, v17
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s41, v101, v131
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff0000, v28
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v96, 16, v96
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v30
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s4, v34, v34
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s13, v54, v54
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v96, v96, v96 :: v_dual_lshlrev_b32 v101, 16, v101
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
+; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v96, v96, v96
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s1, v31, v31
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v64, 0xffff0000, v23
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v101.l, v71.l, v38.l, s41
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v64, 0xffff0000, v23
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v54.l, v30.h, v14.l, s1
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s27, v84, v84
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v84.l, v14.l
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v101, v101, v101
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s5, v35, v35
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v114.l, v54.l
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v6
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v101, 16, v101
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v11
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v27
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s15, v64, v64
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v114, 16, v114
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s16, v65, v65
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s22, v84, v114
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v9.h, v25.h, s10
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v35.l, v8.h, v24.h, s12
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v67.l, v25.h, v34.l, s11
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s24, v86, v116
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.l, v12.h, v28.h, s4
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v97.l, v34.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v68.l, v24.h, v35.l, s13
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v119.l, v67.l
-; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s25, v87, v117
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s22, v84, v114
+; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v101, v101, v101
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s6, v36, v36
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.l, v7.h, v23.h, s14
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s7, v37, v37
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v84, v84, v84
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v37.l, v6.h, v22.h, s16
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v64.l, v28.h, v31.l, s5
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.l, v11.h, v27.h, s6
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v98.l, v35.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v69.l, v23.h, v36.l, s15
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v128.l, v68.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v119, 16, v119
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v84.l, v54.l, v14.l, s22
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v86.l, v31.l
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v65.l, v27.h, v32.l, s7
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v99.l, v36.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v70.l, v22.h, v37.l, s17
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v83.l, v17.h, v53.l, s27
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v116.l, v64.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v129.l, v69.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v128, 16, v128
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s27, v97, v119
-; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v86, v86, v86 :: v_dual_lshlrev_b32 v87, 16, v87
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v84, 16, v84
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v87.l, v32.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v100.l, v37.l
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
+; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v117.l, v65.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v99, 16, v99
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v70.l
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v116, 16, v116
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v129, 16, v129
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s28, v98, v128
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v97.l, v67.l, v34.l, s27
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v87, v87, v87
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v85, v85
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v84, v84, v84 :: v_dual_lshlrev_b32 v87, 16, v87
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v117, 16, v117
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v130
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s24, v86, v116
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s29, v99, v129
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v98.l, v68.l, v35.l, s28
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v97, 16, v97
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s22
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v86, v86
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v114, v84, 16, 1
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v103.l, v48.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v133.l, v81.l
+; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s25, v87, v117
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s40, v100, v130
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v86.l, v64.l, v31.l, s24
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v99.l, v69.l, v36.l, s29
 ; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v97, v97, v97 :: v_dual_lshlrev_b32 v98, 16, v98
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v84
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v130, v96, 16, 1
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s22
-; GFX12-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v87, v87
+; GFX12-TRUE16-NEXT:    v_add3_u32 v114, v114, v84, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v84, v84
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v133, 16, v133
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v87.l, v65.l, v32.l, s25
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v100.l, v70.l, v37.l, s40
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v98, v98, v98 :: v_dual_lshlrev_b32 v99, 16, v99
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v131, 0x400000, v96
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v132, v97, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v130, v130, v96, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s22
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v114, v114, v115, s22
+; GFX12-TRUE16-NEXT:    v_add3_u32 v130, v130, v96, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v96, v96
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s43, v103, v133
-; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v99, v99, v99 :: v_dual_lshlrev_b32 v100, 16, v100
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v87, 16, v87
+; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v86, v86, v86 :: v_dual_max_num_f32 v99, v99, v99
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v133, 0x400000, v97
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v134, v98, 16, 1
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v132, v132, v97, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v96, v130, v131, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v97, v97
-; GFX12-TRUE16-NEXT:    v_max_num_f32_e32 v100, v100, v100
+; GFX12-TRUE16-NEXT:    v_dual_max_num_f32 v87, v87, v87 :: v_dual_max_num_f32 v100, v100, v100
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v116, v85, 16, 1
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v118, v86, 16, 1
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v144, 0x400000, v98
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v145, v99, 16, 1
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v134, v134, v98, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v97, v132, v133, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v98, v98
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v117, 0x400000, v85
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v86
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v128, v87, 16, 1
 ; GFX12-TRUE16-NEXT:    v_or_b32_e32 v146, 0x400000, v99
 ; GFX12-TRUE16-NEXT:    v_bfe_u32 v147, v100, 16, 1
+; GFX12-TRUE16-NEXT:    v_add3_u32 v116, v116, v85, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s23, v85, v85
+; GFX12-TRUE16-NEXT:    v_add3_u32 v118, v118, v86, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s24, v86, v86
 ; GFX12-TRUE16-NEXT:    v_add3_u32 v145, v145, v99, 0x7fff
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v98, v134, v144, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v99, v99
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v115, v101, 16, 1
-; GFX12-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s1, 0x8000, v15.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s5, 0x8000, v31.l
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v129, 0x400000, v87
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v84, 0x400000, v100
+; GFX12-TRUE16-NEXT:    v_add3_u32 v128, v128, v87, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s25, v87, v87
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s23
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s24
+; GFX12-TRUE16-NEXT:    v_add3_u32 v147, v147, v100, 0x7fff
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v99, v145, v146, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v100, v100
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v85, 0x400000, v101
-; GFX12-TRUE16-NEXT:    v_add3_u32 v115, v115, v101, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s2, 0x8000, v32.l
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s25
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.h, v116.h, v15.l, s1
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v118.h, v31.l, s5
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s7, 0x8000, v34.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s6, 0x8000, v33.l
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.h, v128.h, v32.l, s2
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s3, 0x8000, v35.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s8, 0x8000, v36.l
-; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.h, v97.h, v34.l, s7
-; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0x8000, v37.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.h, v96.h, v33.l, s6
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v85, v101, 16, 1
+; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.l, v98.h, v35.l, s3
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.h, v99.h, v36.l, s8
+; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s4, 0x8000, v37.l
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v101
+; GFX12-TRUE16-NEXT:    v_add3_u32 v85, v85, v101, 0x7fff
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v84, v147, v84, s22
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v101, v101
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s0, 0x8000, v14.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s9, 0x8000, v38.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v115, v85, s22
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s11, 0x8000, v54.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s12, 0x8000, v55.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v85, v85, v86, s22
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.l, v114.h, v14.l, s0
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v117, v102, 16, 1
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v87, v102, 16, 1
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s13, 0x8000, v64.l
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v86, 0x400000, v102
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v115, 0x400000, v102
 ; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e64 s22, v102, v102
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v14.l, v14.l, v54.l, s11
-; GFX12-TRUE16-NEXT:    v_add3_u32 v117, v117, v102, 0x7fff
+; GFX12-TRUE16-NEXT:    v_add3_u32 v87, v87, v102, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0x8000, v39.l
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v114
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v15.l, v64.l, s13
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s14, 0x8000, v65.l
-; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v117, v86, s22
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v128
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v128
+; GFX12-TRUE16-NEXT:    v_cndmask_b32_e64 v86, v87, v115, s22
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v87, 0xffff0000, v114
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v113.l, v53.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s15, 0x8000, v66.l
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v135.l, v83.l
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v103.l, v81.l, v48.l, s43
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v116
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v116
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.h, v15.h, v65.l, s14
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v115
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s2, 0, v102
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s20, 0x8000, v71.l
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v113, 16, v113
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v103, 16, v103
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v102
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s0, 0, v101
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s16, 0x8000, v67.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s17, 0x8000, v68.l
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v131, 0xffff0000, v98
@@ -17272,32 +17267,32 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v33.l, v33.l, v68.l, s17
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s5, 0, v131
-; GFX12-TRUE16-NEXT:    v_bfe_u32 v119, v103, 16, 1
-; GFX12-TRUE16-NEXT:    v_or_b32_e32 v87, 0x400000, v103
+; GFX12-TRUE16-NEXT:    v_bfe_u32 v117, v103, 16, 1
+; GFX12-TRUE16-NEXT:    v_or_b32_e32 v119, 0x400000, v103
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s7, 0, v132
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v96
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v96
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s21, 0x8000, v80.l
-; GFX12-TRUE16-NEXT:    v_add3_u32 v119, v119, v103, 0x7fff
+; GFX12-TRUE16-NEXT:    v_add3_u32 v117, v117, v103, 0x7fff
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v129, 0xffff0000, v97
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s18, 0x8000, v69.l
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v117
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s3, 0, v115
 ; GFX12-TRUE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v85
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s8, 0, v133
 ; GFX12-TRUE16-NEXT:    s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v51
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v51
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v31.l, v50.h, v51.h, vcc_lo
-; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v101, v101
+; GFX12-TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e32 v34.l, v31.l
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v101, 0xffff0000, v118
+; GFX12-TRUE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v118
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v32.l, v51.h, v31.l, vcc_lo
 ; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v35, 16, v34
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v84.h, v37.l, s4
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.h, v85.h, v38.l, s9
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v101
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s1, 0, v100
 ; GFX12-TRUE16-NEXT:    v_mov_b16_e64 v130.l, v32.l
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s4, 0, v129
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v34.l, v34.l, v70.l, s19
@@ -17311,7 +17306,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e64 s10, 0x8000, v31.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.l, v32.l, v31.l, vcc_lo
-; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v87
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v36.h, v118.h, v15.l, s1
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_f32_e64 s9, 0, v39
@@ -17362,7 +17357,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-TRUE16-NEXT:    v_cmp_lt_f32_e64 s0, v113, v54
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v119, v87 :: v_dual_and_b32 v54, 0xffff0000, v0
+; GFX12-TRUE16-NEXT:    v_dual_cndmask_b32 v55, v117, v119 :: v_dual_and_b32 v54, 0xffff0000, v0
 ; GFX12-TRUE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v48.l
 ; GFX12-TRUE16-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-TRUE16-NEXT:    v_cndmask_b16 v15.l, v83.l, v53.l, s0
@@ -17968,14 +17963,14 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v80, 16, v9
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v37, 0xffff0000, v13
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX12-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v31, 0xffff0000, v15
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v38, 16, v29
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v39, 16, v13
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v83, 16, v24
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v35, v33, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v84, 16, v8
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v49, 0xffff0000, v12
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v34, v35, v33 :: v_dual_and_b32 v49, 0xffff0000, v12
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v37, v37
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v48, 0xffff0000, v29
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v11
@@ -17987,48 +17982,47 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v67, 16, v26
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v68, 16, v10
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v66, 0xffff0000, v10
-; GFX12-FAKE16-NEXT:    scratch_load_b32 v50, off, s32
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v52, v51, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v36, 0xffff0000, v30
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v82, 0xffff0000, v8
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v87, 16, v23
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v96, 16, v7
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v39, v64, v55 :: v_dual_and_b32 v70, 0xffff0000, v9
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v22
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v6
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v103, 16, v21
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v112, 16, v5
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v68, v67, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v86, 0xffff0000, v7
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v20
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v4
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v19
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v80, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v82, v82
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v6
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v3
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v18
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v2
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v84, v83, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v28
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v102, 0xffff0000, v5
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v135, 16, v17
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v1
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v96, v87, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v26
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v3
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v15
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v100, v99, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v4
@@ -18137,6 +18131,8 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v48, v48, v48
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v53, v117
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e64 s1, v48, v48
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v55, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v65, v118
@@ -18154,21 +18150,26 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v71, v52, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v81, v128
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v68
-; GFX12-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v128, v53, 16, 1
+; GFX12-FAKE16-NEXT:    v_add3_u32 v118, v118, v48, 0x7fff
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v83, v54, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v85, v129
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v112, 16, v82
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v53
-; GFX12-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v69, v69, v69
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v81, 16, v81
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v87, v64, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v128, v128, v53, 0x7fff
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e64 s2, v53, v53
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v118, v118, v119, s1
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v81, v81, v81
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v85, 16, v85
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v128, v128, v129, s2
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v144, 0x400000, v81
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v130, 16, v97
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v85, v85, v85
@@ -18186,432 +18187,415 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v98, v99, v68, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v100, v132
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v96, v96, v96
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v98, 16, v98
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v100, v101, v70, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v102, v133
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
+; GFX12-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v98, v98, v98
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v102, v103, v80, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v112, v134
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v134, v81, 16, 1
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v100, v100, v100
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v48, v98, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v98
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v112, v113, v82, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v86, v86
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v86, v96, 16, 1
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v134, v134, v81, 0x7fff
+; GFX12-FAKE16-NEXT:    v_add3_u32 v48, v48, v98, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v114, v114, v115, vcc_lo
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v115, 0x400000, v96
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v86, v86, v96, 0x7fff
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v69
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v69, v69, v69
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v132, v69, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v133, 0x400000, v69
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v132, v132, v69, 0x7fff
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v36, v36, v36
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX12-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v36, v98, 16, 1
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v116, v116, v117, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v48, v48
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v98
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v48, v100, 16, 1
-; GFX12-FAKE16-NEXT:    v_add3_u32 v36, v36, v98, 0x7fff
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v118, v118, v119 :: v_dual_max_num_f32 v65, v65, v65
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v119, 0x400000, v100
-; GFX12-FAKE16-NEXT:    v_add3_u32 v48, v48, v100, 0x7fff
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v65, v65, v65
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v130, v65, 16, 1
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v131, 0x400000, v65
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v128, v128, v129, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v130, v130, v65, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v130, v131, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v132, v133, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v132, v133 :: v_dual_lshlrev_b32 v36, 16, v36
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v36, v36, v36
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v81, v134, v144, vcc_lo
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v116, v36, 16, 1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v36
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e64 s0, v36, v36
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v130, 16, v81
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v100, 16, v100
+; GFX12-FAKE16-NEXT:    v_add3_u32 v116, v116, v36, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v36, 0x400000, v96
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v85, v146, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v146, 0xffff0000, v50
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v100, v100, v100
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e64 v116, v116, v117, s0
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v131, 16, v85
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v86, v86, v115, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v86, v36, vcc_lo
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v115, v100, 16, 1
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v115, 16, v118
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff0000, v114
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v86
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v117, 0x400000, v100
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v86, 16, v114
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v116
+; GFX12-FAKE16-NEXT:    v_add3_u32 v115, v115, v100, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v117, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v48, v53, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v100, v100
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v116
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v117, 16, v128
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v133, 16, v36
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v132, 16, v36
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v100, 0xffff0000, v116
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v116, 16, v128
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v133, 0xffff0000, v36
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v96, v48, v119, vcc_lo
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v48, 16, v114
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v115, v117, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v34
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v119, 16, v65
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v134, 0xffff0000, v36
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v114, 0xffff0000, v116
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v116, 0xffff0000, v118
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v115, 0xffff0000, v118
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v134, 16, v48
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v146, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v48, v34, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v86, v34, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v35
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v118, 0xffff0000, v128
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v96
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v96, 0xffff0000, v114
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v114, 16, v118
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v118, 16, v65
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v100, v35, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v98, v35, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v37
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v117, 0xffff0000, v128
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v128, 16, v69
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v144, 0xffff0000, v48
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v115, v37, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v114, v37, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v39
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v117, v39, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v116, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v49
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v119, v49, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v118, v49, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v128, v52, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v128, v52, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v54
+; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v52, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v130, v54 :: v_dual_and_b32 v65, 0xffff0000, v65
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v130, v54, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v64
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v131, v64, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v131, v64, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v147, v32, v54, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v147, v32, v52 :: v_dual_and_b32 v64, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v66
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v102, v102, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v132, v66, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v68
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v65
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v119, v102, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v32, v133, v68 :: v_dual_and_b32 v81, 0xffff0000, v81
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v146, v146
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v134, v68, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v69, 0xffff0000, v69
+; GFX12-FAKE16-NEXT:    v_add3_u32 v119, v119, v102, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v54, v147 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v52, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v70
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v54
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v81, 0xffff0000, v81
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v144, v70, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v146, v70, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v33
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v102, 16, v102
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v0
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v34, v33, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v38
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v34, 16, v147
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v102, v102, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v35, v38, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v35, v35, v38 :: v_dual_lshlrev_b32 v34, 16, v147
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v51
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v102, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v129, 0x400000, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v36, v51, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v34, v68
-; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v102, 0x7fff
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v68, 0xffff0000, v0
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v36, v36, v51 :: v_dual_and_b32 v85, 0xffff0000, v85
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v34, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v54, v147, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v52, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v37, v37, v55 :: v_dual_lshlrev_b32 v34, 16, v34
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v37, v55, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v67
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v34, v34, v34
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v39, v67, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v71
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v55, v34, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v49, v71, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v48, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v83
-; GFX12-FAKE16-NEXT:    v_add3_u32 v55, v55, v34, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v52, v83, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v49, v83, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v87
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v64, v87, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v49, v54, v87 :: v_dual_lshlrev_b32 v34, 16, v34
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v97
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v31, v97, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v31, v97, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v99
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v32, v99, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v98
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v34, v34, v34 :: v_dual_cndmask_b32 v55, v32, v99
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v96
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v34, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v34
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v48, v33, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v114
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v48, 0x400000, v34
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v86, v33, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v100
+; GFX12-FAKE16-NEXT:    v_add3_u32 v54, v54, v34, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v100, v35, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v116
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v98, v35, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v115
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v33, v115, v36 :: v_dual_and_b32 v86, 0xffff0000, v86
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v33, v114, v36, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v34, v34
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v55, v48, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v118
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v48
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v66, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v117
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v117, v37, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v116, v37, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v96
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v119, v38, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v35, v118, v38, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v69
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v69, 16, v16
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v36, v128, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v81
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v49, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v37, v130, v48, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e64 vcc_lo, 0x8000, v147
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v147, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v147, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v85
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v51, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v54
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v51, 0xffff0000, v48
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v38, v131, v49, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v54, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v86
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v133
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v48, v132, v52, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v134
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v48, v132, v51 :: v_dual_lshlrev_b32 v51, 16, v112
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v144
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v133, v64, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v51
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v51, 16, v112
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v49, v134, v55, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v84
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v55, v39, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v39, v65, v39, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v145, v145
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v67, v51, v51
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v135, v84, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v102, v102
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v53, v129 :: v_dual_lshlrev_b32 v55, 16, v54
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v84
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v119, v129, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v101
-; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v101, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v53, v55
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v54
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v67, v51, v51
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v66, 16, v52
+; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v101, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v55, v65
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v52, 0xffff0000, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v54, v84, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v84, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v80
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v80, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v65
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v53, v53 :: v_dual_lshlrev_b32 v80, 16, v15
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v66, v80, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v67, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v51, v144, v64, vcc_lo
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v64, v67, 16, 1
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v51, v146, v64 :: v_dual_lshlrev_b32 v80, 16, v15
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v103
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v64, v64, v67, 0x7fff
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v67, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v103, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v65, v103, vcc_lo
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v65, 0x400000, v67
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v64, v65, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v53, v65 :: v_dual_and_b32 v68, 0xffff0000, v0
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v68, v53, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v67, v70, v69, vcc_lo
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v65, 0xffff0000, v16
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v52
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v52, v66, v55, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v52, v66, v64 :: v_dual_lshlrev_b32 v55, 16, v55
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v64
-; GFX12-FAKE16-NEXT:    v_add3_u32 v66, v68, v53, 0x7fff
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v53
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v65, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v69, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v69, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v55, v55, v55
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v82
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v67
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v64
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v68, v55, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v71, v65, v82 :: v_dual_lshlrev_b32 v70, 16, v55
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v65, v82, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v66, v68, v55, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v68, 0x400000, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v66, v68, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v66, v68, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v69, v70
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v55, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v64, v67, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v15, v50 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v15, v50, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v15, 16, v50
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v113
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v70, 16, v55
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v66, v66, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v69, v71, v113 :: v_dual_lshlrev_b32 v80, 16, v68
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v15, 0xffff0000, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v68, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v68 :: v_dual_and_b32 v15, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v84
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v70, v84, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v70, v84, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v15
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v15, v65, v69, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v54
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v14
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v64, v54, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v80, v71
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v66, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v80, 16, v30
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v50, v68 :: v_dual_lshlrev_b32 v80, 16, v30
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v50, v68, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v66
+; GFX12-FAKE16-NEXT:    v_add3_u32 v54, v54, v66, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v69, v14, v30, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v65, v65, v65 :: v_dual_max_num_f32 v66, v66, v66
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v64, v66, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v71, 0x400000, v66
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v64, v64, v66, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v64, v71 :: v_dual_lshlrev_b32 v71, 16, v13
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v71, 16, v13
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v69, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v64
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v54, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v67
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v14, v70, v53, vcc_lo
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v53, 16, v54
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v69
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v65, 16, 1
+; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v67
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_lshlrev_b32 v66, 16, v30
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_add3_u32 v54, v54, v65, 0x7fff
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v67, v53, v67 :: v_dual_and_b32 v54, 0xffff0000, v54
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v70, v66
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v30, v69, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v71, v71
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v65, v65, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v71, v13, v29, vcc_lo
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v29
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v55, v65, 16, 1
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v80, 0x400000, v65
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_add3_u32 v55, v55, v65, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v54, v80, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v55, v80, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v70, v70
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v70, 16, v71
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v65, v29, v71, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v55
+; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v67, v55, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v66, v13, v13 :: v_dual_cndmask_b32 v29, v67, v64
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v55
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v65
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v55, 0xffff0000, v64
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v13, 16, v66
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v64, 16, v54
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v66, v13, v13
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v13, v53, v29 :: v_dual_lshlrev_b32 v70, 16, v71
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v13, v53, v29, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v68
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v68, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v70, v67
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v65, v71, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v50
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v55
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v66, 16, 1
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v29, v50, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v54, 0xffff0000, v54
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v50, v53, v66, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v53, 0x400000, v66
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v67, 16, v12
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v11
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_max_num_f32 v53, v55, v55
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v53 :: v_dual_and_b32 v55, 0xffff0000, v55
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v28
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v54, v54 :: v_dual_lshlrev_b32 v54, 16, v28
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v54
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
+; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v55
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v55, 16, v50
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v29, v64, v29, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v29, v64, v29 :: v_dual_and_b32 v50, 0xffff0000, v50
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v64, v53, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v28, v28, v12, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v69
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v64, v64, v53, 0x7fff
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v28
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v54, v69, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v55, v69, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v27
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -18619,8 +18603,8 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v69, 16, v12
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v53, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v64, v67 :: v_dual_lshlrev_b32 v68, 16, v28
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v64, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v69, v68
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v64, v28, v12 :: v_dual_lshlrev_b32 v67, 16, v11
@@ -18629,70 +18613,74 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v66, v27, v11, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v55, v30, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v30, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v53
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v64
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v66
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v71
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v54, v54, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v68, v30, v71 :: v_dual_lshlrev_b32 v55, 16, v64
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v66
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v68, v30, v71, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v67, v64
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v64, v66, v11, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v50
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v50, v54, 16, 1
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v64
+; GFX12-FAKE16-NEXT:    v_add3_u32 v50, v50, v54, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v27, v54, v27, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v64, v64, v64 :: v_dual_cndmask_b32 v27, v55, v27
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v68, v65 :: v_dual_max_num_f32 v55, v55, v55
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v50, v55, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v67, 0x400000, v55
-; GFX12-FAKE16-NEXT:    v_add3_u32 v50, v50, v55, 0x7fff
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v55, v68, v65, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v65, 16, v10
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v26
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v65, v65
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v10, v26, vcc_lo
-; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v54, v64, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v50, v67, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v50, v50, v67 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v64, 16, v64
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v64, v64, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v65, v26, v10 :: v_dual_lshlrev_b32 v68, 16, v9
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v55, v64, 16, 1
+; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v54, v64, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v54, vcc_lo
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v26, v30, v55, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v50
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v12
-; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v55, v64, 0x7fff
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v64
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v65
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v12, v30, v12 :: v_dual_lshlrev_b32 v67, 16, v10
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v68, 16, v9
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v53, v53, v54, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v67, v55
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v65, v10 :: v_dual_lshlrev_b32 v55, 16, v25
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v54, v65, v10, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v68, v68
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v9, v25, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v9, v25 :: v_dual_lshlrev_b32 v54, 16, v54
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v25
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v28
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v54, v54, v54
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v12, v28, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v53
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v25, v9, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v11
@@ -18704,82 +18692,78 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v30, v12, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v66
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v9
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v12, v33, v12, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v11, v66, vcc_lo
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v54, 16, v54
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v64, v55
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
+; GFX12-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v66, 16, v24
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v54, v54, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v25, v9, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v50, 16, v50
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v50, v50, v50 :: v_dual_lshlrev_b32 v55, 16, v8
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v30, v54, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v64, 0x400000, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX12-FAKE16-NEXT:    v_add3_u32 v30, v30, v54, 0x7fff
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v30, v64, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v30, v64 :: v_dual_and_b32 v53, 0xffff0000, v53
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v66, v66
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
+; GFX12-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v24, v24, v8, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v53
+; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v11, v28, v11 :: v_dual_lshlrev_b32 v54, 16, v24
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v28, 16, v30
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v10
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v8
+; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v11, v34, v11, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v55, v54
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v54, v24, v8 :: v_dual_lshlrev_b32 v55, 16, v23
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v64, v64
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v7, v7, v23 :: v_dual_lshlrev_b32 v50, 16, v50
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
-; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v50, v50, v50
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT:    v_bfe_u32 v53, v50, 16, 1
-; GFX12-FAKE16-NEXT:    v_or_b32_e32 v66, 0x400000, v50
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v7, v23, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v50, 0x7fff
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v64, 16, v7
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v53, v66, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v54
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
-; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v23, v23, v7, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v65
-; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v53, v53 :: v_dual_and_b32 v50, 0xffff0000, v50
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v23
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v55, 16, v23
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v10, v10, v65 :: v_dual_lshlrev_b32 v53, 16, v54
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v30
+; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v54, 16, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v28, v10, vcc_lo
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    v_dual_max_num_f32 v53, v53, v53 :: v_dual_cndmask_b32 v10, v28, v10
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v9
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v28, v53, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v54, v9, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v64, v55
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v55, 16, v6
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v50, 0xffff0000, v50
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v30, v23, v7 :: v_dual_lshlrev_b32 v55, 16, v6
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v30, v23, v7, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v25
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
@@ -18800,15 +18784,17 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v9, v54, v9 :: v_dual_lshlrev_b32 v64, 16, v6
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v53, v53, v28, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v54, 0x400000, v28
+; GFX12-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v9, v36, v9, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v22, v22, v6, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v8
+; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v5
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v50, v8 :: v_dual_lshlrev_b32 v55, 16, v22
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v21
@@ -18821,18 +18807,16 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v53, v22, v6 :: v_dual_lshlrev_b32 v54, 16, v5
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v30, 16, v53
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v21, v21, v5, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v24
-; GFX12-FAKE16-NEXT:    v_perm_b32 v10, v35, v10, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v8, v8, v24 :: v_dual_lshlrev_b32 v53, 16, v21
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v24, 0xffff0000, v25
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v28
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v24
 ; GFX12-FAKE16-NEXT:    v_max_num_f32_e32 v24, v30, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -18859,12 +18843,13 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v4
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v20, v20, v4, vcc_lo
+; GFX12-FAKE16-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v24, v24
 ; GFX12-FAKE16-NEXT:    v_bfe_u32 v24, v30, 16, 1
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v23, v23, v50 :: v_dual_lshlrev_b32 v50, 16, v20
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v28
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v24, v24, v30, 0x7fff
 ; GFX12-FAKE16-NEXT:    v_or_b32_e32 v28, 0x400000, v30
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
@@ -18877,6 +18862,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v50, v20, v4, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v6
+; GFX12-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v25, v6, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v30, v30
@@ -18902,8 +18888,8 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v5, v28, v5 :: v_dual_lshlrev_b32 v50, 16, v19
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v21
 ; GFX12-FAKE16-NEXT:    v_add3_u32 v30, v30, v22, 0x7fff
-; GFX12-FAKE16-NEXT:    v_perm_b32 v8, v37, v8, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v5, v5, v21, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v53, v50
 ; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v53, 16, v17
@@ -18975,6 +18961,7 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v30, v25
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v30, 16, v21
 ; GFX12-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
+; GFX12-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v25, v17, v1, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v24, v22
@@ -19013,12 +19000,11 @@ define <32 x bfloat> @v_minimumnum_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y) {
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v19, v50, v53, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v21
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v21, 16, v24
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v25, 16, v19
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v30, v3, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v2
-; GFX12-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX12-FAKE16-NEXT:    v_perm_b32 v3, v52, v3, 0x5040100
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v54, v2, vcc_lo
@@ -20479,9 +20465,9 @@ define <3 x bfloat> @v_minimumnum_v3bf16_no_ieee(<3 x bfloat> %x, <3 x bfloat> %
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v10, 16, v8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v5
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
@@ -20694,10 +20680,10 @@ define <3 x bfloat> @v_minimumnum_v3bf16_no_ieee(<3 x bfloat> %x, <3 x bfloat> %
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v5
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v4, v4, v5 :: v_dual_and_b32 v5, 0xffff0000, v7
+; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc_lo
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, 0x8000, v0
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc_lo
+; GFX12-FAKE16-NEXT:    v_dual_cndmask_b32 v0, v9, v0 :: v_dual_and_b32 v5, 0xffff0000, v7
 ; GFX12-FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, 0, v6
 ; GFX12-FAKE16-NEXT:    s_wait_alu 0xfffd
 ; GFX12-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v13, v4, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
index 151456e82ae51..57805063b92b1 100644
--- a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
+++ b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
@@ -40,8 +40,8 @@ define <2 x i64> @narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 {
 ; CHECK-NEXT:    v_and_b32_e32 v3, 0x7ffffffe, v6
 ; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; CHECK-NEXT:    v_add_nc_u32_e32 v0, v0, v2
-; CHECK-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, v1, v3
-; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v2, v1, v3
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %zext0 = and <2 x i64> %a, <i64 2147483647, i64 30>
   %zext1 = and <2 x i64> %b, <i64 2147483647, i64 2147483646>
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 120aebf2bf7c8..b4e5fa088b533 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -1944,71 +1944,71 @@ define hidden void @srem_store_div(ptr addrspace(1) %in0, ptr addrspace(1) %in1,
 ; GFX9-NEXT:    global_load_dword v9, v[0:1], off
 ; GFX9-NEXT:    s_mov_b32 s4, 0x2070306
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_cvt_f32_i32_sdwa v3, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; GFX9-NEXT:    v_cvt_f32_i32_sdwa v10, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v14, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v13, sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
-; GFX9-NEXT:    v_cvt_f32_i32_sdwa v10, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
-; GFX9-NEXT:    v_cvt_f32_i32_sdwa v3, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v18, v14
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v17, v3
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v16, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v19, v10
+; GFX9-NEXT:    v_xor_b32_sdwa v15, sext(v4), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_2
 ; GFX9-NEXT:    v_perm_b32 v1, v4, v9, s4
-; GFX9-NEXT:    v_mul_f32_e32 v18, v13, v18
-; GFX9-NEXT:    v_trunc_f32_e32 v18, v18
-; GFX9-NEXT:    v_mad_f32 v13, -v18, v14, v13
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v13|, |v14|
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v13, v3
-; GFX9-NEXT:    v_mul_f32_e32 v14, v16, v19
-; GFX9-NEXT:    v_trunc_f32_e32 v14, v14
-; GFX9-NEXT:    v_mad_f32 v19, -v14, v10, v16
-; GFX9-NEXT:    v_mul_f32_e32 v13, v10, v13
-; GFX9-NEXT:    v_trunc_f32_e32 v13, v13
-; GFX9-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v19|, |v10|
-; GFX9-NEXT:    v_mad_f32 v10, -v13, v3, v10
-; GFX9-NEXT:    v_cvt_f32_i32_sdwa v19, sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
-; GFX9-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v10|, |v3|
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v16
-; GFX9-NEXT:    v_xor_b32_sdwa v12, sext(v9), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_1
+; GFX9-NEXT:    v_mul_f32_e32 v17, v10, v17
+; GFX9-NEXT:    v_trunc_f32_e32 v17, v17
+; GFX9-NEXT:    v_mad_f32 v19, -v17, v3, v10
+; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v19|, |v3|
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v14
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v19, v10
 ; GFX9-NEXT:    v_xor_b32_sdwa v2, sext(v4), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_0
-; GFX9-NEXT:    v_xor_b32_sdwa v15, sext(v4), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_2
-; GFX9-NEXT:    v_mul_f32_e32 v3, v19, v3
+; GFX9-NEXT:    v_xor_b32_sdwa v12, sext(v9), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_1
+; GFX9-NEXT:    v_mul_f32_e32 v3, v13, v3
 ; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
-; GFX9-NEXT:    v_ashrrev_i32_e32 v12, 30, v12
-; GFX9-NEXT:    v_xor_b32_sdwa v10, sext(v9), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_3
-; GFX9-NEXT:    v_cvt_i32_f32_e32 v13, v13
-; GFX9-NEXT:    v_cvt_i32_f32_e32 v18, v18
-; GFX9-NEXT:    v_cvt_i32_f32_e32 v14, v14
-; GFX9-NEXT:    v_mad_f32 v19, -v3, v16, v19
-; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
-; GFX9-NEXT:    v_ashrrev_i32_e32 v15, 30, v15
-; GFX9-NEXT:    v_or_b32_e32 v12, 1, v12
+; GFX9-NEXT:    v_mad_f32 v13, -v3, v14, v13
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v13|, |v14|
+; GFX9-NEXT:    v_ashrrev_i32_e32 v14, 30, v15
+; GFX9-NEXT:    v_mul_f32_e32 v15, v16, v19
+; GFX9-NEXT:    v_trunc_f32_e32 v15, v15
+; GFX9-NEXT:    v_mad_f32 v19, -v15, v10, v16
+; GFX9-NEXT:    v_cvt_f32_i32_sdwa v13, sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v19|, |v10|
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v10, v16
 ; GFX9-NEXT:    v_ashrrev_i32_e32 v2, 30, v2
-; GFX9-NEXT:    v_ashrrev_i32_e32 v10, 30, v10
-; GFX9-NEXT:    v_or_b32_e32 v15, 1, v15
+; GFX9-NEXT:    v_xor_b32_sdwa v19, sext(v9), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_3
+; GFX9-NEXT:    v_cvt_i32_f32_e32 v17, v17
+; GFX9-NEXT:    v_mul_f32_e32 v10, v13, v10
+; GFX9-NEXT:    v_trunc_f32_e32 v10, v10
+; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GFX9-NEXT:    v_cvt_i32_f32_e32 v15, v15
+; GFX9-NEXT:    v_mad_f32 v13, -v10, v16, v13
+; GFX9-NEXT:    v_cvt_i32_f32_e32 v10, v10
 ; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
-; GFX9-NEXT:    v_or_b32_e32 v10, 1, v10
-; GFX9-NEXT:    v_cndmask_b32_e32 v12, 0, v12, vcc
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v19|, |v16|
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, 0, v15, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e32 v10, 0, v10, vcc
+; GFX9-NEXT:    v_ashrrev_i32_e32 v12, 30, v12
+; GFX9-NEXT:    v_ashrrev_i32_e32 v19, 30, v19
+; GFX9-NEXT:    v_or_b32_e32 v12, 1, v12
+; GFX9-NEXT:    v_or_b32_e32 v14, 1, v14
+; GFX9-NEXT:    v_or_b32_e32 v19, 1, v19
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v13|, |v16|
+; GFX9-NEXT:    v_cndmask_b32_e64 v12, 0, v12, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v14, 0, v14, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e32 v13, 0, v19, vcc
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v0, 16, v4
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v11, 8, v4
-; GFX9-NEXT:    v_lshrrev_b32_e32 v17, 24, v4
-; GFX9-NEXT:    v_add_u32_e32 v2, v13, v2
-; GFX9-NEXT:    v_add_u32_e32 v12, v18, v12
-; GFX9-NEXT:    v_add_u32_e32 v13, v14, v15
-; GFX9-NEXT:    v_add_u32_e32 v3, v3, v10
+; GFX9-NEXT:    v_lshrrev_b32_e32 v18, 24, v4
+; GFX9-NEXT:    v_add_u32_e32 v2, v17, v2
+; GFX9-NEXT:    v_add_u32_e32 v3, v3, v12
+; GFX9-NEXT:    v_add_u32_e32 v12, v15, v14
+; GFX9-NEXT:    v_add_u32_e32 v10, v10, v13
 ; GFX9-NEXT:    v_mul_lo_u32 v2, v2, v4
-; GFX9-NEXT:    v_mul_lo_u32 v4, v12, v11
-; GFX9-NEXT:    v_mul_lo_u32 v10, v13, v0
-; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v17
+; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v11
+; GFX9-NEXT:    v_mul_lo_u32 v4, v12, v0
+; GFX9-NEXT:    v_mul_lo_u32 v10, v10, v18
 ; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v2
-; GFX9-NEXT:    v_sub_u32_sdwa v2, v9, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
-; GFX9-NEXT:    v_sub_u32_e32 v4, v17, v10
-; GFX9-NEXT:    v_sub_u32_sdwa v3, v9, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_sub_u32_sdwa v2, v9, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX9-NEXT:    v_sub_u32_e32 v3, v18, v4
+; GFX9-NEXT:    v_sub_u32_sdwa v4, v9, v10 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT:    v_or_b32_sdwa v2, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_or_b32_sdwa v2, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX9-NEXT:    global_store_dword v[5:6], v0, off
 ; GFX9-NEXT:    global_store_dword v[7:8], v1, off
diff --git a/llvm/test/CodeGen/AMDGPU/pr51516.mir b/llvm/test/CodeGen/AMDGPU/pr51516.mir
index 81925de8910f8..cc299789af8dd 100644
--- a/llvm/test/CodeGen/AMDGPU/pr51516.mir
+++ b/llvm/test/CodeGen/AMDGPU/pr51516.mir
@@ -6,7 +6,7 @@
 
 # GCN-LABEL: name: global_sextload_v32i32_to_v32i64
 # GCN: renamable $vgpr34_vgpr35_vgpr36_vgpr37 = SI_SPILL_V128_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.0, align 4, addrspace 5)
-# GCN: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr47, killed renamable $vgpr26_vgpr27_vgpr28_vgpr29, killed renamable $sgpr0_sgpr1, 16, 0, implicit $exec, implicit killed renamable $vgpr46
+# GCN: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr5, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, killed renamable $sgpr0_sgpr1, 16, 0, implicit $exec, implicit killed renamable $vgpr4
 
 # GCN-GCNTRACKER-LABEL: name: global_sextload_v32i32_to_v32i64
 # GCN-GCNTRACKER-NOT: SI_SPILL
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index e452af7d60c0c..b1a618ae4d5c5 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -748,18 +748,18 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX90A-NEXT:    global_load_dwordx2 v[12:13], v[12:13], off
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v15, vcc, -1, v7, vcc
 ; GFX90A-NEXT:    global_load_dwordx2 v[18:19], v[14:15], off offset:-2048
+; GFX90A-NEXT:    global_load_dwordx2 v[20:21], v[14:15], off
 ; GFX90A-NEXT:    v_add_co_u32_e32 v16, vcc, s0, v6
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v17, vcc, -1, v7, vcc
 ; GFX90A-NEXT:    global_load_dwordx2 v[16:17], v[16:17], off offset:-2048
-; GFX90A-NEXT:    v_add_co_u32_e32 v20, vcc, s1, v6
-; GFX90A-NEXT:    global_load_dwordx2 v[14:15], v[14:15], off
-; GFX90A-NEXT:    v_addc_co_u32_e32 v21, vcc, -1, v7, vcc
-; GFX90A-NEXT:    global_load_dwordx2 v[24:25], v[20:21], off offset:-4096
-; GFX90A-NEXT:    global_load_dwordx2 v[26:27], v[20:21], off offset:-2048
-; GFX90A-NEXT:    global_load_dwordx2 v[28:29], v[20:21], off
+; GFX90A-NEXT:    v_add_co_u32_e32 v14, vcc, s1, v6
+; GFX90A-NEXT:    v_addc_co_u32_e32 v15, vcc, -1, v7, vcc
+; GFX90A-NEXT:    global_load_dwordx2 v[24:25], v[14:15], off offset:-4096
+; GFX90A-NEXT:    global_load_dwordx2 v[26:27], v[14:15], off offset:-2048
+; GFX90A-NEXT:    global_load_dwordx2 v[28:29], v[14:15], off
 ; GFX90A-NEXT:    v_add_co_u32_e32 v22, vcc, s2, v6
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v23, vcc, -1, v7, vcc
-; GFX90A-NEXT:    global_load_dwordx2 v[20:21], v[22:23], off offset:-2048
+; GFX90A-NEXT:    global_load_dwordx2 v[14:15], v[22:23], off offset:-2048
 ; GFX90A-NEXT:    global_load_dwordx2 v[30:31], v[6:7], off
 ; GFX90A-NEXT:    v_add_co_u32_e32 v6, vcc, 0x10000, v6
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v7, vcc, 0, v7, vcc
@@ -771,9 +771,10 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(7)
 ; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v18, v1
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v19, v4, vcc
+; GFX90A-NEXT:    s_waitcnt vmcnt(6)
+; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v20, v1
+; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v21, v4, vcc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(5)
-; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v14, v1
-; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v15, v4, vcc
 ; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v16, v1
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v17, v4, vcc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(4)
@@ -786,8 +787,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v28, v1
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v29, v4, vcc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v20, v1
-; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v21, v4, vcc
+; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v14, v1
+; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v15, v4, vcc
 ; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v8, v1
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v4, vcc, v9, v4, vcc
 ; GFX90A-NEXT:    v_add_co_u32_e32 v1, vcc, v10, v1
@@ -847,14 +848,13 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v4, 0xffffc000
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, -1, v5, vcc_lo
 ; GFX11-NEXT:    v_add_co_u32 v9, vcc_lo, 0xffffc000, v4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v10, null, -1, v5, vcc_lo
 ; GFX11-NEXT:    global_load_b64 v[13:14], v[7:8], off offset:-4096
 ; GFX11-NEXT:    v_add_co_u32 v11, vcc_lo, 0xffffd000, v4
-; GFX11-NEXT:    global_load_b64 v[9:10], v[9:10], off offset:-2048
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v12, null, -1, v5, vcc_lo
 ; GFX11-NEXT:    v_add_co_u32 v15, vcc_lo, v4, 0xffffe000
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    global_load_b64 v[9:10], v[9:10], off offset:-2048
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v16, null, -1, v5, vcc_lo
 ; GFX11-NEXT:    global_load_b64 v[11:12], v[11:12], off offset:-2048
 ; GFX11-NEXT:    v_add_co_u32 v17, vcc_lo, 0xffffe000, v4
diff --git a/llvm/test/CodeGen/AMDGPU/repeated-divisor.ll b/llvm/test/CodeGen/AMDGPU/repeated-divisor.ll
index 04eea20993608..d34d2050b157e 100644
--- a/llvm/test/CodeGen/AMDGPU/repeated-divisor.ll
+++ b/llvm/test/CodeGen/AMDGPU/repeated-divisor.ll
@@ -232,8 +232,8 @@ define <2 x float> @v_repeat_divisor_f32_x2_arcp_daz(float %x, float %y, float %
 ; GFX11-NEXT:    v_div_fmas_f32 v3, v3, v4, v6
 ; GFX11-NEXT:    v_div_fixup_f32 v2, v3, v2, 1.0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_mul_f32_e32 v1, v1, v2
 ; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v2
+; GFX11-NEXT:    v_mul_f32_e32 v1, v1, v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %div0 = fdiv arcp float %x, %D
   %div1 = fdiv arcp float %y, %D
@@ -434,8 +434,8 @@ define <3 x float> @v_repeat_divisor_f32_x3_arcp(float %x, float %y, float %z, f
 ; GFX11-NEXT:    v_div_fmas_f32 v4, v4, v5, v6
 ; GFX11-NEXT:    v_div_fixup_f32 v3, v4, v3, 1.0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v3
+; GFX11-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX11-NEXT:    v_mul_f32_e32 v2, v2, v3
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %div0 = fdiv arcp float %x, %D
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv.ll b/llvm/test/CodeGen/AMDGPU/sdiv.ll
index d06d9f97db71c..676359fcec462 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv.ll
@@ -804,82 +804,83 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NEXT:    v_sub_i32_e32 v13, vcc, 0, v1
+; GCN-NEXT:    v_sub_i32_e32 v9, vcc, 0, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_sub_i32_e32 v12, vcc, 0, v5
-; GCN-NEXT:    v_xor_b32_e32 v11, v1, v5
-; GCN-NEXT:    v_max_i32_e32 v5, v5, v12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v12, v5
 ; GCN-NEXT:    v_sub_i32_e32 v10, vcc, 0, v4
 ; GCN-NEXT:    v_xor_b32_e32 v8, v0, v4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v12, v12
 ; GCN-NEXT:    v_max_i32_e32 v4, v4, v10
-; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 0, v5
-; GCN-NEXT:    v_mul_f32_e32 v10, 0x4f7ffffe, v12
+; GCN-NEXT:    v_cvt_f32_u32_e32 v10, v4
+; GCN-NEXT:    v_sub_i32_e32 v13, vcc, 0, v5
+; GCN-NEXT:    v_xor_b32_e32 v11, v1, v5
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v10, v10
+; GCN-NEXT:    v_max_i32_e32 v5, v5, v13
+; GCN-NEXT:    v_cvt_f32_u32_e32 v13, v5
+; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 0, v4
+; GCN-NEXT:    v_mul_f32_e32 v10, 0x4f7ffffe, v10
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v10, v10
-; GCN-NEXT:    v_cvt_f32_u32_e32 v12, v4
-; GCN-NEXT:    v_max_i32_e32 v1, v1, v13
-; GCN-NEXT:    v_sub_i32_e32 v15, vcc, 0, v6
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v13, v13
+; GCN-NEXT:    v_sub_i32_e32 v12, vcc, 0, v1
 ; GCN-NEXT:    v_mul_lo_u32 v16, v16, v10
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v12, v12
+; GCN-NEXT:    v_mul_f32_e32 v13, 0x4f7ffffe, v13
+; GCN-NEXT:    v_cvt_u32_f32_e32 v13, v13
+; GCN-NEXT:    v_max_i32_e32 v0, v0, v9
+; GCN-NEXT:    v_mul_hi_u32 v16, v10, v16
+; GCN-NEXT:    v_max_i32_e32 v1, v1, v12
+; GCN-NEXT:    v_sub_i32_e32 v15, vcc, 0, v6
+; GCN-NEXT:    v_add_i32_e32 v10, vcc, v10, v16
+; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 0, v5
+; GCN-NEXT:    v_mul_lo_u32 v16, v16, v13
+; GCN-NEXT:    v_mul_hi_u32 v10, v0, v10
 ; GCN-NEXT:    v_xor_b32_e32 v14, v2, v6
 ; GCN-NEXT:    v_max_i32_e32 v6, v6, v15
-; GCN-NEXT:    v_mul_hi_u32 v16, v10, v16
-; GCN-NEXT:    v_mul_f32_e32 v12, 0x4f7ffffe, v12
-; GCN-NEXT:    v_cvt_u32_f32_e32 v12, v12
+; GCN-NEXT:    v_mul_hi_u32 v12, v13, v16
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v15, v6
-; GCN-NEXT:    v_add_i32_e32 v10, vcc, v10, v16
-; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 0, v4
-; GCN-NEXT:    v_mul_lo_u32 v16, v16, v12
-; GCN-NEXT:    v_mul_hi_u32 v10, v1, v10
-; GCN-NEXT:    v_sub_i32_e32 v9, vcc, 0, v0
-; GCN-NEXT:    v_mul_hi_u32 v13, v12, v16
-; GCN-NEXT:    v_max_i32_e32 v0, v0, v9
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v9, v15
 ; GCN-NEXT:    v_ashrrev_i32_e32 v8, 31, v8
-; GCN-NEXT:    v_add_i32_e32 v12, vcc, v12, v13
-; GCN-NEXT:    v_mul_lo_u32 v13, v10, v5
-; GCN-NEXT:    v_mul_hi_u32 v12, v0, v12
-; GCN-NEXT:    v_mul_f32_e32 v9, 0x4f7ffffe, v9
-; GCN-NEXT:    v_cvt_u32_f32_e32 v9, v9
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, v1, v13
+; GCN-NEXT:    v_ashrrev_i32_e32 v11, 31, v11
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, v13, v12
+; GCN-NEXT:    v_mul_lo_u32 v13, v10, v4
+; GCN-NEXT:    v_mul_hi_u32 v12, v1, v12
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v9, v15
+; GCN-NEXT:    v_ashrrev_i32_e32 v14, 31, v14
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
 ; GCN-NEXT:    v_add_i32_e32 v13, vcc, 1, v10
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v1, v5
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v0, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v10, v10, v13, s[0:1]
-; GCN-NEXT:    v_sub_i32_e32 v13, vcc, v1, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v13, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v1, v5
-; GCN-NEXT:    v_mul_lo_u32 v1, v12, v4
-; GCN-NEXT:    v_sub_i32_e32 v5, vcc, 0, v6
-; GCN-NEXT:    v_mul_lo_u32 v5, v5, v9
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_sub_i32_e32 v13, vcc, v0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v13, s[0:1]
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v0, v4
+; GCN-NEXT:    v_mul_lo_u32 v0, v12, v5
+; GCN-NEXT:    v_mul_f32_e32 v9, 0x4f7ffffe, v9
+; GCN-NEXT:    v_cvt_u32_f32_e32 v9, v9
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v6
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, 1, v12
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v4
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v12, v1, s[2:3]
-; GCN-NEXT:    v_sub_i32_e32 v12, vcc, v0, v4
+; GCN-NEXT:    v_sub_i32_e32 v12, vcc, v0, v5
+; GCN-NEXT:    v_mul_lo_u32 v4, v4, v9
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v12, s[2:3]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v4
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v5
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 0, v7
-; GCN-NEXT:    v_mul_hi_u32 v4, v9, v5
 ; GCN-NEXT:    v_max_i32_e32 v5, v7, v0
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, v5
-; GCN-NEXT:    v_add_i32_e32 v12, vcc, 1, v1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v9, v4
+; GCN-NEXT:    v_mul_hi_u32 v4, v9, v4
+; GCN-NEXT:    v_add_i32_e32 v13, vcc, 1, v10
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v9, v4
 ; GCN-NEXT:    v_sub_i32_e32 v9, vcc, 0, v2
 ; GCN-NEXT:    v_max_i32_e32 v2, v2, v9
 ; GCN-NEXT:    v_mul_hi_u32 v4, v2, v4
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v9, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v12, s[2:3]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v10, v13, s[0:1]
 ; GCN-NEXT:    v_xor_b32_e32 v0, v0, v8
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
 ; GCN-NEXT:    v_mul_lo_u32 v8, v4, v6
-; GCN-NEXT:    v_add_i32_e32 v13, vcc, 1, v10
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v10, v13, s[0:1]
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, 1, v1
 ; GCN-NEXT:    v_sub_i32_e32 v10, vcc, 0, v5
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, v2, v8
-; GCN-NEXT:    v_ashrrev_i32_e32 v11, 31, v11
+; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v12, s[2:3]
 ; GCN-NEXT:    v_mul_lo_u32 v10, v10, v9
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v2, v6
@@ -896,7 +897,6 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_max_i32_e32 v6, v3, v6
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v9, v4
 ; GCN-NEXT:    v_mul_hi_u32 v4, v6, v4
-; GCN-NEXT:    v_ashrrev_i32_e32 v14, 31, v14
 ; GCN-NEXT:    v_xor_b32_e32 v2, v2, v14
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, v2, v14
 ; GCN-NEXT:    v_mul_lo_u32 v8, v4, v5
@@ -931,82 +931,83 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    s_mov_b32 s4, s0
 ; TONGA-NEXT:    s_mov_b32 s5, s1
 ; TONGA-NEXT:    s_waitcnt vmcnt(1)
-; TONGA-NEXT:    v_sub_u32_e32 v13, vcc, 0, v1
+; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, 0, v0
 ; TONGA-NEXT:    s_waitcnt vmcnt(0)
-; TONGA-NEXT:    v_sub_u32_e32 v12, vcc, 0, v5
-; TONGA-NEXT:    v_xor_b32_e32 v11, v1, v5
-; TONGA-NEXT:    v_max_i32_e32 v5, v5, v12
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v12, v5
 ; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, 0, v4
 ; TONGA-NEXT:    v_xor_b32_e32 v8, v0, v4
-; TONGA-NEXT:    v_rcp_iflag_f32_e32 v12, v12
 ; TONGA-NEXT:    v_max_i32_e32 v4, v4, v10
-; TONGA-NEXT:    v_sub_u32_e32 v16, vcc, 0, v5
-; TONGA-NEXT:    v_mul_f32_e32 v10, 0x4f7ffffe, v12
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v10, v4
+; TONGA-NEXT:    v_sub_u32_e32 v13, vcc, 0, v5
+; TONGA-NEXT:    v_xor_b32_e32 v11, v1, v5
+; TONGA-NEXT:    v_rcp_iflag_f32_e32 v10, v10
+; TONGA-NEXT:    v_max_i32_e32 v5, v5, v13
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v13, v5
+; TONGA-NEXT:    v_sub_u32_e32 v16, vcc, 0, v4
+; TONGA-NEXT:    v_mul_f32_e32 v10, 0x4f7ffffe, v10
 ; TONGA-NEXT:    v_cvt_u32_f32_e32 v10, v10
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v12, v4
-; TONGA-NEXT:    v_max_i32_e32 v1, v1, v13
-; TONGA-NEXT:    v_sub_u32_e32 v15, vcc, 0, v6
+; TONGA-NEXT:    v_rcp_iflag_f32_e32 v13, v13
+; TONGA-NEXT:    v_sub_u32_e32 v12, vcc, 0, v1
 ; TONGA-NEXT:    v_mul_lo_u32 v16, v16, v10
-; TONGA-NEXT:    v_rcp_iflag_f32_e32 v12, v12
+; TONGA-NEXT:    v_mul_f32_e32 v13, 0x4f7ffffe, v13
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v13, v13
+; TONGA-NEXT:    v_max_i32_e32 v0, v0, v9
+; TONGA-NEXT:    v_mul_hi_u32 v16, v10, v16
+; TONGA-NEXT:    v_max_i32_e32 v1, v1, v12
+; TONGA-NEXT:    v_sub_u32_e32 v15, vcc, 0, v6
+; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v10, v16
+; TONGA-NEXT:    v_sub_u32_e32 v16, vcc, 0, v5
+; TONGA-NEXT:    v_mul_lo_u32 v16, v16, v13
+; TONGA-NEXT:    v_mul_hi_u32 v10, v0, v10
 ; TONGA-NEXT:    v_xor_b32_e32 v14, v2, v6
 ; TONGA-NEXT:    v_max_i32_e32 v6, v6, v15
-; TONGA-NEXT:    v_mul_hi_u32 v16, v10, v16
-; TONGA-NEXT:    v_mul_f32_e32 v12, 0x4f7ffffe, v12
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v12, v12
+; TONGA-NEXT:    v_mul_hi_u32 v12, v13, v16
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v15, v6
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v10, v16
-; TONGA-NEXT:    v_sub_u32_e32 v16, vcc, 0, v4
-; TONGA-NEXT:    v_mul_lo_u32 v16, v16, v12
-; TONGA-NEXT:    v_mul_hi_u32 v10, v1, v10
-; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, 0, v0
-; TONGA-NEXT:    v_mul_hi_u32 v13, v12, v16
-; TONGA-NEXT:    v_max_i32_e32 v0, v0, v9
-; TONGA-NEXT:    v_rcp_iflag_f32_e32 v9, v15
 ; TONGA-NEXT:    v_ashrrev_i32_e32 v8, 31, v8
-; TONGA-NEXT:    v_add_u32_e32 v12, vcc, v12, v13
-; TONGA-NEXT:    v_mul_lo_u32 v13, v10, v5
-; TONGA-NEXT:    v_mul_hi_u32 v12, v0, v12
-; TONGA-NEXT:    v_mul_f32_e32 v9, 0x4f7ffffe, v9
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v9, v9
-; TONGA-NEXT:    v_sub_u32_e32 v1, vcc, v1, v13
+; TONGA-NEXT:    v_ashrrev_i32_e32 v11, 31, v11
+; TONGA-NEXT:    v_add_u32_e32 v12, vcc, v13, v12
+; TONGA-NEXT:    v_mul_lo_u32 v13, v10, v4
+; TONGA-NEXT:    v_mul_hi_u32 v12, v1, v12
+; TONGA-NEXT:    v_rcp_iflag_f32_e32 v9, v15
+; TONGA-NEXT:    v_ashrrev_i32_e32 v14, 31, v14
+; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v0, v13
 ; TONGA-NEXT:    v_add_u32_e32 v13, vcc, 1, v10
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[0:1], v1, v5
+; TONGA-NEXT:    v_cmp_ge_u32_e64 s[0:1], v0, v4
 ; TONGA-NEXT:    v_cndmask_b32_e64 v10, v10, v13, s[0:1]
-; TONGA-NEXT:    v_sub_u32_e32 v13, vcc, v1, v5
-; TONGA-NEXT:    v_cndmask_b32_e64 v1, v1, v13, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[0:1], v1, v5
-; TONGA-NEXT:    v_mul_lo_u32 v1, v12, v4
-; TONGA-NEXT:    v_sub_u32_e32 v5, vcc, 0, v6
-; TONGA-NEXT:    v_mul_lo_u32 v5, v5, v9
-; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT:    v_sub_u32_e32 v13, vcc, v0, v4
+; TONGA-NEXT:    v_cndmask_b32_e64 v0, v0, v13, s[0:1]
+; TONGA-NEXT:    v_cmp_ge_u32_e64 s[0:1], v0, v4
+; TONGA-NEXT:    v_mul_lo_u32 v0, v12, v5
+; TONGA-NEXT:    v_mul_f32_e32 v9, 0x4f7ffffe, v9
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v9, v9
+; TONGA-NEXT:    v_sub_u32_e32 v4, vcc, 0, v6
+; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v1, v0
 ; TONGA-NEXT:    v_add_u32_e32 v1, vcc, 1, v12
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v4
+; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v5
 ; TONGA-NEXT:    v_cndmask_b32_e64 v1, v12, v1, s[2:3]
-; TONGA-NEXT:    v_sub_u32_e32 v12, vcc, v0, v4
+; TONGA-NEXT:    v_sub_u32_e32 v12, vcc, v0, v5
+; TONGA-NEXT:    v_mul_lo_u32 v4, v4, v9
 ; TONGA-NEXT:    v_cndmask_b32_e64 v0, v0, v12, s[2:3]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v4
+; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v0, v5
 ; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, 0, v7
-; TONGA-NEXT:    v_mul_hi_u32 v4, v9, v5
 ; TONGA-NEXT:    v_max_i32_e32 v5, v7, v0
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, v5
-; TONGA-NEXT:    v_add_u32_e32 v12, vcc, 1, v1
-; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v9, v4
+; TONGA-NEXT:    v_mul_hi_u32 v4, v9, v4
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, 1, v10
 ; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v9, v4
 ; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, 0, v2
 ; TONGA-NEXT:    v_max_i32_e32 v2, v2, v9
 ; TONGA-NEXT:    v_mul_hi_u32 v4, v2, v4
 ; TONGA-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; TONGA-NEXT:    v_cvt_u32_f32_e32 v9, v0
-; TONGA-NEXT:    v_cndmask_b32_e64 v0, v1, v12, s[2:3]
+; TONGA-NEXT:    v_cndmask_b32_e64 v0, v10, v13, s[0:1]
 ; TONGA-NEXT:    v_xor_b32_e32 v0, v0, v8
 ; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v0, v8
 ; TONGA-NEXT:    v_mul_lo_u32 v8, v4, v6
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, 1, v10
-; TONGA-NEXT:    v_cndmask_b32_e64 v1, v10, v13, s[0:1]
+; TONGA-NEXT:    v_add_u32_e32 v12, vcc, 1, v1
 ; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, 0, v5
 ; TONGA-NEXT:    v_sub_u32_e32 v2, vcc, v2, v8
-; TONGA-NEXT:    v_ashrrev_i32_e32 v11, 31, v11
+; TONGA-NEXT:    v_cndmask_b32_e64 v1, v1, v12, s[2:3]
 ; TONGA-NEXT:    v_mul_lo_u32 v10, v10, v9
 ; TONGA-NEXT:    v_add_u32_e32 v8, vcc, 1, v4
 ; TONGA-NEXT:    v_cmp_ge_u32_e64 s[0:1], v2, v6
@@ -1023,7 +1024,6 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_max_i32_e32 v6, v3, v6
 ; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v9, v4
 ; TONGA-NEXT:    v_mul_hi_u32 v4, v6, v4
-; TONGA-NEXT:    v_ashrrev_i32_e32 v14, 31, v14
 ; TONGA-NEXT:    v_xor_b32_e32 v2, v2, v14
 ; TONGA-NEXT:    v_sub_u32_e32 v2, vcc, v2, v14
 ; TONGA-NEXT:    v_mul_lo_u32 v8, v4, v5
diff --git a/llvm/test/CodeGen/AMDGPU/select.f16.ll b/llvm/test/CodeGen/AMDGPU/select.f16.ll
index 21719226710de..2863fccac9fbc 100644
--- a/llvm/test/CodeGen/AMDGPU/select.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select.f16.ll
@@ -2169,37 +2169,41 @@ define <16 x half> @v_vselect_v16f16(<16 x half> %a, <16 x half> %b, <16 x i32>
 ; SI-LABEL: v_vselect_v16f16:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:4
-; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v16
-; SI-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:8
-; SI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:12
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
 ; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:16
 ; SI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:20
 ; SI-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:24
-; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:28
-; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:32
+; SI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:28
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v19, v19
 ; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
 ; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
-; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
 ; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
 ; SI-NEXT:    v_cvt_f16_f32_e32 v7, v7
 ; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
 ; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
 ; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
 ; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT:    v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
 ; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
 ; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v11, v11
@@ -2209,162 +2213,154 @@ define <16 x half> @v_vselect_v16f16(<16 x half> %a, <16 x half> %b, <16 x i32>
 ; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
 ; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
 ; SI-NEXT:    v_cvt_f16_f32_e32 v14, v14
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
 ; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
 ; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
 ; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
-; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v36
-; SI-NEXT:    v_cndmask_b32_e32 v0, v37, v0, vcc
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v38
-; SI-NEXT:    v_cndmask_b32_e32 v1, v17, v1, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v18
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v20
-; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:48
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT:    v_cndmask_b32_e32 v2, v17, v2, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v19
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v32
-; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:36
-; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:44
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; SI-NEXT:    v_cndmask_b32_e32 v3, v17, v3, vcc
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v33
-; SI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:40
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT:    v_cndmask_b32_e32 v4, v18, v4, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v22
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v34
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; SI-NEXT:    v_cvt_f16_f32_e32 v22, v23
-; SI-NEXT:    v_cvt_f32_f16_e32 v21, v18
-; SI-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:56
-; SI-NEXT:    v_cndmask_b32_e32 v5, v17, v5, vcc
-; SI-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:52
-; SI-NEXT:    s_waitcnt vmcnt(7)
+; SI-NEXT:    s_waitcnt vmcnt(3)
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v32
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v33
+; SI-NEXT:    v_cndmask_b32_e64 v4, v20, v4, s[6:7]
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:44
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v31
-; SI-NEXT:    v_cndmask_b32_e32 v6, v21, v6, vcc
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
-; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
-; SI-NEXT:    buffer_load_dword v21, off, s[0:3], s32
-; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT:    v_cvt_f16_f32_e32 v23, v24
-; SI-NEXT:    v_cvt_f16_f32_e32 v24, v25
-; SI-NEXT:    v_cndmask_b32_e32 v7, v22, v7, vcc
-; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:64
-; SI-NEXT:    v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v32
-; SI-NEXT:    v_cndmask_b32_e32 v8, v23, v8, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v23, v26
-; SI-NEXT:    v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v33
-; SI-NEXT:    v_cndmask_b32_e32 v9, v24, v9, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v24, v27
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v19, v28
-; SI-NEXT:    v_cndmask_b32_e32 v10, v23, v10, vcc
-; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v20
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:8
+; SI-NEXT:    v_cndmask_b32_e32 v2, v18, v2, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v3, v19, v3, s[4:5]
+; SI-NEXT:    v_cvt_f16_f32_e32 v19, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v21, v22
+; SI-NEXT:    s_waitcnt vmcnt(4)
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v34
+; SI-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:40
 ; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v20, v29
-; SI-NEXT:    v_cndmask_b32_e32 v11, v24, v11, vcc
-; SI-NEXT:    s_waitcnt vmcnt(3)
+; SI-NEXT:    v_cndmask_b32_e32 v5, v19, v5, vcc
+; SI-NEXT:    v_cvt_f32_f16_e32 v19, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v21, v23
+; SI-NEXT:    s_waitcnt vmcnt(4)
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v35
+; SI-NEXT:    v_cndmask_b32_e32 v6, v19, v6, vcc
+; SI-NEXT:    v_cvt_f32_f16_e32 v19, v21
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_cmp_eq_u32_e64 s[8:9], 0, v31
+; SI-NEXT:    v_cndmask_b32_e64 v0, v16, v0, s[8:9]
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v17
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cmp_eq_u32_e64 s[8:9], 0, v32
+; SI-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:36
+; SI-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:48
+; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:52
+; SI-NEXT:    buffer_load_dword v23, off, s[0:3], s32
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
+; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s[8:9]
+; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
+; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:56
+; SI-NEXT:    v_cndmask_b32_e32 v7, v19, v7, vcc
+; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:64
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v17
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v30
-; SI-NEXT:    v_cndmask_b32_e32 v12, v19, v12, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v8, v24, v8, vcc
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v25
+; SI-NEXT:    v_cvt_f16_f32_e32 v24, v26
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v18
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v29
+; SI-NEXT:    v_cndmask_b32_e32 v9, v17, v9, vcc
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v24
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v20, v28
+; SI-NEXT:    v_cndmask_b32_e32 v10, v17, v10, vcc
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v18
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v27
 ; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v21
 ; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT:    v_cndmask_b32_e32 v13, v20, v13, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v11, v18, v11, vcc
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v22
+; SI-NEXT:    v_cndmask_b32_e32 v12, v20, v12, vcc
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
-; SI-NEXT:    v_cndmask_b32_e32 v14, v17, v14, vcc
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v30
+; SI-NEXT:    v_cndmask_b32_e32 v13, v17, v13, vcc
+; SI-NEXT:    v_cvt_f16_f32_e32 v17, v23
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v31
+; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; SI-NEXT:    v_cndmask_b32_e32 v14, v16, v14, vcc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v22
-; SI-NEXT:    v_cndmask_b32_e32 v15, v18, v15, vcc
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v19
+; SI-NEXT:    v_cndmask_b32_e32 v15, v17, v15, vcc
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_vselect_v16f16:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v16
-; VI-NEXT:    v_cmp_eq_u32_e64 s[18:19], 0, v17
-; VI-NEXT:    v_cmp_eq_u32_e64 s[40:41], 0, v29
-; VI-NEXT:    v_lshrrev_b32_e32 v16, 16, v6
-; VI-NEXT:    v_lshrrev_b32_e32 v17, 16, v14
-; VI-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v18
-; VI-NEXT:    v_cmp_eq_u32_e64 s[28:29], 0, v27
-; VI-NEXT:    v_cndmask_b32_e64 v16, v17, v16, s[40:41]
-; VI-NEXT:    v_lshrrev_b32_e32 v17, 16, v5
-; VI-NEXT:    v_lshrrev_b32_e32 v18, 16, v13
-; VI-NEXT:    v_cmp_eq_u32_e64 s[20:21], 0, v19
-; VI-NEXT:    v_cmp_eq_u32_e64 s[26:27], 0, v25
-; VI-NEXT:    v_cndmask_b32_e64 v17, v18, v17, s[28:29]
-; VI-NEXT:    v_lshrrev_b32_e32 v18, 16, v4
-; VI-NEXT:    v_lshrrev_b32_e32 v19, 16, v12
-; VI-NEXT:    v_cmp_eq_u32_e64 s[8:9], 0, v20
-; VI-NEXT:    v_cmp_eq_u32_e64 s[24:25], 0, v23
-; VI-NEXT:    v_cndmask_b32_e64 v18, v19, v18, s[26:27]
-; VI-NEXT:    v_lshrrev_b32_e32 v19, 16, v3
-; VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v11
-; VI-NEXT:    v_cmp_eq_u32_e64 s[22:23], 0, v21
-; VI-NEXT:    v_cndmask_b32_e64 v19, v20, v19, s[24:25]
-; VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v2
-; VI-NEXT:    v_lshrrev_b32_e32 v21, 16, v10
-; VI-NEXT:    v_cmp_eq_u32_e64 s[10:11], 0, v22
-; VI-NEXT:    v_cndmask_b32_e64 v20, v21, v20, s[22:23]
-; VI-NEXT:    v_lshrrev_b32_e32 v21, 16, v1
-; VI-NEXT:    v_lshrrev_b32_e32 v22, 16, v9
-; VI-NEXT:    v_cndmask_b32_e64 v21, v22, v21, s[20:21]
-; VI-NEXT:    v_lshrrev_b32_e32 v22, 16, v0
-; VI-NEXT:    v_lshrrev_b32_e32 v23, 16, v8
-; VI-NEXT:    v_cndmask_b32_e64 v0, v8, v0, s[4:5]
-; VI-NEXT:    buffer_load_dword v8, off, s[0:3], s32
-; VI-NEXT:    v_cndmask_b32_e64 v22, v23, v22, s[18:19]
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v30
-; VI-NEXT:    v_cndmask_b32_e64 v1, v9, v1, s[6:7]
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v22
-; VI-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s[8:9]
-; VI-NEXT:    v_or_b32_sdwa v0, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v7
-; VI-NEXT:    v_cndmask_b32_e32 v7, v15, v7, vcc
-; VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v15
-; VI-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v24
-; VI-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s[10:11]
-; VI-NEXT:    v_cmp_eq_u32_e64 s[14:15], 0, v26
-; VI-NEXT:    v_cndmask_b32_e64 v4, v12, v4, s[12:13]
-; VI-NEXT:    v_cmp_eq_u32_e64 s[16:17], 0, v28
-; VI-NEXT:    v_cndmask_b32_e64 v5, v13, v5, s[14:15]
-; VI-NEXT:    v_cndmask_b32_e64 v6, v14, v6, s[16:17]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v8
-; VI-NEXT:    v_cndmask_b32_e32 v8, v10, v9, vcc
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v21
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
+; VI-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; VI-NEXT:    v_cmp_eq_u32_e64 s[8:9], 0, v22
+; VI-NEXT:    v_cmp_eq_u32_e64 s[10:11], 0, v24
+; VI-NEXT:    v_lshrrev_b32_e32 v22, 16, v6
+; VI-NEXT:    v_lshrrev_b32_e32 v24, 16, v14
+; VI-NEXT:    v_cmp_eq_u32_e64 s[20:21], 0, v29
+; VI-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v26
+; VI-NEXT:    v_cmp_eq_u32_e64 s[14:15], 0, v28
+; VI-NEXT:    v_cmp_eq_u32_e64 s[18:19], 0, v27
+; VI-NEXT:    v_lshrrev_b32_e32 v26, 16, v4
+; VI-NEXT:    v_lshrrev_b32_e32 v27, 16, v12
+; VI-NEXT:    v_cndmask_b32_e64 v22, v24, v22, s[20:21]
+; VI-NEXT:    v_lshrrev_b32_e32 v24, 16, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v25
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v18
+; VI-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v20
+; VI-NEXT:    v_lshrrev_b32_e32 v18, 16, v5
+; VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v13
+; VI-NEXT:    v_cndmask_b32_e64 v6, v14, v6, s[14:15]
+; VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v3
+; VI-NEXT:    v_cndmask_b32_e64 v5, v13, v5, s[12:13]
+; VI-NEXT:    v_lshrrev_b32_e32 v13, 16, v11
+; VI-NEXT:    v_cndmask_b32_e32 v25, v27, v26, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v23
+; VI-NEXT:    v_cndmask_b32_e64 v4, v12, v4, s[10:11]
+; VI-NEXT:    v_lshrrev_b32_e32 v12, 16, v2
+; VI-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s[8:9]
+; VI-NEXT:    v_lshrrev_b32_e32 v11, 16, v10
+; VI-NEXT:    v_cndmask_b32_e32 v13, v13, v14, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v21
+; VI-NEXT:    v_cndmask_b32_e64 v2, v10, v2, s[6:7]
+; VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v1
+; VI-NEXT:    v_cndmask_b32_e64 v1, v9, v1, s[4:5]
+; VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
+; VI-NEXT:    v_cndmask_b32_e32 v11, v11, v12, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v19
+; VI-NEXT:    v_cndmask_b32_e32 v9, v9, v10, vcc
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v9
+; VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v17
 ; VI-NEXT:    v_or_b32_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v20
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v11
+; VI-NEXT:    v_cmp_eq_u32_e64 s[16:17], 0, v30
+; VI-NEXT:    v_cndmask_b32_e32 v8, v8, v24, vcc
 ; VI-NEXT:    v_or_b32_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v19
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v13
+; VI-NEXT:    v_cndmask_b32_e64 v18, v20, v18, s[18:19]
+; VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v7
+; VI-NEXT:    v_cndmask_b32_e64 v7, v15, v7, s[16:17]
+; VI-NEXT:    v_lshrrev_b32_e32 v15, 16, v15
+; VI-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
 ; VI-NEXT:    v_or_b32_sdwa v3, v3, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v18
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v25
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    v_or_b32_sdwa v4, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v17
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v18
 ; VI-NEXT:    v_or_b32_sdwa v5, v5, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v16
-; VI-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 16, v22
 ; VI-NEXT:    v_or_b32_sdwa v6, v6, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
+; VI-NEXT:    v_cndmask_b32_e32 v8, v15, v20, vcc
+; VI-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
 ; VI-NEXT:    v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -3480,8 +3476,8 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_clause 0x1f
-; GFX11-FAKE16-NEXT:    scratch_load_b32 v31, off, s32 offset:120
-; GFX11-FAKE16-NEXT:    scratch_load_b32 v32, off, s32 offset:112
+; GFX11-FAKE16-NEXT:    scratch_load_b32 v31, off, s32 offset:112
+; GFX11-FAKE16-NEXT:    scratch_load_b32 v32, off, s32 offset:120
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v33, off, s32
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v34, off, s32 offset:104
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v35, off, s32 offset:96
@@ -3513,8 +3509,6 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v85, off, s32 offset:4
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v86, off, s32 offset:20
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v87, off, s32 offset:128
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v97, 16, v14
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v30
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v99, 16, v13
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v29
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v101, 16, v12
@@ -3539,19 +3533,21 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v144, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v145, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v146, 16, v18
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v97, 16, v14
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v30
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v147, 16, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v96, 16, v15
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(32)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v31
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v97, v98, v97, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(31)
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v32
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v0
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e64 s0, 0, v32
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v32, 16, v0
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v99, v100, v99, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(29)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v34
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v97, v98, v97, s0
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v98, 16, v16
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v100, 16, v33
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v34, v102, v101, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(28)
@@ -3589,7 +3585,7 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v31, v31, v147, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(17)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v54
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v32, v98, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v32, v98, v32, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(16)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v55
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v15, v33, v15, vcc_lo
@@ -3606,7 +3602,8 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v28, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(12)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v67
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_perm_b32 v13, v99, v13, 0x5040100
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v12, v34, v12, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v27, v11, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(11)
@@ -3614,7 +3611,7 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v26, v10, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(10)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v69
-; GFX11-FAKE16-NEXT:    v_perm_b32 v13, v99, v13, 0x5040100
+; GFX11-FAKE16-NEXT:    v_perm_b32 v11, v35, v11, 0x5040100
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v10, v36, v10, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v25, v9, vcc_lo
@@ -3623,7 +3620,7 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v24, v8, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(8)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v71
-; GFX11-FAKE16-NEXT:    v_perm_b32 v11, v35, v11, 0x5040100
+; GFX11-FAKE16-NEXT:    v_perm_b32 v9, v37, v9, 0x5040100
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v8, v38, v8, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v7, v23, v7, vcc_lo
@@ -3632,7 +3629,7 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v6, v22, v6, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(6)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v81
-; GFX11-FAKE16-NEXT:    v_perm_b32 v9, v37, v9, 0x5040100
+; GFX11-FAKE16-NEXT:    v_perm_b32 v7, v39, v7, 0x5040100
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v6, v48, v6, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v5, v21, v5, vcc_lo
@@ -3641,7 +3638,7 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v20, v4, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(4)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v83
-; GFX11-FAKE16-NEXT:    v_perm_b32 v7, v39, v7, 0x5040100
+; GFX11-FAKE16-NEXT:    v_perm_b32 v5, v49, v5, 0x5040100
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v4, v50, v4, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v19, v3, vcc_lo
@@ -3653,17 +3650,16 @@ define <32 x half> @v_vselect_v32f16(<32 x half> %a, <32 x half> %b, <32 x i32>
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v16, v0, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(1)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v86
-; GFX11-FAKE16-NEXT:    v_perm_b32 v5, v49, v5, 0x5040100
+; GFX11-FAKE16-NEXT:    v_perm_b32 v3, v51, v3, 0x5040100
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v0, v32, v0, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v18, v2, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v87
-; GFX11-FAKE16-NEXT:    v_perm_b32 v3, v51, v3, 0x5040100
+; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v31, v1, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v2, v52, v2, 0x5040100
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v16, v100, v96, vcc_lo
-; GFX11-FAKE16-NEXT:    v_perm_b32 v1, v31, v1, 0x5040100
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v15, v16, v15, 0x5040100
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq <32 x i32> %cond, zeroinitializer
diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
index b7e6ebaa655b9..5aafb0f576fb4 100644
--- a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
+++ b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
@@ -94,8 +94,9 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %368:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.99, addrspace 4)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 64, 0 :: (invariant load (s128) from %ir.107, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.124, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.124, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %352:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %363:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
@@ -104,7 +105,6 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[S_ADD_I32_3:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM1]], -114, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_4:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM2]], -130, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_5:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM2]], -178, implicit-def dead $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
   ; CHECK-NEXT:   undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
   ; CHECK-NEXT:   [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
   ; CHECK-NEXT:   undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
@@ -121,17 +121,17 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.129, addrspace 4)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.145, addrspace 4)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4)
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 224, 0 :: (invariant load (s128) from %ir.134, addrspace 4)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.162, addrspace 4)
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 224, 0 :: (invariant load (s128) from %ir.140, addrspace 4)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ADD_I32_7:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM4]], -217, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_8:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -233, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_9:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM5]], -249, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_10:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM3]], -297, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_11:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -313, implicit-def dead $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 224, 0 :: (invariant load (s128) from %ir.140, addrspace 4)
   ; CHECK-NEXT:   [[S_ADD_I32_12:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -329, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_13:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -345, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_14:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM6]], -441, implicit-def dead $scc
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index 6423267be4b34..6da7d1b7ee868 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -6092,15 +6092,15 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mov_b32_e32 v8, 0
 ; TONGA-NEXT:    s_waitcnt lgkmcnt(0)
 ; TONGA-NEXT:    s_add_u32 s0, s6, 48
-; TONGA-NEXT:    s_addc_u32 s1, s7, 0
-; TONGA-NEXT:    s_add_u32 s2, s6, 32
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s6
-; TONGA-NEXT:    s_addc_u32 s3, s7, 0
-; TONGA-NEXT:    v_mov_b32_e32 v2, s2
+; TONGA-NEXT:    s_addc_u32 s1, s7, 0
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s7
-; TONGA-NEXT:    v_mov_b32_e32 v3, s3
-; TONGA-NEXT:    flat_load_dwordx4 v[10:13], v[2:3]
+; TONGA-NEXT:    s_add_u32 s2, s6, 32
 ; TONGA-NEXT:    flat_load_dwordx4 v[14:17], v[0:1]
+; TONGA-NEXT:    s_addc_u32 s3, s7, 0
+; TONGA-NEXT:    v_mov_b32_e32 v0, s2
+; TONGA-NEXT:    v_mov_b32_e32 v1, s3
+; TONGA-NEXT:    flat_load_dwordx4 v[10:13], v[0:1]
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s0
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s1
 ; TONGA-NEXT:    s_add_u32 s0, s6, 16
diff --git a/llvm/test/CodeGen/AMDGPU/store-local.128.ll b/llvm/test/CodeGen/AMDGPU/store-local.128.ll
index 76ed4f6238dbe..dcf3643756cb2 100644
--- a/llvm/test/CodeGen/AMDGPU/store-local.128.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-local.128.ll
@@ -576,8 +576,8 @@ define amdgpu_kernel void @store_lds_v4i32_align8(ptr addrspace(3) %out, <4 x i3
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_mov_b32_e32 v4, s6
 ; GFX11-NEXT:    v_mov_b32_e32 v0, s0
-; GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT:    v_mov_b32_e32 v1, s1
+; GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-NEXT:    ds_store_2addr_b64 v4, v[0:1], v[2:3] offset1:1
 ; GFX11-NEXT:    s_endpgm
   store <4 x i32> %x, ptr addrspace(3) %out, align 8
diff --git a/llvm/test/CodeGen/AMDGPU/vopd-combine.mir b/llvm/test/CodeGen/AMDGPU/vopd-combine.mir
index 5a13401c1631c..3a2b0996edacf 100644
--- a/llvm/test/CodeGen/AMDGPU/vopd-combine.mir
+++ b/llvm/test/CodeGen/AMDGPU/vopd-combine.mir
@@ -443,8 +443,8 @@ body:             |
     ; SCHED-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
     ; SCHED-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
     ; SCHED-NEXT: $vgpr12 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
-    ; SCHED-NEXT: $vgpr19 = V_CNDMASK_B32_e32 $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
-    ; SCHED-NEXT: $vgpr11 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
+    ; SCHED-NEXT: $vgpr11 = V_CNDMASK_B32_e32 $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
+    ; SCHED-NEXT: $vgpr19 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
     ; SCHED-NEXT: $vgpr17 = V_MUL_F32_e32 killed $vgpr0, $vgpr0, implicit $mode, implicit $exec
     ; SCHED-NEXT: $vgpr10 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo
     ; SCHED-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, killed $vgpr2, implicit $mode, implicit $exec, implicit killed $vcc_lo
@@ -462,8 +462,8 @@ body:             |
     ; PAIR-GFX11-NEXT: $vgpr3, $vgpr6 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
     ; PAIR-GFX11-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
     ; PAIR-GFX11-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
-    ; PAIR-GFX11-NEXT: $vgpr12, $vgpr19 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
-    ; PAIR-GFX11-NEXT: $vgpr11 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
+    ; PAIR-GFX11-NEXT: $vgpr12, $vgpr11 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+    ; PAIR-GFX11-NEXT: $vgpr19 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
     ; PAIR-GFX11-NEXT: $vgpr17, $vgpr10 = V_DUAL_MUL_F32_e32_X_CNDMASK_B32_e32_gfx11 killed $vgpr0, $vgpr0, $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
     ; PAIR-GFX11-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, killed $vgpr2, implicit $mode, implicit $exec, implicit killed $vcc_lo
     ; PAIR-GFX11-NEXT: $vgpr16 = V_SUB_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
@@ -480,8 +480,8 @@ body:             |
     ; PAIR-GFX12-NEXT: $vgpr3, $vgpr6 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
     ; PAIR-GFX12-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
     ; PAIR-GFX12-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
-    ; PAIR-GFX12-NEXT: $vgpr12, $vgpr19 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
-    ; PAIR-GFX12-NEXT: $vgpr11 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
+    ; PAIR-GFX12-NEXT: $vgpr12, $vgpr11 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+    ; PAIR-GFX12-NEXT: $vgpr19 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
     ; PAIR-GFX12-NEXT: $vgpr17, $vgpr10 = V_DUAL_MUL_F32_e32_X_CNDMASK_B32_e32_gfx12 killed $vgpr0, $vgpr0, $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
     ; PAIR-GFX12-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, killed $vgpr2, implicit $mode, implicit $exec, implicit killed $vcc_lo
     ; PAIR-GFX12-NEXT: $vgpr16 = V_SUB_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/PowerPC/p10-fi-elim.ll b/llvm/test/CodeGen/PowerPC/p10-fi-elim.ll
index f70f95b428ff7..3f6838afd545b 100644
--- a/llvm/test/CodeGen/PowerPC/p10-fi-elim.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-fi-elim.ll
@@ -44,10 +44,10 @@ define dso_local signext i32 @test_FI_elim(ptr noalias nocapture dereferenceable
 ; CHECK-NEXT:    mfvsrd r10, v3
 ; CHECK-NEXT:    std r5, 0(r3)
 ; CHECK-NEXT:    lbz r5, 2(r7)
-; CHECK-NEXT:    mr r7, r9
 ; CHECK-NEXT:    stb r11, 0(r3)
 ; CHECK-NEXT:    stb r12, 0(r3)
 ; CHECK-NEXT:    std r2, 0(r3)
+; CHECK-NEXT:    mr r7, r9
 ; CHECK-NEXT:    neg r10, r10
 ; CHECK-NEXT:    rlwinm r5, r5, 0, 27, 27
 ; CHECK-NEXT:    stb r5, 0(0)
@@ -93,10 +93,10 @@ define dso_local signext i32 @test_FI_elim(ptr noalias nocapture dereferenceable
 ; CHECK-BE-NEXT:    neg r5, r5
 ; CHECK-BE-NEXT:    std r5, 0(r3)
 ; CHECK-BE-NEXT:    lbz r5, 2(r7)
-; CHECK-BE-NEXT:    mr r7, r9
 ; CHECK-BE-NEXT:    stb r11, 0(r3)
 ; CHECK-BE-NEXT:    stb r12, 0(r3)
 ; CHECK-BE-NEXT:    std r30, 0(r3)
+; CHECK-BE-NEXT:    mr r7, r9
 ; CHECK-BE-NEXT:    neg r10, r10
 ; CHECK-BE-NEXT:    rlwinm r5, r5, 0, 27, 27
 ; CHECK-BE-NEXT:    stb r5, 0(0)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll
index bc002fee4417c..5278c951ecddf 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -1481,24 +1481,24 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ;
 ; RV32I-LABEL: shl_16bytes:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lbu a3, 0(a0)
-; RV32I-NEXT:    lbu a4, 1(a0)
+; RV32I-NEXT:    lbu a3, 1(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
 ; RV32I-NEXT:    lbu a5, 2(a0)
 ; RV32I-NEXT:    lbu a6, 3(a0)
-; RV32I-NEXT:    lbu a7, 4(a0)
-; RV32I-NEXT:    lbu t0, 5(a0)
-; RV32I-NEXT:    lbu t1, 6(a0)
-; RV32I-NEXT:    lbu t2, 7(a0)
-; RV32I-NEXT:    slli a4, a4, 8
-; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    slli a3, a3, 8
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    lbu a7, 5(a0)
+; RV32I-NEXT:    lbu t0, 6(a0)
+; RV32I-NEXT:    lbu t1, 7(a0)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    slli t0, t0, 8
-; RV32I-NEXT:    slli t2, t2, 8
-; RV32I-NEXT:    or a4, a6, a5
-; RV32I-NEXT:    or a5, t0, a7
+; RV32I-NEXT:    slli a7, a7, 8
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    or a4, a7, a4
 ; RV32I-NEXT:    lbu a6, 0(a1)
 ; RV32I-NEXT:    lbu a7, 1(a1)
-; RV32I-NEXT:    or t0, t2, t1
+; RV32I-NEXT:    slli t1, t1, 8
+; RV32I-NEXT:    or t0, t1, t0
 ; RV32I-NEXT:    lbu t1, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a7, a7, 8
@@ -1507,11 +1507,11 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli a1, a1, 8
 ; RV32I-NEXT:    or a1, a1, t1
 ; RV32I-NEXT:    li t1, 32
-; RV32I-NEXT:    slli a4, a4, 16
+; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli t2, t0, 16
 ; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    or t0, a4, a3
-; RV32I-NEXT:    or a4, t2, a5
+; RV32I-NEXT:    or t0, a5, a3
+; RV32I-NEXT:    or a4, t2, a4
 ; RV32I-NEXT:    or a5, a1, a7
 ; RV32I-NEXT:    slli a5, a5, 3
 ; RV32I-NEXT:    neg t3, a5
@@ -1810,24 +1810,24 @@ define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw
 ;
 ; RV32I-LABEL: shl_16bytes_wordOff:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lbu a3, 0(a0)
-; RV32I-NEXT:    lbu a4, 1(a0)
+; RV32I-NEXT:    lbu a3, 1(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
 ; RV32I-NEXT:    lbu a5, 2(a0)
 ; RV32I-NEXT:    lbu a6, 3(a0)
-; RV32I-NEXT:    lbu a7, 4(a0)
-; RV32I-NEXT:    lbu t0, 5(a0)
-; RV32I-NEXT:    lbu t1, 6(a0)
-; RV32I-NEXT:    lbu t2, 7(a0)
-; RV32I-NEXT:    slli a4, a4, 8
-; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    slli a3, a3, 8
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    lbu a7, 5(a0)
+; RV32I-NEXT:    lbu t0, 6(a0)
+; RV32I-NEXT:    lbu t1, 7(a0)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    slli t0, t0, 8
-; RV32I-NEXT:    slli t2, t2, 8
-; RV32I-NEXT:    or a4, a6, a5
-; RV32I-NEXT:    or a5, t0, a7
+; RV32I-NEXT:    slli a7, a7, 8
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    or a4, a7, a4
 ; RV32I-NEXT:    lbu a6, 0(a1)
 ; RV32I-NEXT:    lbu a7, 1(a1)
-; RV32I-NEXT:    or t0, t2, t1
+; RV32I-NEXT:    slli t1, t1, 8
+; RV32I-NEXT:    or t0, t1, t0
 ; RV32I-NEXT:    lbu t1, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a7, a7, 8
@@ -1836,11 +1836,11 @@ define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw
 ; RV32I-NEXT:    slli a1, a1, 8
 ; RV32I-NEXT:    or a1, a1, t1
 ; RV32I-NEXT:    li t1, 32
-; RV32I-NEXT:    slli a4, a4, 16
+; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli t2, t0, 16
 ; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    or t0, a4, a3
-; RV32I-NEXT:    or a4, t2, a5
+; RV32I-NEXT:    or t0, a5, a3
+; RV32I-NEXT:    or a4, t2, a4
 ; RV32I-NEXT:    or a5, a1, a7
 ; RV32I-NEXT:    slli a5, a5, 5
 ; RV32I-NEXT:    neg t3, a5
@@ -5781,11 +5781,11 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    lbu a3, 15(a0)
 ; RV32I-NEXT:    slli a7, a7, 8
 ; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    or a5, a7, a5
 ; RV32I-NEXT:    or a7, t1, t0
 ; RV32I-NEXT:    lbu t0, 0(a1)
 ; RV32I-NEXT:    lbu t1, 1(a1)
+; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    or t2, t3, t2
 ; RV32I-NEXT:    lbu t3, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
@@ -6695,11 +6695,11 @@ define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw
 ; RV32I-NEXT:    lbu a3, 15(a0)
 ; RV32I-NEXT:    slli a7, a7, 8
 ; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    or a5, a7, a5
 ; RV32I-NEXT:    or a7, t1, t0
 ; RV32I-NEXT:    lbu t0, 0(a1)
 ; RV32I-NEXT:    lbu t1, 1(a1)
+; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    or t2, t3, t2
 ; RV32I-NEXT:    lbu t3, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
@@ -7609,11 +7609,11 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
 ; RV32I-NEXT:    lbu a3, 15(a0)
 ; RV32I-NEXT:    slli a7, a7, 8
 ; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    or a5, a7, a5
 ; RV32I-NEXT:    or a7, t1, t0
 ; RV32I-NEXT:    lbu t0, 0(a1)
 ; RV32I-NEXT:    lbu t1, 1(a1)
+; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    or t2, t3, t2
 ; RV32I-NEXT:    lbu t3, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
diff --git a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
index f9086ba9d6354..fc714e3faef43 100644
--- a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
@@ -4411,25 +4411,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV32-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV32:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT:    lbu a4, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-NEXT:    lbu a1, 3(a1)
 ; CHECK-ALIGNED-RV32-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV32-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV32-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV32-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV32-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV32-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a0, 3(a0)
 ; CHECK-ALIGNED-RV32-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV32-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV32-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV32-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV32-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV32-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV32-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV32-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV32-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV32-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV32-NEXT:    ret
@@ -4437,25 +4437,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV64:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV64-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT:    lb a4, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT:    lb a1, 3(a1)
 ; CHECK-ALIGNED-RV64-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV64-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV64-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV64-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV64-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV64-NEXT:    lb a0, 3(a0)
 ; CHECK-ALIGNED-RV64-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV64-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV64-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV64-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV64-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV64-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV64-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV64-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV64-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV64-NEXT:    ret
@@ -4463,25 +4463,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV32-ZBB:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a4, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a1, 3(a1)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a0, 3(a0)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    ret
@@ -4489,25 +4489,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV64-ZBB:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lb a4, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lb a1, 3(a1)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lb a0, 3(a0)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ret
@@ -4559,25 +4559,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV32-V-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV32-V:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a4, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a1, 3(a1)
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV32-V-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV32-V-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV32-V-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a0, 3(a0)
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV32-V-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV32-V-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV32-V-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV32-V-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV32-V-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV32-V-NEXT:    ret
@@ -4585,25 +4585,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-V-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV64-V:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV64-V-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT:    lb a4, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV64-V-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-V-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-V-NEXT:    lb a1, 3(a1)
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV64-V-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV64-V-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV64-V-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV64-V-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV64-V-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV64-V-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV64-V-NEXT:    lb a0, 3(a0)
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV64-V-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV64-V-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV64-V-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV64-V-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV64-V-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV64-V-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/memcmp.ll b/llvm/test/CodeGen/RISCV/memcmp.ll
index f0290298e362a..ddfbd649a43b8 100644
--- a/llvm/test/CodeGen/RISCV/memcmp.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp.ll
@@ -5981,25 +5981,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV32:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT:    lbu a4, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-NEXT:    lbu a1, 3(a1)
 ; CHECK-ALIGNED-RV32-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV32-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV32-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV32-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV32-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV32-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV32-NEXT:    lbu a0, 3(a0)
 ; CHECK-ALIGNED-RV32-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV32-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV32-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV32-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV32-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV32-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV32-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV32-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV32-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV32-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV32-NEXT:    ret
@@ -6007,25 +6007,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV64:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV64-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT:    lb a4, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT:    lb a1, 3(a1)
 ; CHECK-ALIGNED-RV64-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV64-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV64-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV64-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV64-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV64-NEXT:    lb a0, 3(a0)
 ; CHECK-ALIGNED-RV64-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV64-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV64-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV64-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV64-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV64-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV64-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV64-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV64-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV64-NEXT:    ret
@@ -6033,25 +6033,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV32-ZBB:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a4, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a1, 3(a1)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lbu a0, 3(a0)
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    ret
@@ -6059,25 +6059,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV64-ZBB:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lb a4, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lb a1, 3(a1)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    lb a0, 3(a0)
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ret
@@ -6129,25 +6129,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-V-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV32-V:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a4, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a1, 3(a1)
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV32-V-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV32-V-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV32-V-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV32-V-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV32-V-NEXT:    lbu a0, 3(a0)
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV32-V-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV32-V-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV32-V-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV32-V-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV32-V-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV32-V-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV32-V-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV32-V-NEXT:    ret
@@ -6155,25 +6155,25 @@ define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-V-LABEL: memcmp_eq_zero:
 ; CHECK-ALIGNED-RV64-V:       # %bb.0: # %entry
 ; CHECK-ALIGNED-RV64-V-NEXT:    lbu a2, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT:    lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT:    lb a4, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT:    lbu a1, 0(a1)
+; CHECK-ALIGNED-RV64-V-NEXT:    lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-V-NEXT:    lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-V-NEXT:    lb a1, 3(a1)
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT:    slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT:    slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT:    lbu a2, 0(a0)
+; CHECK-ALIGNED-RV64-V-NEXT:    or a2, a2, a3
+; CHECK-ALIGNED-RV64-V-NEXT:    lbu a3, 0(a0)
 ; CHECK-ALIGNED-RV64-V-NEXT:    lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT:    or a3, a4, a3
+; CHECK-ALIGNED-RV64-V-NEXT:    slli a4, a4, 16
+; CHECK-ALIGNED-RV64-V-NEXT:    slli a1, a1, 24
+; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a1, a4
 ; CHECK-ALIGNED-RV64-V-NEXT:    lbu a4, 2(a0)
 ; CHECK-ALIGNED-RV64-V-NEXT:    lb a0, 3(a0)
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT:    or a2, a5, a2
+; CHECK-ALIGNED-RV64-V-NEXT:    or a3, a5, a3
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a4, a4, 16
 ; CHECK-ALIGNED-RV64-V-NEXT:    slli a0, a0, 24
 ; CHECK-ALIGNED-RV64-V-NEXT:    or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT:    or a0, a0, a2
+; CHECK-ALIGNED-RV64-V-NEXT:    or a1, a1, a2
+; CHECK-ALIGNED-RV64-V-NEXT:    or a0, a0, a3
 ; CHECK-ALIGNED-RV64-V-NEXT:    xor a0, a0, a1
 ; CHECK-ALIGNED-RV64-V-NEXT:    seqz a0, a0
 ; CHECK-ALIGNED-RV64-V-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index aa55bd7af59c5..c12e322f4780f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -1406,12 +1406,12 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
 ; RV32VB-NEXT:    slli a7, a7, 16
 ; RV32VB-NEXT:    slli t0, t0, 24
 ; RV32VB-NEXT:    slli a5, a5, 8
-; RV32VB-NEXT:    slli a6, a6, 16
-; RV32VB-NEXT:    slli t1, t1, 24
 ; RV32VB-NEXT:    or a7, t0, a7
 ; RV32VB-NEXT:    or a4, a4, a5
 ; RV32VB-NEXT:    lbu a5, 12(a0)
 ; RV32VB-NEXT:    lbu t0, 13(a0)
+; RV32VB-NEXT:    slli a6, a6, 16
+; RV32VB-NEXT:    slli t1, t1, 24
 ; RV32VB-NEXT:    or a6, t1, a6
 ; RV32VB-NEXT:    lbu t1, 14(a0)
 ; RV32VB-NEXT:    lbu a0, 15(a0)
@@ -1767,41 +1767,41 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
 ; RV32VB-NEXT:    lbu a7, 55(a0)
 ; RV32VB-NEXT:    lbu t0, 75(a0)
 ; RV32VB-NEXT:    lbu t1, 82(a0)
+; RV32VB-NEXT:    lbu t2, 154(a0)
+; RV32VB-NEXT:    lbu t3, 161(a0)
 ; RV32VB-NEXT:    slli a2, a2, 8
 ; RV32VB-NEXT:    slli a3, a3, 16
 ; RV32VB-NEXT:    slli a4, a4, 24
-; RV32VB-NEXT:    or a1, a1, a2
-; RV32VB-NEXT:    or a3, a4, a3
-; RV32VB-NEXT:    lbu a2, 93(a0)
-; RV32VB-NEXT:    lbu a4, 105(a0)
-; RV32VB-NEXT:    lbu t2, 124(a0)
-; RV32VB-NEXT:    lbu t3, 144(a0)
 ; RV32VB-NEXT:    slli a7, a7, 8
 ; RV32VB-NEXT:    slli a5, a5, 16
 ; RV32VB-NEXT:    slli t0, t0, 24
-; RV32VB-NEXT:    slli a2, a2, 8
-; RV32VB-NEXT:    or a6, a6, a7
-; RV32VB-NEXT:    or a5, t0, a5
-; RV32VB-NEXT:    lbu a7, 154(a0)
-; RV32VB-NEXT:    lbu t0, 161(a0)
-; RV32VB-NEXT:    or a2, t1, a2
+; RV32VB-NEXT:    or a1, a1, a2
+; RV32VB-NEXT:    or a3, a4, a3
+; RV32VB-NEXT:    or a2, a6, a7
+; RV32VB-NEXT:    or a4, t0, a5
+; RV32VB-NEXT:    lbu a5, 93(a0)
+; RV32VB-NEXT:    lbu a6, 105(a0)
+; RV32VB-NEXT:    lbu a7, 124(a0)
+; RV32VB-NEXT:    lbu t0, 144(a0)
+; RV32VB-NEXT:    slli a5, a5, 8
+; RV32VB-NEXT:    or a5, t1, a5
 ; RV32VB-NEXT:    lbu a0, 163(a0)
-; RV32VB-NEXT:    slli a4, a4, 16
-; RV32VB-NEXT:    slli t0, t0, 24
-; RV32VB-NEXT:    or a4, t0, a4
+; RV32VB-NEXT:    slli a6, a6, 16
+; RV32VB-NEXT:    slli t3, t3, 24
+; RV32VB-NEXT:    or a6, t3, a6
 ; RV32VB-NEXT:    slli a0, a0, 8
-; RV32VB-NEXT:    or a0, t2, a0
-; RV32VB-NEXT:    slli t3, t3, 16
-; RV32VB-NEXT:    slli a7, a7, 24
-; RV32VB-NEXT:    or a7, a7, t3
+; RV32VB-NEXT:    or a0, a7, a0
+; RV32VB-NEXT:    slli t0, t0, 16
+; RV32VB-NEXT:    slli t2, t2, 24
+; RV32VB-NEXT:    or a7, t2, t0
 ; RV32VB-NEXT:    or a1, a1, a3
-; RV32VB-NEXT:    or a3, a6, a5
 ; RV32VB-NEXT:    or a2, a2, a4
+; RV32VB-NEXT:    or a3, a5, a6
 ; RV32VB-NEXT:    or a0, a0, a7
 ; RV32VB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32VB-NEXT:    vmv.v.x v8, a1
-; RV32VB-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32VB-NEXT:    vslide1down.vx v8, v8, a2
+; RV32VB-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32VB-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32VB-NEXT:    ret
 ;
@@ -1893,52 +1893,52 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
 ;
 ; RVA22U64-LABEL: buildvec_v16i8_loads_gather:
 ; RVA22U64:       # %bb.0:
-; RVA22U64-NEXT:    lbu a1, 0(a0)
-; RVA22U64-NEXT:    lbu a2, 1(a0)
-; RVA22U64-NEXT:    lbu a3, 22(a0)
-; RVA22U64-NEXT:    lbu a4, 31(a0)
-; RVA22U64-NEXT:    lbu a6, 623(a0)
-; RVA22U64-NEXT:    lbu t0, 44(a0)
-; RVA22U64-NEXT:    lbu a7, 55(a0)
-; RVA22U64-NEXT:    lbu a5, 75(a0)
-; RVA22U64-NEXT:    lbu t1, 82(a0)
-; RVA22U64-NEXT:    slli a2, a2, 8
-; RVA22U64-NEXT:    slli a3, a3, 16
-; RVA22U64-NEXT:    slli a4, a4, 24
-; RVA22U64-NEXT:    or t2, a1, a2
-; RVA22U64-NEXT:    or t3, a4, a3
-; RVA22U64-NEXT:    lbu a2, 93(a0)
-; RVA22U64-NEXT:    lbu t4, 105(a0)
-; RVA22U64-NEXT:    lbu t6, 124(a0)
-; RVA22U64-NEXT:    lbu t5, 144(a0)
-; RVA22U64-NEXT:    slli t0, t0, 32
-; RVA22U64-NEXT:    slli a7, a7, 40
-; RVA22U64-NEXT:    slli a6, a6, 48
-; RVA22U64-NEXT:    slli a5, a5, 56
-; RVA22U64-NEXT:    slli a2, a2, 8
-; RVA22U64-NEXT:    or a7, a7, t0
-; RVA22U64-NEXT:    or a5, a5, a6
-; RVA22U64-NEXT:    lbu a3, 154(a0)
-; RVA22U64-NEXT:    lbu a1, 161(a0)
-; RVA22U64-NEXT:    or a2, t1, a2
+; RVA22U64-NEXT:    lbu a6, 0(a0)
+; RVA22U64-NEXT:    lbu a7, 1(a0)
+; RVA22U64-NEXT:    lbu t0, 22(a0)
+; RVA22U64-NEXT:    lbu t1, 31(a0)
+; RVA22U64-NEXT:    lbu t2, 623(a0)
+; RVA22U64-NEXT:    lbu a1, 44(a0)
+; RVA22U64-NEXT:    lbu a2, 55(a0)
+; RVA22U64-NEXT:    lbu a3, 75(a0)
+; RVA22U64-NEXT:    lbu t3, 82(a0)
+; RVA22U64-NEXT:    lbu t4, 154(a0)
+; RVA22U64-NEXT:    lbu t5, 161(a0)
+; RVA22U64-NEXT:    slli a7, a7, 8
+; RVA22U64-NEXT:    slli t0, t0, 16
+; RVA22U64-NEXT:    slli t1, t1, 24
+; RVA22U64-NEXT:    slli a1, a1, 32
+; RVA22U64-NEXT:    slli a2, a2, 40
+; RVA22U64-NEXT:    slli t2, t2, 48
+; RVA22U64-NEXT:    slli a3, a3, 56
+; RVA22U64-NEXT:    or a6, a6, a7
+; RVA22U64-NEXT:    or t0, t1, t0
+; RVA22U64-NEXT:    or a7, a2, a1
+; RVA22U64-NEXT:    or a2, a3, t2
+; RVA22U64-NEXT:    lbu a3, 93(a0)
+; RVA22U64-NEXT:    lbu a5, 105(a0)
+; RVA22U64-NEXT:    lbu a1, 124(a0)
+; RVA22U64-NEXT:    lbu a4, 144(a0)
+; RVA22U64-NEXT:    slli a3, a3, 8
+; RVA22U64-NEXT:    or a3, t3, a3
 ; RVA22U64-NEXT:    lbu a0, 163(a0)
-; RVA22U64-NEXT:    slli t4, t4, 16
-; RVA22U64-NEXT:    slli a1, a1, 24
-; RVA22U64-NEXT:    or a1, a1, t4
-; RVA22U64-NEXT:    slli t6, t6, 32
+; RVA22U64-NEXT:    slli a5, a5, 16
+; RVA22U64-NEXT:    slli t5, t5, 24
+; RVA22U64-NEXT:    or a5, t5, a5
+; RVA22U64-NEXT:    slli a1, a1, 32
 ; RVA22U64-NEXT:    slli a0, a0, 40
-; RVA22U64-NEXT:    or a0, a0, t6
-; RVA22U64-NEXT:    slli t5, t5, 48
-; RVA22U64-NEXT:    slli a3, a3, 56
-; RVA22U64-NEXT:    or a3, a3, t5
-; RVA22U64-NEXT:    or a4, t2, t3
-; RVA22U64-NEXT:    or a5, a5, a7
-; RVA22U64-NEXT:    or a1, a1, a2
-; RVA22U64-NEXT:    or a0, a0, a3
-; RVA22U64-NEXT:    or a4, a4, a5
 ; RVA22U64-NEXT:    or a0, a0, a1
+; RVA22U64-NEXT:    slli a4, a4, 48
+; RVA22U64-NEXT:    slli t4, t4, 56
+; RVA22U64-NEXT:    or a1, t4, a4
+; RVA22U64-NEXT:    or a4, a6, t0
+; RVA22U64-NEXT:    or a2, a2, a7
+; RVA22U64-NEXT:    or a3, a3, a5
+; RVA22U64-NEXT:    or a0, a0, a1
+; RVA22U64-NEXT:    or a2, a2, a4
+; RVA22U64-NEXT:    or a0, a0, a3
 ; RVA22U64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RVA22U64-NEXT:    vmv.v.x v8, a4
+; RVA22U64-NEXT:    vmv.v.x v8, a2
 ; RVA22U64-NEXT:    vslide1down.vx v8, v8, a0
 ; RVA22U64-NEXT:    ret
 ;
@@ -2111,24 +2111,24 @@ define <16 x i8> @buildvec_v16i8_undef_low_half(ptr %p) {
 ;
 ; RV32VB-LABEL: buildvec_v16i8_undef_low_half:
 ; RV32VB:       # %bb.0:
-; RV32VB-NEXT:    lbu a1, 93(a0)
-; RV32VB-NEXT:    lbu a2, 82(a0)
-; RV32VB-NEXT:    lbu a3, 105(a0)
-; RV32VB-NEXT:    lbu a4, 124(a0)
-; RV32VB-NEXT:    slli a1, a1, 8
-; RV32VB-NEXT:    lbu a5, 144(a0)
-; RV32VB-NEXT:    lbu a6, 154(a0)
-; RV32VB-NEXT:    lbu a7, 161(a0)
-; RV32VB-NEXT:    or a1, a2, a1
+; RV32VB-NEXT:    lbu a1, 82(a0)
+; RV32VB-NEXT:    lbu a2, 93(a0)
+; RV32VB-NEXT:    lbu a3, 144(a0)
+; RV32VB-NEXT:    lbu a4, 154(a0)
+; RV32VB-NEXT:    lbu a5, 161(a0)
+; RV32VB-NEXT:    lbu a6, 105(a0)
+; RV32VB-NEXT:    lbu a7, 124(a0)
+; RV32VB-NEXT:    slli a2, a2, 8
+; RV32VB-NEXT:    or a1, a1, a2
 ; RV32VB-NEXT:    lbu a0, 163(a0)
-; RV32VB-NEXT:    slli a3, a3, 16
-; RV32VB-NEXT:    slli a7, a7, 24
-; RV32VB-NEXT:    or a2, a7, a3
+; RV32VB-NEXT:    slli a6, a6, 16
+; RV32VB-NEXT:    slli a5, a5, 24
+; RV32VB-NEXT:    or a2, a5, a6
 ; RV32VB-NEXT:    slli a0, a0, 8
-; RV32VB-NEXT:    or a0, a4, a0
-; RV32VB-NEXT:    slli a5, a5, 16
-; RV32VB-NEXT:    slli a6, a6, 24
-; RV32VB-NEXT:    or a3, a6, a5
+; RV32VB-NEXT:    or a0, a7, a0
+; RV32VB-NEXT:    slli a3, a3, 16
+; RV32VB-NEXT:    slli a4, a4, 24
+; RV32VB-NEXT:    or a3, a4, a3
 ; RV32VB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32VB-NEXT:    vmv.v.i v8, 0
 ; RV32VB-NEXT:    or a1, a1, a2
@@ -2186,25 +2186,25 @@ define <16 x i8> @buildvec_v16i8_undef_low_half(ptr %p) {
 ;
 ; RVA22U64-LABEL: buildvec_v16i8_undef_low_half:
 ; RVA22U64:       # %bb.0:
-; RVA22U64-NEXT:    lbu a1, 93(a0)
 ; RVA22U64-NEXT:    lbu a6, 82(a0)
-; RVA22U64-NEXT:    lbu a7, 105(a0)
-; RVA22U64-NEXT:    lbu a4, 124(a0)
-; RVA22U64-NEXT:    slli a1, a1, 8
-; RVA22U64-NEXT:    lbu a5, 144(a0)
-; RVA22U64-NEXT:    lbu a2, 154(a0)
-; RVA22U64-NEXT:    lbu a3, 161(a0)
-; RVA22U64-NEXT:    or a1, a6, a1
+; RVA22U64-NEXT:    lbu a7, 93(a0)
+; RVA22U64-NEXT:    lbu t0, 144(a0)
+; RVA22U64-NEXT:    lbu a4, 154(a0)
+; RVA22U64-NEXT:    lbu a5, 161(a0)
+; RVA22U64-NEXT:    lbu a1, 105(a0)
+; RVA22U64-NEXT:    lbu a2, 124(a0)
+; RVA22U64-NEXT:    slli a7, a7, 8
+; RVA22U64-NEXT:    or a3, a6, a7
 ; RVA22U64-NEXT:    lbu a0, 163(a0)
-; RVA22U64-NEXT:    slli a7, a7, 16
-; RVA22U64-NEXT:    slli a3, a3, 24
-; RVA22U64-NEXT:    or a3, a3, a7
-; RVA22U64-NEXT:    slli a4, a4, 32
+; RVA22U64-NEXT:    slli a1, a1, 16
+; RVA22U64-NEXT:    slli a5, a5, 24
+; RVA22U64-NEXT:    or a1, a1, a5
+; RVA22U64-NEXT:    slli a2, a2, 32
 ; RVA22U64-NEXT:    slli a0, a0, 40
-; RVA22U64-NEXT:    or a0, a0, a4
-; RVA22U64-NEXT:    slli a5, a5, 48
-; RVA22U64-NEXT:    slli a2, a2, 56
-; RVA22U64-NEXT:    or a2, a2, a5
+; RVA22U64-NEXT:    or a0, a0, a2
+; RVA22U64-NEXT:    slli t0, t0, 48
+; RVA22U64-NEXT:    slli a4, a4, 56
+; RVA22U64-NEXT:    or a2, a4, t0
 ; RVA22U64-NEXT:    or a1, a1, a3
 ; RVA22U64-NEXT:    or a0, a0, a2
 ; RVA22U64-NEXT:    or a0, a0, a1
@@ -2313,25 +2313,25 @@ define <16 x i8> @buildvec_v16i8_undef_high_half(ptr %p) {
 ; RV32VB-LABEL: buildvec_v16i8_undef_high_half:
 ; RV32VB:       # %bb.0:
 ; RV32VB-NEXT:    lbu a1, 1(a0)
-; RV32VB-NEXT:    lbu a2, 22(a0)
-; RV32VB-NEXT:    lbu a3, 31(a0)
-; RV32VB-NEXT:    lbu a4, 0(a0)
+; RV32VB-NEXT:    lbu a2, 0(a0)
+; RV32VB-NEXT:    lbu a3, 22(a0)
+; RV32VB-NEXT:    lbu a4, 31(a0)
 ; RV32VB-NEXT:    slli a1, a1, 8
-; RV32VB-NEXT:    slli a2, a2, 16
-; RV32VB-NEXT:    slli a3, a3, 24
-; RV32VB-NEXT:    or a1, a4, a1
-; RV32VB-NEXT:    lbu a4, 44(a0)
+; RV32VB-NEXT:    or a1, a2, a1
+; RV32VB-NEXT:    lbu a2, 44(a0)
 ; RV32VB-NEXT:    lbu a5, 55(a0)
-; RV32VB-NEXT:    or a2, a3, a2
-; RV32VB-NEXT:    lbu a3, 623(a0)
+; RV32VB-NEXT:    slli a3, a3, 16
+; RV32VB-NEXT:    slli a4, a4, 24
+; RV32VB-NEXT:    or a3, a4, a3
+; RV32VB-NEXT:    lbu a4, 623(a0)
 ; RV32VB-NEXT:    lbu a0, 75(a0)
 ; RV32VB-NEXT:    slli a5, a5, 8
-; RV32VB-NEXT:    or a4, a4, a5
-; RV32VB-NEXT:    slli a3, a3, 16
+; RV32VB-NEXT:    or a2, a2, a5
+; RV32VB-NEXT:    slli a4, a4, 16
 ; RV32VB-NEXT:    slli a0, a0, 24
-; RV32VB-NEXT:    or a0, a0, a3
-; RV32VB-NEXT:    or a1, a1, a2
-; RV32VB-NEXT:    or a0, a4, a0
+; RV32VB-NEXT:    or a0, a0, a4
+; RV32VB-NEXT:    or a1, a1, a3
+; RV32VB-NEXT:    or a0, a2, a0
 ; RV32VB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32VB-NEXT:    vmv.v.x v8, a1
 ; RV32VB-NEXT:    vslide1down.vx v8, v8, a0
@@ -2389,26 +2389,26 @@ define <16 x i8> @buildvec_v16i8_undef_high_half(ptr %p) {
 ; RVA22U64-LABEL: buildvec_v16i8_undef_high_half:
 ; RVA22U64:       # %bb.0:
 ; RVA22U64-NEXT:    lbu a1, 1(a0)
-; RVA22U64-NEXT:    lbu a2, 22(a0)
-; RVA22U64-NEXT:    lbu a3, 31(a0)
-; RVA22U64-NEXT:    lbu a4, 0(a0)
+; RVA22U64-NEXT:    lbu a2, 0(a0)
+; RVA22U64-NEXT:    lbu a3, 22(a0)
+; RVA22U64-NEXT:    lbu a4, 31(a0)
 ; RVA22U64-NEXT:    slli a1, a1, 8
-; RVA22U64-NEXT:    slli a2, a2, 16
-; RVA22U64-NEXT:    slli a3, a3, 24
-; RVA22U64-NEXT:    or a1, a1, a4
-; RVA22U64-NEXT:    or a2, a2, a3
-; RVA22U64-NEXT:    lbu a3, 44(a0)
-; RVA22U64-NEXT:    lbu a4, 55(a0)
-; RVA22U64-NEXT:    lbu a5, 623(a0)
-; RVA22U64-NEXT:    lbu a0, 75(a0)
-; RVA22U64-NEXT:    slli a3, a3, 32
-; RVA22U64-NEXT:    slli a4, a4, 40
+; RVA22U64-NEXT:    or a1, a1, a2
+; RVA22U64-NEXT:    lbu a2, 44(a0)
+; RVA22U64-NEXT:    lbu a5, 55(a0)
+; RVA22U64-NEXT:    slli a3, a3, 16
+; RVA22U64-NEXT:    slli a4, a4, 24
 ; RVA22U64-NEXT:    or a3, a3, a4
-; RVA22U64-NEXT:    slli a5, a5, 48
+; RVA22U64-NEXT:    lbu a4, 623(a0)
+; RVA22U64-NEXT:    lbu a0, 75(a0)
+; RVA22U64-NEXT:    slli a2, a2, 32
+; RVA22U64-NEXT:    slli a5, a5, 40
+; RVA22U64-NEXT:    or a2, a2, a5
+; RVA22U64-NEXT:    slli a4, a4, 48
 ; RVA22U64-NEXT:    slli a0, a0, 56
-; RVA22U64-NEXT:    or a0, a0, a5
-; RVA22U64-NEXT:    or a1, a1, a2
-; RVA22U64-NEXT:    or a0, a0, a3
+; RVA22U64-NEXT:    or a0, a0, a4
+; RVA22U64-NEXT:    or a1, a1, a3
+; RVA22U64-NEXT:    or a0, a0, a2
 ; RVA22U64-NEXT:    or a0, a0, a1
 ; RVA22U64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RVA22U64-NEXT:    vmv.v.x v8, a0
@@ -2520,30 +2520,30 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
 ;
 ; RV32VB-LABEL: buildvec_v16i8_undef_edges:
 ; RV32VB:       # %bb.0:
-; RV32VB-NEXT:    lbu a1, 623(a0)
-; RV32VB-NEXT:    lbu a2, 55(a0)
-; RV32VB-NEXT:    lbu a3, 75(a0)
-; RV32VB-NEXT:    lbu a4, 31(a0)
-; RV32VB-NEXT:    lbu a5, 44(a0)
-; RV32VB-NEXT:    slli a2, a2, 8
-; RV32VB-NEXT:    slli a1, a1, 16
-; RV32VB-NEXT:    slli a3, a3, 24
-; RV32VB-NEXT:    or a2, a5, a2
-; RV32VB-NEXT:    lbu a5, 82(a0)
-; RV32VB-NEXT:    lbu a6, 93(a0)
+; RV32VB-NEXT:    lbu a1, 55(a0)
+; RV32VB-NEXT:    lbu a2, 31(a0)
+; RV32VB-NEXT:    lbu a3, 44(a0)
+; RV32VB-NEXT:    lbu a4, 623(a0)
+; RV32VB-NEXT:    lbu a5, 75(a0)
+; RV32VB-NEXT:    slli a1, a1, 8
 ; RV32VB-NEXT:    or a1, a3, a1
-; RV32VB-NEXT:    lbu a3, 105(a0)
+; RV32VB-NEXT:    lbu a3, 82(a0)
+; RV32VB-NEXT:    lbu a6, 93(a0)
+; RV32VB-NEXT:    slli a4, a4, 16
+; RV32VB-NEXT:    slli a5, a5, 24
+; RV32VB-NEXT:    or a4, a5, a4
+; RV32VB-NEXT:    lbu a5, 105(a0)
 ; RV32VB-NEXT:    lbu a0, 161(a0)
 ; RV32VB-NEXT:    slli a6, a6, 8
-; RV32VB-NEXT:    or a5, a5, a6
-; RV32VB-NEXT:    slli a3, a3, 16
+; RV32VB-NEXT:    or a3, a3, a6
+; RV32VB-NEXT:    slli a5, a5, 16
 ; RV32VB-NEXT:    slli a0, a0, 24
-; RV32VB-NEXT:    or a0, a0, a3
-; RV32VB-NEXT:    slli a4, a4, 24
-; RV32VB-NEXT:    or a1, a2, a1
-; RV32VB-NEXT:    or a0, a5, a0
+; RV32VB-NEXT:    or a0, a0, a5
+; RV32VB-NEXT:    slli a2, a2, 24
+; RV32VB-NEXT:    or a1, a1, a4
+; RV32VB-NEXT:    or a0, a3, a0
 ; RV32VB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32VB-NEXT:    vmv.v.x v8, a4
+; RV32VB-NEXT:    vmv.v.x v8, a2
 ; RV32VB-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32VB-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32VB-NEXT:    vslide1down.vx v8, v8, zero
@@ -2614,11 +2614,11 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
 ; RVA22U64-NEXT:    lbu a5, 75(a0)
 ; RVA22U64-NEXT:    slli a2, a2, 32
 ; RVA22U64-NEXT:    slli a3, a3, 40
-; RVA22U64-NEXT:    slli a4, a4, 48
-; RVA22U64-NEXT:    slli a5, a5, 56
 ; RVA22U64-NEXT:    or a2, a2, a3
 ; RVA22U64-NEXT:    lbu a3, 82(a0)
 ; RVA22U64-NEXT:    lbu a1, 93(a0)
+; RVA22U64-NEXT:    slli a4, a4, 48
+; RVA22U64-NEXT:    slli a5, a5, 56
 ; RVA22U64-NEXT:    or a4, a4, a5
 ; RVA22U64-NEXT:    lbu a5, 105(a0)
 ; RVA22U64-NEXT:    lbu a0, 161(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 76eca8e034303..71760a6723783 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -14191,37 +14191,37 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
 ; RV64ZVE32F-NEXT:    lbu t0, 13(a0)
 ; RV64ZVE32F-NEXT:    slli a2, a2, 8
 ; RV64ZVE32F-NEXT:    slli a4, a4, 8
+; RV64ZVE32F-NEXT:    slli a6, a6, 8
 ; RV64ZVE32F-NEXT:    or a1, a2, a1
 ; RV64ZVE32F-NEXT:    or a3, a4, a3
-; RV64ZVE32F-NEXT:    lbu a2, 16(a0)
-; RV64ZVE32F-NEXT:    lbu a4, 17(a0)
-; RV64ZVE32F-NEXT:    lbu t1, 20(a0)
-; RV64ZVE32F-NEXT:    lbu t2, 21(a0)
-; RV64ZVE32F-NEXT:    slli a6, a6, 8
-; RV64ZVE32F-NEXT:    or a5, a6, a5
+; RV64ZVE32F-NEXT:    or a2, a6, a5
+; RV64ZVE32F-NEXT:    lbu a4, 16(a0)
+; RV64ZVE32F-NEXT:    lbu a5, 17(a0)
+; RV64ZVE32F-NEXT:    lbu a6, 20(a0)
+; RV64ZVE32F-NEXT:    lbu t1, 21(a0)
 ; RV64ZVE32F-NEXT:    slli t0, t0, 8
-; RV64ZVE32F-NEXT:    slli a4, a4, 8
-; RV64ZVE32F-NEXT:    slli t2, t2, 8
-; RV64ZVE32F-NEXT:    or a6, t0, a7
-; RV64ZVE32F-NEXT:    or a2, a4, a2
-; RV64ZVE32F-NEXT:    lbu a4, 24(a0)
-; RV64ZVE32F-NEXT:    lbu a7, 25(a0)
-; RV64ZVE32F-NEXT:    or t0, t2, t1
+; RV64ZVE32F-NEXT:    slli a5, a5, 8
+; RV64ZVE32F-NEXT:    or a7, t0, a7
+; RV64ZVE32F-NEXT:    or a4, a5, a4
+; RV64ZVE32F-NEXT:    lbu a5, 24(a0)
+; RV64ZVE32F-NEXT:    lbu t0, 25(a0)
+; RV64ZVE32F-NEXT:    slli t1, t1, 8
+; RV64ZVE32F-NEXT:    or a6, t1, a6
 ; RV64ZVE32F-NEXT:    lbu t1, 28(a0)
 ; RV64ZVE32F-NEXT:    lbu a0, 29(a0)
-; RV64ZVE32F-NEXT:    slli a7, a7, 8
-; RV64ZVE32F-NEXT:    or a4, a7, a4
+; RV64ZVE32F-NEXT:    slli t0, t0, 8
+; RV64ZVE32F-NEXT:    or a5, t0, a5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
 ; RV64ZVE32F-NEXT:    slli a0, a0, 8
 ; RV64ZVE32F-NEXT:    or a0, a0, t1
 ; RV64ZVE32F-NEXT:    vmv.v.x v8, a1
-; RV64ZVE32F-NEXT:    vmv.v.x v9, a2
+; RV64ZVE32F-NEXT:    vmv.v.x v9, a4
 ; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, t0
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a6
+; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a6
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a7
 ; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a0
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr125306.ll b/llvm/test/CodeGen/RISCV/rvv/pr125306.ll
index 9400c381bc87c..eee57f489cb10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr125306.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr125306.ll
@@ -43,24 +43,24 @@ define <2 x i32> @main(ptr %0) {
 ; CHECK-NEXT:    sh zero, -1710(a5)
 ; CHECK-NEXT:    sh zero, -784(a5)
 ; CHECK-NEXT:    sh zero, 142(a5)
-; CHECK-NEXT:    lw a5, -304(a1)
+; CHECK-NEXT:    lw a5, 1244(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vadd.vi v9, v11, -1
 ; CHECK-NEXT:    vse32.v v10, (a3)
+; CHECK-NEXT:    lw a3, -188(a1)
 ; CHECK-NEXT:    sh zero, 0(a0)
-; CHECK-NEXT:    lw a0, -188(a1)
+; CHECK-NEXT:    lw a0, -304(a1)
+; CHECK-NEXT:    vadd.vi v9, v11, -1
 ; CHECK-NEXT:    vse32.v v10, (a2)
 ; CHECK-NEXT:    lw a2, -188(a1)
+; CHECK-NEXT:    vmv.v.x v8, a3
 ; CHECK-NEXT:    lw a3, 1244(a1)
-; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    lw a0, 1244(a1)
 ; CHECK-NEXT:    lw a1, -304(a1)
-; CHECK-NEXT:    vmv.v.x v10, a3
-; CHECK-NEXT:    vmv.v.x v11, a5
+; CHECK-NEXT:    vmv.v.x v10, a5
+; CHECK-NEXT:    vmv.v.x v11, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, zero
 ; CHECK-NEXT:    vslide1down.vx v10, v10, zero
 ; CHECK-NEXT:    vmin.vv v8, v10, v8
-; CHECK-NEXT:    vmv.s.x v10, a0
+; CHECK-NEXT:    vmv.s.x v10, a3
 ; CHECK-NEXT:    vslide1down.vx v11, v11, zero
 ; CHECK-NEXT:    vmin.vx v10, v10, a2
 ; CHECK-NEXT:    vmin.vx v10, v10, a1
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index 1cdfaa5c4154b..068b9f7620021 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -133,49 +133,49 @@ define i64 @load_i64(ptr %p) {
 ; RV32I-LABEL: load_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a1, 1(a0)
-; RV32I-NEXT:    lbu a2, 2(a0)
-; RV32I-NEXT:    lbu a3, 3(a0)
-; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a2, 0(a0)
+; RV32I-NEXT:    lbu a3, 2(a0)
+; RV32I-NEXT:    lbu a4, 3(a0)
 ; RV32I-NEXT:    slli a1, a1, 8
-; RV32I-NEXT:    slli a2, a2, 16
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    or a1, a1, a4
-; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    lbu a2, 4(a0)
 ; RV32I-NEXT:    lbu a5, 5(a0)
-; RV32I-NEXT:    or a2, a3, a2
-; RV32I-NEXT:    lbu a3, 6(a0)
+; RV32I-NEXT:    slli a3, a3, 16
+; RV32I-NEXT:    slli a4, a4, 24
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    lbu a4, 6(a0)
 ; RV32I-NEXT:    lbu a0, 7(a0)
 ; RV32I-NEXT:    slli a5, a5, 8
-; RV32I-NEXT:    or a4, a5, a4
-; RV32I-NEXT:    slli a3, a3, 16
+; RV32I-NEXT:    or a2, a5, a2
+; RV32I-NEXT:    slli a4, a4, 16
 ; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a3, a0, a3
-; RV32I-NEXT:    or a0, a2, a1
-; RV32I-NEXT:    or a1, a3, a4
+; RV32I-NEXT:    or a4, a0, a4
+; RV32I-NEXT:    or a0, a3, a1
+; RV32I-NEXT:    or a1, a4, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: load_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lbu a1, 1(a0)
-; RV64I-NEXT:    lbu a2, 2(a0)
-; RV64I-NEXT:    lbu a3, 3(a0)
-; RV64I-NEXT:    lbu a4, 0(a0)
+; RV64I-NEXT:    lbu a2, 0(a0)
+; RV64I-NEXT:    lbu a3, 2(a0)
+; RV64I-NEXT:    lbu a4, 3(a0)
 ; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    slli a2, a2, 16
-; RV64I-NEXT:    slli a3, a3, 24
-; RV64I-NEXT:    or a1, a1, a4
-; RV64I-NEXT:    lbu a4, 4(a0)
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    lbu a2, 4(a0)
 ; RV64I-NEXT:    lbu a5, 5(a0)
-; RV64I-NEXT:    or a2, a3, a2
-; RV64I-NEXT:    lbu a3, 6(a0)
+; RV64I-NEXT:    slli a3, a3, 16
+; RV64I-NEXT:    slli a4, a4, 24
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    lbu a4, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli a5, a5, 8
-; RV64I-NEXT:    or a4, a5, a4
-; RV64I-NEXT:    slli a3, a3, 16
+; RV64I-NEXT:    or a2, a5, a2
+; RV64I-NEXT:    slli a4, a4, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    or a0, a0, a4
+; RV64I-NEXT:    or a1, a3, a1
+; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 895d84b38be32..de7b256401842 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -209,9 +209,9 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64E-FPELIM:       # %bb.0:
 ; LP64E-FPELIM-NEXT:    addi sp, sp, -56
 ; LP64E-FPELIM-NEXT:    .cfi_def_cfa_offset 56
+; LP64E-FPELIM-NEXT:    sd a1, 16(sp)
 ; LP64E-FPELIM-NEXT:    addi a0, sp, 20
 ; LP64E-FPELIM-NEXT:    sd a0, 0(sp)
-; LP64E-FPELIM-NEXT:    sd a1, 16(sp)
 ; LP64E-FPELIM-NEXT:    lw a0, 16(sp)
 ; LP64E-FPELIM-NEXT:    sd a5, 48(sp)
 ; LP64E-FPELIM-NEXT:    sd a2, 24(sp)
@@ -231,9 +231,9 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64E-WITHFP-NEXT:    .cfi_offset s0, -64
 ; LP64E-WITHFP-NEXT:    addi s0, sp, 24
 ; LP64E-WITHFP-NEXT:    .cfi_def_cfa s0, 48
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
 ; LP64E-WITHFP-NEXT:    addi a0, s0, 12
 ; LP64E-WITHFP-NEXT:    sd a0, -24(s0)
-; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
 ; LP64E-WITHFP-NEXT:    lw a0, 8(s0)
 ; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
 ; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
@@ -3070,12 +3070,12 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64E-FPELIM-NEXT:    sub sp, sp, a0
 ; LP64E-FPELIM-NEXT:    .cfi_def_cfa_offset 100000064
 ; LP64E-FPELIM-NEXT:    lui a0, 24414
-; LP64E-FPELIM-NEXT:    addiw a0, a0, 284
 ; LP64E-FPELIM-NEXT:    add a0, sp, a0
-; LP64E-FPELIM-NEXT:    sd a0, 8(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 280(a0)
 ; LP64E-FPELIM-NEXT:    lui a0, 24414
+; LP64E-FPELIM-NEXT:    addiw a0, a0, 284
 ; LP64E-FPELIM-NEXT:    add a0, sp, a0
-; LP64E-FPELIM-NEXT:    sd a1, 280(a0)
+; LP64E-FPELIM-NEXT:    sd a0, 8(sp)
 ; LP64E-FPELIM-NEXT:    lui a0, 24414
 ; LP64E-FPELIM-NEXT:    add a0, sp, a0
 ; LP64E-FPELIM-NEXT:    lw a0, 280(a0)
@@ -3110,11 +3110,11 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64E-WITHFP-NEXT:    lui a0, 24414
 ; LP64E-WITHFP-NEXT:    addiw a0, a0, -1704
 ; LP64E-WITHFP-NEXT:    sub sp, sp, a0
-; LP64E-WITHFP-NEXT:    addi a0, s0, 12
-; LP64E-WITHFP-NEXT:    lui a6, 24414
-; LP64E-WITHFP-NEXT:    sub a6, s0, a6
-; LP64E-WITHFP-NEXT:    sd a0, -288(a6)
 ; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a0, s0, 12
+; LP64E-WITHFP-NEXT:    lui a1, 24414
+; LP64E-WITHFP-NEXT:    sub a1, s0, a1
+; LP64E-WITHFP-NEXT:    sd a0, -288(a1)
 ; LP64E-WITHFP-NEXT:    lw a0, 8(s0)
 ; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
 ; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll
index 437b7e557718c..2a0228f95f1cd 100644
--- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -30,25 +30,25 @@ define void @lshr_4bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: lshr_4bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    lbu a3, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a6, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a5
 ; RV32I-NEXT:    lbu a5, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a3, a6, a3
+; RV32I-NEXT:    or a4, a6, a4
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a4
 ; RV32I-NEXT:    slli a1, a1, 3
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 16
@@ -94,25 +94,25 @@ define void @shl_4bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: shl_4bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    lbu a3, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a6, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a5
 ; RV32I-NEXT:    lbu a5, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a3, a6, a3
+; RV32I-NEXT:    or a4, a6, a4
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a4
 ; RV32I-NEXT:    slli a1, a1, 3
 ; RV32I-NEXT:    sll a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 16
@@ -158,25 +158,25 @@ define void @ashr_4bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: ashr_4bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    lbu a3, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a6, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a5
 ; RV32I-NEXT:    lbu a5, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a3, a6, a3
+; RV32I-NEXT:    or a4, a6, a4
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a4
 ; RV32I-NEXT:    slli a1, a1, 3
 ; RV32I-NEXT:    sra a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 16
@@ -220,12 +220,12 @@ define void @lshr_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or a0, a0, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t1, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or t0, t2, t0
 ; RV64I-NEXT:    lbu t2, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -264,26 +264,26 @@ define void @lshr_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: lshr_8bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 5(a0)
-; RV32I-NEXT:    lbu a4, 6(a0)
-; RV32I-NEXT:    lbu a5, 7(a0)
-; RV32I-NEXT:    lbu a6, 4(a0)
+; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    lbu a5, 6(a0)
+; RV32I-NEXT:    lbu a6, 7(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a3, a3, a6
-; RV32I-NEXT:    lbu a6, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a7, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
-; RV32I-NEXT:    lbu a5, 2(a1)
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a6, a6, 24
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    lbu a6, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a7, a7, 8
-; RV32I-NEXT:    or a6, a7, a6
-; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    or a4, a7, a4
+; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a5, a4, a3
-; RV32I-NEXT:    or a4, a1, a6
-; RV32I-NEXT:    slli a4, a4, 3
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    or a5, a5, a3
+; RV32I-NEXT:    or a1, a1, a4
+; RV32I-NEXT:    slli a4, a1, 3
 ; RV32I-NEXT:    addi a3, a4, -32
 ; RV32I-NEXT:    srl a1, a5, a4
 ; RV32I-NEXT:    bltz a3, .LBB3_2
@@ -356,12 +356,12 @@ define void @shl_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or a0, a0, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t1, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or t0, t2, t0
 ; RV64I-NEXT:    lbu t2, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -400,26 +400,26 @@ define void @shl_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: shl_8bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a6, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a6, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a3, a3, a6
-; RV32I-NEXT:    lbu a6, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a7, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
-; RV32I-NEXT:    lbu a5, 2(a1)
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a6, a6, 24
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    lbu a6, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a7, a7, 8
-; RV32I-NEXT:    or a6, a7, a6
-; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    or a4, a7, a4
+; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a5, a4, a3
-; RV32I-NEXT:    or a4, a1, a6
-; RV32I-NEXT:    slli a4, a4, 3
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    or a5, a5, a3
+; RV32I-NEXT:    or a1, a1, a4
+; RV32I-NEXT:    slli a4, a1, 3
 ; RV32I-NEXT:    addi a3, a4, -32
 ; RV32I-NEXT:    sll a1, a5, a4
 ; RV32I-NEXT:    bltz a3, .LBB4_2
@@ -492,12 +492,12 @@ define void @ashr_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or a0, a0, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t1, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or t0, t2, t0
 ; RV64I-NEXT:    lbu t2, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -535,14 +535,14 @@ define void @ashr_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ;
 ; RV32I-LABEL: ashr_8bytes:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lbu a3, 5(a0)
-; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    lbu a3, 4(a0)
+; RV32I-NEXT:    lbu a4, 5(a0)
 ; RV32I-NEXT:    lbu a5, 6(a0)
 ; RV32I-NEXT:    lbu a6, 7(a0)
-; RV32I-NEXT:    slli a3, a3, 8
 ; RV32I-NEXT:    lbu a7, 0(a1)
 ; RV32I-NEXT:    lbu t0, 1(a1)
-; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    slli a4, a4, 8
+; RV32I-NEXT:    or a3, a4, a3
 ; RV32I-NEXT:    lbu a4, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli t0, t0, 8
@@ -629,12 +629,12 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t2, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -660,31 +660,31 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    j .LBB6_3
 ; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    lbu a6, 1(a0)
-; RV64I-NEXT:    lbu a7, 2(a0)
-; RV64I-NEXT:    lbu t0, 3(a0)
-; RV64I-NEXT:    lbu t1, 0(a0)
+; RV64I-NEXT:    lbu a7, 0(a0)
+; RV64I-NEXT:    lbu t0, 2(a0)
+; RV64I-NEXT:    lbu t1, 3(a0)
 ; RV64I-NEXT:    slli a6, a6, 8
-; RV64I-NEXT:    slli a7, a7, 16
-; RV64I-NEXT:    slli t0, t0, 24
-; RV64I-NEXT:    or a6, a6, t1
-; RV64I-NEXT:    lbu t1, 4(a0)
+; RV64I-NEXT:    or a6, a6, a7
+; RV64I-NEXT:    lbu a7, 4(a0)
 ; RV64I-NEXT:    lbu t2, 5(a0)
-; RV64I-NEXT:    or a7, t0, a7
-; RV64I-NEXT:    lbu t0, 6(a0)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t1, t1, 24
+; RV64I-NEXT:    or t0, t1, t0
+; RV64I-NEXT:    lbu t1, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli t2, t2, 8
-; RV64I-NEXT:    or t1, t2, t1
-; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    or a7, t2, a7
+; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    not a7, a4
-; RV64I-NEXT:    slli a5, a5, 1
 ; RV64I-NEXT:    or a0, a0, t1
+; RV64I-NEXT:    or a6, t0, a6
+; RV64I-NEXT:    not t0, a4
+; RV64I-NEXT:    slli a5, a5, 1
+; RV64I-NEXT:    or a0, a0, a7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    srl a0, a0, a4
-; RV64I-NEXT:    sll a4, a5, a7
+; RV64I-NEXT:    sll a4, a5, t0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:  .LBB6_3:
 ; RV64I-NEXT:    srai a3, a3, 63
@@ -868,12 +868,12 @@ define void @lshr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t2, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -899,31 +899,31 @@ define void @lshr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun
 ; RV64I-NEXT:    j .LBB7_3
 ; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    lbu a6, 1(a0)
-; RV64I-NEXT:    lbu a7, 2(a0)
-; RV64I-NEXT:    lbu t0, 3(a0)
-; RV64I-NEXT:    lbu t1, 0(a0)
+; RV64I-NEXT:    lbu a7, 0(a0)
+; RV64I-NEXT:    lbu t0, 2(a0)
+; RV64I-NEXT:    lbu t1, 3(a0)
 ; RV64I-NEXT:    slli a6, a6, 8
-; RV64I-NEXT:    slli a7, a7, 16
-; RV64I-NEXT:    slli t0, t0, 24
-; RV64I-NEXT:    or a6, a6, t1
-; RV64I-NEXT:    lbu t1, 4(a0)
+; RV64I-NEXT:    or a6, a6, a7
+; RV64I-NEXT:    lbu a7, 4(a0)
 ; RV64I-NEXT:    lbu t2, 5(a0)
-; RV64I-NEXT:    or a7, t0, a7
-; RV64I-NEXT:    lbu t0, 6(a0)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t1, t1, 24
+; RV64I-NEXT:    or t0, t1, t0
+; RV64I-NEXT:    lbu t1, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli t2, t2, 8
-; RV64I-NEXT:    or t1, t2, t1
-; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    or a7, t2, a7
+; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    not a7, a4
-; RV64I-NEXT:    slli a5, a5, 1
 ; RV64I-NEXT:    or a0, a0, t1
+; RV64I-NEXT:    or a6, t0, a6
+; RV64I-NEXT:    not t0, a4
+; RV64I-NEXT:    slli a5, a5, 1
+; RV64I-NEXT:    or a0, a0, a7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    srl a0, a0, a4
-; RV64I-NEXT:    sll a4, a5, a7
+; RV64I-NEXT:    sll a4, a5, t0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:  .LBB7_3:
 ; RV64I-NEXT:    srai a3, a3, 63
@@ -1083,12 +1083,12 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t2, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -1114,31 +1114,31 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    j .LBB8_3
 ; RV64I-NEXT:  .LBB8_2:
 ; RV64I-NEXT:    lbu a6, 9(a0)
-; RV64I-NEXT:    lbu a7, 10(a0)
-; RV64I-NEXT:    lbu t0, 11(a0)
-; RV64I-NEXT:    lbu t1, 8(a0)
+; RV64I-NEXT:    lbu a7, 8(a0)
+; RV64I-NEXT:    lbu t0, 10(a0)
+; RV64I-NEXT:    lbu t1, 11(a0)
 ; RV64I-NEXT:    slli a6, a6, 8
-; RV64I-NEXT:    slli a7, a7, 16
-; RV64I-NEXT:    slli t0, t0, 24
-; RV64I-NEXT:    or a6, a6, t1
-; RV64I-NEXT:    lbu t1, 12(a0)
+; RV64I-NEXT:    or a6, a6, a7
+; RV64I-NEXT:    lbu a7, 12(a0)
 ; RV64I-NEXT:    lbu t2, 13(a0)
-; RV64I-NEXT:    or a7, t0, a7
-; RV64I-NEXT:    lbu t0, 14(a0)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t1, t1, 24
+; RV64I-NEXT:    or t0, t1, t0
+; RV64I-NEXT:    lbu t1, 14(a0)
 ; RV64I-NEXT:    lbu a0, 15(a0)
 ; RV64I-NEXT:    slli t2, t2, 8
-; RV64I-NEXT:    or t1, t2, t1
-; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    or a7, t2, a7
+; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    not a7, a4
-; RV64I-NEXT:    srli a5, a5, 1
 ; RV64I-NEXT:    or a0, a0, t1
+; RV64I-NEXT:    or a6, t0, a6
+; RV64I-NEXT:    not t0, a4
+; RV64I-NEXT:    srli a5, a5, 1
+; RV64I-NEXT:    or a0, a0, a7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    sll a0, a0, a4
-; RV64I-NEXT:    srl a4, a5, a7
+; RV64I-NEXT:    srl a4, a5, t0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:  .LBB8_3:
 ; RV64I-NEXT:    srai a3, a3, 63
@@ -1322,12 +1322,12 @@ define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t2, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -1353,31 +1353,31 @@ define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw
 ; RV64I-NEXT:    j .LBB9_3
 ; RV64I-NEXT:  .LBB9_2:
 ; RV64I-NEXT:    lbu a6, 9(a0)
-; RV64I-NEXT:    lbu a7, 10(a0)
-; RV64I-NEXT:    lbu t0, 11(a0)
-; RV64I-NEXT:    lbu t1, 8(a0)
+; RV64I-NEXT:    lbu a7, 8(a0)
+; RV64I-NEXT:    lbu t0, 10(a0)
+; RV64I-NEXT:    lbu t1, 11(a0)
 ; RV64I-NEXT:    slli a6, a6, 8
-; RV64I-NEXT:    slli a7, a7, 16
-; RV64I-NEXT:    slli t0, t0, 24
-; RV64I-NEXT:    or a6, a6, t1
-; RV64I-NEXT:    lbu t1, 12(a0)
+; RV64I-NEXT:    or a6, a6, a7
+; RV64I-NEXT:    lbu a7, 12(a0)
 ; RV64I-NEXT:    lbu t2, 13(a0)
-; RV64I-NEXT:    or a7, t0, a7
-; RV64I-NEXT:    lbu t0, 14(a0)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t1, t1, 24
+; RV64I-NEXT:    or t0, t1, t0
+; RV64I-NEXT:    lbu t1, 14(a0)
 ; RV64I-NEXT:    lbu a0, 15(a0)
 ; RV64I-NEXT:    slli t2, t2, 8
-; RV64I-NEXT:    or t1, t2, t1
-; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    or a7, t2, a7
+; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    not a7, a4
-; RV64I-NEXT:    srli a5, a5, 1
 ; RV64I-NEXT:    or a0, a0, t1
+; RV64I-NEXT:    or a6, t0, a6
+; RV64I-NEXT:    not t0, a4
+; RV64I-NEXT:    srli a5, a5, 1
+; RV64I-NEXT:    or a0, a0, a7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    sll a0, a0, a4
-; RV64I-NEXT:    srl a4, a5, a7
+; RV64I-NEXT:    srl a4, a5, t0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:  .LBB9_3:
 ; RV64I-NEXT:    srai a3, a3, 63
@@ -1538,12 +1538,12 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t2, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -1571,31 +1571,31 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    j .LBB10_3
 ; RV64I-NEXT:  .LBB10_2:
 ; RV64I-NEXT:    lbu a5, 1(a0)
-; RV64I-NEXT:    lbu a6, 2(a0)
-; RV64I-NEXT:    lbu a7, 3(a0)
-; RV64I-NEXT:    lbu t0, 0(a0)
+; RV64I-NEXT:    lbu a6, 0(a0)
+; RV64I-NEXT:    lbu a7, 2(a0)
+; RV64I-NEXT:    lbu t0, 3(a0)
 ; RV64I-NEXT:    slli a5, a5, 8
-; RV64I-NEXT:    slli a6, a6, 16
-; RV64I-NEXT:    slli a7, a7, 24
-; RV64I-NEXT:    or a5, a5, t0
-; RV64I-NEXT:    lbu t0, 4(a0)
+; RV64I-NEXT:    or a5, a5, a6
+; RV64I-NEXT:    lbu a6, 4(a0)
 ; RV64I-NEXT:    lbu t1, 5(a0)
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    lbu a7, 6(a0)
+; RV64I-NEXT:    slli a7, a7, 16
+; RV64I-NEXT:    slli t0, t0, 24
+; RV64I-NEXT:    or a7, t0, a7
+; RV64I-NEXT:    lbu t0, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli t1, t1, 8
-; RV64I-NEXT:    or t0, t1, t0
-; RV64I-NEXT:    slli a7, a7, 16
+; RV64I-NEXT:    or a6, t1, a6
+; RV64I-NEXT:    slli t0, t0, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a7
-; RV64I-NEXT:    or a5, a6, a5
-; RV64I-NEXT:    not a6, a3
-; RV64I-NEXT:    slli a4, a4, 1
 ; RV64I-NEXT:    or a0, a0, t0
+; RV64I-NEXT:    or a5, a7, a5
+; RV64I-NEXT:    not a7, a3
+; RV64I-NEXT:    slli a4, a4, 1
+; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a5
 ; RV64I-NEXT:    srl a0, a0, a3
-; RV64I-NEXT:    sll a3, a4, a6
+; RV64I-NEXT:    sll a3, a4, a7
 ; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:  .LBB10_3:
 ; RV64I-NEXT:    srli a3, a1, 56
@@ -1662,11 +1662,11 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    slli t4, t4, 16
 ; RV32I-NEXT:    slli t5, t5, 24
-; RV32I-NEXT:    slli t1, t1, 8
 ; RV32I-NEXT:    or a4, t3, a4
 ; RV32I-NEXT:    or t3, t5, t4
 ; RV32I-NEXT:    lbu t4, 0(a1)
 ; RV32I-NEXT:    lbu t5, 1(a1)
+; RV32I-NEXT:    slli t1, t1, 8
 ; RV32I-NEXT:    or t0, t1, t0
 ; RV32I-NEXT:    lbu t1, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
@@ -1778,12 +1778,12 @@ define void @ashr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 0(a1)
 ; RV64I-NEXT:    lbu t2, 1(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 2(a1)
 ; RV64I-NEXT:    lbu a1, 3(a1)
@@ -1811,31 +1811,31 @@ define void @ashr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun
 ; RV64I-NEXT:    j .LBB11_3
 ; RV64I-NEXT:  .LBB11_2:
 ; RV64I-NEXT:    lbu a5, 1(a0)
-; RV64I-NEXT:    lbu a6, 2(a0)
-; RV64I-NEXT:    lbu a7, 3(a0)
-; RV64I-NEXT:    lbu t0, 0(a0)
+; RV64I-NEXT:    lbu a6, 0(a0)
+; RV64I-NEXT:    lbu a7, 2(a0)
+; RV64I-NEXT:    lbu t0, 3(a0)
 ; RV64I-NEXT:    slli a5, a5, 8
-; RV64I-NEXT:    slli a6, a6, 16
-; RV64I-NEXT:    slli a7, a7, 24
-; RV64I-NEXT:    or a5, a5, t0
-; RV64I-NEXT:    lbu t0, 4(a0)
+; RV64I-NEXT:    or a5, a5, a6
+; RV64I-NEXT:    lbu a6, 4(a0)
 ; RV64I-NEXT:    lbu t1, 5(a0)
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    lbu a7, 6(a0)
+; RV64I-NEXT:    slli a7, a7, 16
+; RV64I-NEXT:    slli t0, t0, 24
+; RV64I-NEXT:    or a7, t0, a7
+; RV64I-NEXT:    lbu t0, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli t1, t1, 8
-; RV64I-NEXT:    or t0, t1, t0
-; RV64I-NEXT:    slli a7, a7, 16
+; RV64I-NEXT:    or a6, t1, a6
+; RV64I-NEXT:    slli t0, t0, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a7
-; RV64I-NEXT:    or a5, a6, a5
-; RV64I-NEXT:    not a6, a3
-; RV64I-NEXT:    slli a4, a4, 1
 ; RV64I-NEXT:    or a0, a0, t0
+; RV64I-NEXT:    or a5, a7, a5
+; RV64I-NEXT:    not a7, a3
+; RV64I-NEXT:    slli a4, a4, 1
+; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a5
 ; RV64I-NEXT:    srl a0, a0, a3
-; RV64I-NEXT:    sll a3, a4, a6
+; RV64I-NEXT:    sll a3, a4, a7
 ; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:  .LBB11_3:
 ; RV64I-NEXT:    srli a3, a1, 56
@@ -2061,12 +2061,12 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -2539,12 +2539,12 @@ define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -3376,12 +3376,12 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -3854,12 +3854,12 @@ define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounw
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -4691,12 +4691,12 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -5171,12 +5171,12 @@ define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) noun
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
index b2c130c2d7c10..78f63c72d0469 100644
--- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
@@ -29,25 +29,25 @@ define void @lshr_4bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: lshr_4bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    lbu a3, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a6, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a5
 ; RV32I-NEXT:    lbu a5, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a3, a6, a3
+; RV32I-NEXT:    or a4, a6, a4
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a4
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    srli a3, a0, 24
@@ -90,25 +90,25 @@ define void @shl_4bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: shl_4bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    lbu a3, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a6, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a5
 ; RV32I-NEXT:    lbu a5, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a3, a6, a3
+; RV32I-NEXT:    or a4, a6, a4
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a4
 ; RV32I-NEXT:    sll a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    srli a3, a0, 24
@@ -151,25 +151,25 @@ define void @ashr_4bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: ashr_4bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a0, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    lbu a3, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a6, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a5
 ; RV32I-NEXT:    lbu a5, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a3, a6, a3
+; RV32I-NEXT:    or a4, a6, a4
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a4
 ; RV32I-NEXT:    sra a0, a0, a1
 ; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    srli a3, a0, 24
@@ -211,12 +211,12 @@ define void @lshr_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or a0, a0, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 4(a1)
 ; RV64I-NEXT:    lbu t1, 5(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or t0, t2, t0
 ; RV64I-NEXT:    lbu t2, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -254,25 +254,25 @@ define void @lshr_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: lshr_8bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 5(a0)
-; RV32I-NEXT:    lbu a4, 6(a0)
-; RV32I-NEXT:    lbu a5, 7(a0)
-; RV32I-NEXT:    lbu a6, 4(a0)
+; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    lbu a5, 6(a0)
+; RV32I-NEXT:    lbu a6, 7(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a3, a3, a6
-; RV32I-NEXT:    lbu a6, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a7, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
-; RV32I-NEXT:    lbu a5, 2(a1)
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a6, a6, 24
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    lbu a6, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a7, a7, 8
-; RV32I-NEXT:    or a6, a7, a6
-; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    or a4, a7, a4
+; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a5, a4, a3
-; RV32I-NEXT:    or a4, a1, a6
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    or a5, a5, a3
+; RV32I-NEXT:    or a4, a1, a4
 ; RV32I-NEXT:    addi a3, a4, -32
 ; RV32I-NEXT:    srl a1, a5, a4
 ; RV32I-NEXT:    bltz a3, .LBB3_2
@@ -344,12 +344,12 @@ define void @shl_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or a0, a0, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 4(a1)
 ; RV64I-NEXT:    lbu t1, 5(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or t0, t2, t0
 ; RV64I-NEXT:    lbu t2, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -387,25 +387,25 @@ define void @shl_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-LABEL: shl_8bytes:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a3, 1(a0)
-; RV32I-NEXT:    lbu a4, 2(a0)
-; RV32I-NEXT:    lbu a5, 3(a0)
-; RV32I-NEXT:    lbu a6, 0(a0)
+; RV32I-NEXT:    lbu a4, 0(a0)
+; RV32I-NEXT:    lbu a5, 2(a0)
+; RV32I-NEXT:    lbu a6, 3(a0)
 ; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    slli a4, a4, 16
-; RV32I-NEXT:    slli a5, a5, 24
-; RV32I-NEXT:    or a3, a3, a6
-; RV32I-NEXT:    lbu a6, 0(a1)
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lbu a4, 0(a1)
 ; RV32I-NEXT:    lbu a7, 1(a1)
-; RV32I-NEXT:    or a4, a5, a4
-; RV32I-NEXT:    lbu a5, 2(a1)
+; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    slli a6, a6, 24
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    lbu a6, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli a7, a7, 8
-; RV32I-NEXT:    or a6, a7, a6
-; RV32I-NEXT:    slli a5, a5, 16
+; RV32I-NEXT:    or a4, a7, a4
+; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a5
-; RV32I-NEXT:    or a5, a4, a3
-; RV32I-NEXT:    or a4, a1, a6
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    or a5, a5, a3
+; RV32I-NEXT:    or a4, a1, a4
 ; RV32I-NEXT:    addi a3, a4, -32
 ; RV32I-NEXT:    sll a1, a5, a4
 ; RV32I-NEXT:    bltz a3, .LBB4_2
@@ -477,12 +477,12 @@ define void @ashr_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or a0, a0, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 4(a1)
 ; RV64I-NEXT:    lbu t1, 5(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    or t0, t2, t0
 ; RV64I-NEXT:    lbu t2, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -519,14 +519,14 @@ define void @ashr_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ;
 ; RV32I-LABEL: ashr_8bytes:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lbu a3, 5(a0)
-; RV32I-NEXT:    lbu a4, 4(a0)
+; RV32I-NEXT:    lbu a3, 4(a0)
+; RV32I-NEXT:    lbu a4, 5(a0)
 ; RV32I-NEXT:    lbu a5, 6(a0)
 ; RV32I-NEXT:    lbu a6, 7(a0)
-; RV32I-NEXT:    slli a3, a3, 8
 ; RV32I-NEXT:    lbu a7, 0(a1)
 ; RV32I-NEXT:    lbu t0, 1(a1)
-; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    slli a4, a4, 8
+; RV32I-NEXT:    or a3, a4, a3
 ; RV32I-NEXT:    lbu a4, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
 ; RV32I-NEXT:    slli t0, t0, 8
@@ -611,12 +611,12 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 4(a1)
 ; RV64I-NEXT:    lbu t2, 5(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -641,31 +641,31 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    j .LBB6_3
 ; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    lbu a6, 1(a0)
-; RV64I-NEXT:    lbu a7, 2(a0)
-; RV64I-NEXT:    lbu t0, 3(a0)
-; RV64I-NEXT:    lbu t1, 0(a0)
+; RV64I-NEXT:    lbu a7, 0(a0)
+; RV64I-NEXT:    lbu t0, 2(a0)
+; RV64I-NEXT:    lbu t1, 3(a0)
 ; RV64I-NEXT:    slli a6, a6, 8
-; RV64I-NEXT:    slli a7, a7, 16
-; RV64I-NEXT:    slli t0, t0, 24
-; RV64I-NEXT:    or a6, a6, t1
-; RV64I-NEXT:    lbu t1, 4(a0)
+; RV64I-NEXT:    or a6, a6, a7
+; RV64I-NEXT:    lbu a7, 4(a0)
 ; RV64I-NEXT:    lbu t2, 5(a0)
-; RV64I-NEXT:    or a7, t0, a7
-; RV64I-NEXT:    lbu t0, 6(a0)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t1, t1, 24
+; RV64I-NEXT:    or t0, t1, t0
+; RV64I-NEXT:    lbu t1, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli t2, t2, 8
-; RV64I-NEXT:    or t1, t2, t1
-; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    or a7, t2, a7
+; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    not a7, a4
-; RV64I-NEXT:    slli a5, a5, 1
 ; RV64I-NEXT:    or a0, a0, t1
+; RV64I-NEXT:    or a6, t0, a6
+; RV64I-NEXT:    not t0, a4
+; RV64I-NEXT:    slli a5, a5, 1
+; RV64I-NEXT:    or a0, a0, a7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    srl a0, a0, a4
-; RV64I-NEXT:    sll a4, a5, a7
+; RV64I-NEXT:    sll a4, a5, t0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:  .LBB6_3:
 ; RV64I-NEXT:    srai a3, a3, 63
@@ -736,12 +736,12 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli t3, t3, 16
 ; RV32I-NEXT:    slli t4, t4, 24
 ; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t2, t2, 16
-; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    or t3, t4, t3
 ; RV32I-NEXT:    or a6, t1, a6
 ; RV32I-NEXT:    lbu t1, 0(a1)
 ; RV32I-NEXT:    lbu t4, 1(a1)
+; RV32I-NEXT:    slli t2, t2, 16
+; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    or a0, a0, t2
 ; RV32I-NEXT:    lbu t2, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
@@ -847,12 +847,12 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 4(a1)
 ; RV64I-NEXT:    lbu t2, 5(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -877,31 +877,31 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    j .LBB7_3
 ; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    lbu a6, 9(a0)
-; RV64I-NEXT:    lbu a7, 10(a0)
-; RV64I-NEXT:    lbu t0, 11(a0)
-; RV64I-NEXT:    lbu t1, 8(a0)
+; RV64I-NEXT:    lbu a7, 8(a0)
+; RV64I-NEXT:    lbu t0, 10(a0)
+; RV64I-NEXT:    lbu t1, 11(a0)
 ; RV64I-NEXT:    slli a6, a6, 8
-; RV64I-NEXT:    slli a7, a7, 16
-; RV64I-NEXT:    slli t0, t0, 24
-; RV64I-NEXT:    or a6, a6, t1
-; RV64I-NEXT:    lbu t1, 12(a0)
+; RV64I-NEXT:    or a6, a6, a7
+; RV64I-NEXT:    lbu a7, 12(a0)
 ; RV64I-NEXT:    lbu t2, 13(a0)
-; RV64I-NEXT:    or a7, t0, a7
-; RV64I-NEXT:    lbu t0, 14(a0)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t1, t1, 24
+; RV64I-NEXT:    or t0, t1, t0
+; RV64I-NEXT:    lbu t1, 14(a0)
 ; RV64I-NEXT:    lbu a0, 15(a0)
 ; RV64I-NEXT:    slli t2, t2, 8
-; RV64I-NEXT:    or t1, t2, t1
-; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    or a7, t2, a7
+; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    not a7, a4
-; RV64I-NEXT:    srli a5, a5, 1
 ; RV64I-NEXT:    or a0, a0, t1
+; RV64I-NEXT:    or a6, t0, a6
+; RV64I-NEXT:    not t0, a4
+; RV64I-NEXT:    srli a5, a5, 1
+; RV64I-NEXT:    or a0, a0, a7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    sll a0, a0, a4
-; RV64I-NEXT:    srl a4, a5, a7
+; RV64I-NEXT:    srl a4, a5, t0
 ; RV64I-NEXT:    or a0, a0, a4
 ; RV64I-NEXT:  .LBB7_3:
 ; RV64I-NEXT:    srai a3, a3, 63
@@ -972,12 +972,12 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli t3, t3, 16
 ; RV32I-NEXT:    slli t4, t4, 24
 ; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t2, t2, 16
-; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    or t3, t4, t3
 ; RV32I-NEXT:    or a6, t1, a6
 ; RV32I-NEXT:    lbu t1, 0(a1)
 ; RV32I-NEXT:    lbu t4, 1(a1)
+; RV32I-NEXT:    slli t2, t2, 16
+; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    or a0, a0, t2
 ; RV32I-NEXT:    lbu t2, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
@@ -1083,12 +1083,12 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
 ; RV64I-NEXT:    slli a7, a7, 8
-; RV64I-NEXT:    slli t0, t0, 16
-; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t1, t2, t1
 ; RV64I-NEXT:    or a6, a7, a6
 ; RV64I-NEXT:    lbu a7, 4(a1)
 ; RV64I-NEXT:    lbu t2, 5(a1)
+; RV64I-NEXT:    slli t0, t0, 16
+; RV64I-NEXT:    slli t3, t3, 24
 ; RV64I-NEXT:    or t0, t3, t0
 ; RV64I-NEXT:    lbu t3, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -1115,31 +1115,31 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    j .LBB8_3
 ; RV64I-NEXT:  .LBB8_2:
 ; RV64I-NEXT:    lbu a5, 1(a0)
-; RV64I-NEXT:    lbu a6, 2(a0)
-; RV64I-NEXT:    lbu a7, 3(a0)
-; RV64I-NEXT:    lbu t0, 0(a0)
+; RV64I-NEXT:    lbu a6, 0(a0)
+; RV64I-NEXT:    lbu a7, 2(a0)
+; RV64I-NEXT:    lbu t0, 3(a0)
 ; RV64I-NEXT:    slli a5, a5, 8
-; RV64I-NEXT:    slli a6, a6, 16
-; RV64I-NEXT:    slli a7, a7, 24
-; RV64I-NEXT:    or a5, a5, t0
-; RV64I-NEXT:    lbu t0, 4(a0)
+; RV64I-NEXT:    or a5, a5, a6
+; RV64I-NEXT:    lbu a6, 4(a0)
 ; RV64I-NEXT:    lbu t1, 5(a0)
-; RV64I-NEXT:    or a6, a7, a6
-; RV64I-NEXT:    lbu a7, 6(a0)
+; RV64I-NEXT:    slli a7, a7, 16
+; RV64I-NEXT:    slli t0, t0, 24
+; RV64I-NEXT:    or a7, t0, a7
+; RV64I-NEXT:    lbu t0, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
 ; RV64I-NEXT:    slli t1, t1, 8
-; RV64I-NEXT:    or t0, t1, t0
-; RV64I-NEXT:    slli a7, a7, 16
+; RV64I-NEXT:    or a6, t1, a6
+; RV64I-NEXT:    slli t0, t0, 16
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a7
-; RV64I-NEXT:    or a5, a6, a5
-; RV64I-NEXT:    not a6, a3
-; RV64I-NEXT:    slli a4, a4, 1
 ; RV64I-NEXT:    or a0, a0, t0
+; RV64I-NEXT:    or a5, a7, a5
+; RV64I-NEXT:    not a7, a3
+; RV64I-NEXT:    slli a4, a4, 1
+; RV64I-NEXT:    or a0, a0, a6
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a5
 ; RV64I-NEXT:    srl a0, a0, a3
-; RV64I-NEXT:    sll a3, a4, a6
+; RV64I-NEXT:    sll a3, a4, a7
 ; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:  .LBB8_3:
 ; RV64I-NEXT:    srli a3, a1, 56
@@ -1206,11 +1206,11 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli t3, t3, 8
 ; RV32I-NEXT:    slli t4, t4, 16
 ; RV32I-NEXT:    slli t5, t5, 24
-; RV32I-NEXT:    slli t1, t1, 8
 ; RV32I-NEXT:    or a4, t3, a4
 ; RV32I-NEXT:    or t3, t5, t4
 ; RV32I-NEXT:    lbu t4, 0(a1)
 ; RV32I-NEXT:    lbu t5, 1(a1)
+; RV32I-NEXT:    slli t1, t1, 8
 ; RV32I-NEXT:    or t0, t1, t0
 ; RV32I-NEXT:    lbu t1, 2(a1)
 ; RV32I-NEXT:    lbu a1, 3(a1)
@@ -1388,12 +1388,12 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -1864,12 +1864,12 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)
@@ -2340,12 +2340,12 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
 ; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s5, s6, s5
 ; RV64I-NEXT:    or s2, s3, s2
 ; RV64I-NEXT:    lbu s3, 4(a1)
 ; RV64I-NEXT:    lbu s6, 5(a1)
+; RV64I-NEXT:    slli s4, s4, 16
+; RV64I-NEXT:    slli s7, s7, 24
 ; RV64I-NEXT:    or s4, s7, s4
 ; RV64I-NEXT:    lbu s7, 6(a1)
 ; RV64I-NEXT:    lbu a1, 7(a1)

>From fa284efed508cfa7fb0237790b7413f6f2f6c6b3 Mon Sep 17 00:00:00 2001
From: Ruiling Song <ruiling.song at amd.com>
Date: Tue, 27 May 2025 20:52:30 +0800
Subject: [PATCH 2/2] Update tests after merge

---
 .../CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll  | 3889 +++++++++--------
 .../CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll   |   70 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll   |   46 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll   |   91 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll   |   44 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll    |    2 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll   |  381 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll   |   20 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll   |   32 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll   |   50 +-
 .../CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll   | 1061 ++---
 llvm/test/CodeGen/AMDGPU/scratch-simple.ll    |  105 +-
 llvm/test/CodeGen/RISCV/zilsd.ll              |   28 +-
 13 files changed, 2895 insertions(+), 2924 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 30d71f6163924..44abfd272be88 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -18795,22 +18795,22 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v20
-; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_or_b32_e32 v8, v0, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; SI-NEXT:    v_and_b32_e32 v0, 0xff, v22
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v24
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_mov_b32_e32 v2, v9
 ; SI-NEXT:    v_or_b32_e32 v9, v0, v1
@@ -20195,11 +20195,11 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; VI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v32, v54
-; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v43, v49
-; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v46, v61
 ; VI-NEXT:    v_mov_b32_e32 v47, v45
+; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
@@ -22132,9 +22132,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    s_lshl_b32 s10, s10, 16
 ; GFX11-TRUE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-TRUE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB15_3
 ; GFX11-TRUE16-NEXT:  .LBB15_2: ; %cmp.true
@@ -22552,13 +22551,12 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff, v31
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v27, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v28, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v29, v30, v33
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v30, v34, v35
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB15_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_clause 0x1d
 ; GFX11-TRUE16-NEXT:    scratch_load_b32 v93, off, s32 offset:320
@@ -23093,7 +23091,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s8, 16
 ; GFX11-FAKE16-NEXT:    s_or_b32 s5, s5, s6
 ; GFX11-FAKE16-NEXT:    s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_b32 s7, s20, 0xff
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s21, 8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s9, s22, 0xff
@@ -23111,8 +23108,9 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xff, v51
 ; GFX11-FAKE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xff, v52
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v3, v3, v93
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v2, v2, v92
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -23120,7 +23118,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v3, s8 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB15_3
 ; GFX11-FAKE16-NEXT:  .LBB15_2: ; %cmp.true
@@ -23484,12 +23482,11 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v27, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v28, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v29, v29, v33
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v30, v34, v30
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB15_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_clause 0x1f
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v111, off, s32 offset:320
@@ -57510,22 +57507,22 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v20
-; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_or_b32_e32 v8, v0, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; SI-NEXT:    v_and_b32_e32 v0, 0xff, v22
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v24
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_mov_b32_e32 v2, v9
 ; SI-NEXT:    v_or_b32_e32 v9, v0, v1
@@ -58910,11 +58907,11 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; VI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v32, v54
-; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v43, v49
-; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v46, v61
 ; VI-NEXT:    v_mov_b32_e32 v47, v45
+; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
@@ -60847,9 +60844,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_lshl_b32 s10, s10, 16
 ; GFX11-TRUE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-TRUE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB39_3
 ; GFX11-TRUE16-NEXT:  .LBB39_2: ; %cmp.true
@@ -61267,13 +61263,12 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff, v31
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v27, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v28, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v29, v30, v33
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v30, v34, v35
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB39_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_clause 0x1d
 ; GFX11-TRUE16-NEXT:    scratch_load_b32 v93, off, s32 offset:320
@@ -61808,7 +61803,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s8, 16
 ; GFX11-FAKE16-NEXT:    s_or_b32 s5, s5, s6
 ; GFX11-FAKE16-NEXT:    s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_b32 s7, s20, 0xff
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s21, 8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s9, s22, 0xff
@@ -61826,8 +61820,9 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xff, v51
 ; GFX11-FAKE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xff, v52
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v3, v3, v93
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v2, v2, v92
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -61835,7 +61830,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v3, s8 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB39_3
 ; GFX11-FAKE16-NEXT:  .LBB39_2: ; %cmp.true
@@ -62199,12 +62194,11 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v27, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v28, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v29, v29, v33
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v30, v34, v30
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB39_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_clause 0x1f
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v111, off, s32 offset:320
@@ -94244,22 +94238,22 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v20
-; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_or_b32_e32 v8, v0, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; SI-NEXT:    v_and_b32_e32 v0, 0xff, v22
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v24
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_mov_b32_e32 v2, v9
 ; SI-NEXT:    v_or_b32_e32 v9, v0, v1
@@ -95644,11 +95638,11 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; VI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v32, v54
-; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v43, v49
-; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v46, v61
 ; VI-NEXT:    v_mov_b32_e32 v47, v45
+; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
@@ -97581,9 +97575,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    s_lshl_b32 s10, s10, 16
 ; GFX11-TRUE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-TRUE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB59_3
 ; GFX11-TRUE16-NEXT:  .LBB59_2: ; %cmp.true
@@ -98001,13 +97994,12 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff, v31
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v27, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v28, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v29, v30, v33
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v30, v34, v35
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB59_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_clause 0x1d
 ; GFX11-TRUE16-NEXT:    scratch_load_b32 v93, off, s32 offset:320
@@ -98542,7 +98534,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s8, 16
 ; GFX11-FAKE16-NEXT:    s_or_b32 s5, s5, s6
 ; GFX11-FAKE16-NEXT:    s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_b32 s7, s20, 0xff
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s21, 8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s9, s22, 0xff
@@ -98560,8 +98551,9 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xff, v51
 ; GFX11-FAKE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xff, v52
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v3, v3, v93
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v2, v2, v92
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -98569,7 +98561,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v3, s8 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB59_3
 ; GFX11-FAKE16-NEXT:  .LBB59_2: ; %cmp.true
@@ -98933,12 +98925,11 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v27, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v28, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v29, v29, v33
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v30, v34, v30
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB59_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_clause 0x1f
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v111, off, s32 offset:320
@@ -130921,22 +130912,22 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v20
-; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_or_b32_e32 v8, v0, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
 ; SI-NEXT:    v_and_b32_e32 v0, 0xff, v22
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xff, v24
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    v_mov_b32_e32 v2, v9
 ; SI-NEXT:    v_or_b32_e32 v9, v0, v1
@@ -132321,11 +132312,11 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; VI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v32, v54
-; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v43, v49
-; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    v_mov_b32_e32 v46, v61
 ; VI-NEXT:    v_mov_b32_e32 v47, v45
+; VI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
@@ -134258,9 +134249,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_lshl_b32 s10, s10, 16
 ; GFX11-TRUE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-TRUE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB75_3
 ; GFX11-TRUE16-NEXT:  .LBB75_2: ; %cmp.true
@@ -134678,13 +134668,12 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v31, 0xffff, v31
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v27, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v28, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v29, v30, v33
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v30, v34, v35
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB75_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_clause 0x1d
 ; GFX11-TRUE16-NEXT:    scratch_load_b32 v93, off, s32 offset:320
@@ -135219,7 +135208,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s8, 16
 ; GFX11-FAKE16-NEXT:    s_or_b32 s5, s5, s6
 ; GFX11-FAKE16-NEXT:    s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_b32 s7, s20, 0xff
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s8, s21, 8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s9, s22, 0xff
@@ -135237,8 +135225,9 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    s_or_b32 s7, s7, s8
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xff, v51
 ; GFX11-FAKE16-NEXT:    s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xff, v52
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v3, v3, v93
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v2, v2, v92
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -135246,7 +135235,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v3, s8 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB75_3
 ; GFX11-FAKE16-NEXT:  .LBB75_2: ; %cmp.true
@@ -135610,12 +135599,11 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v27, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v28, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v29, v29, v33
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v30, v34, v30
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v31, v31, v32
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB75_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_clause 0x1f
 ; GFX11-FAKE16-NEXT:    scratch_load_b32 v111, off, s32 offset:320
@@ -159543,14 +159531,15 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, 3, v3
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; VI-NEXT:    s_waitcnt vmcnt(3)
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, 3, v2
 ; VI-NEXT:    v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    v_add_u32_e32 v34, vcc, 0x300, v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
@@ -160288,16 +160277,16 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
 ; GFX9-NEXT:    s_waitcnt vmcnt(4)
 ; GFX9-NEXT:    v_mov_b32_e32 v46, v0
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_lshl_or_b32 v24, v1, 16, v0
 ; GFX9-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_mov_b32_e32 v35, v45
 ; GFX9-NEXT:    v_mov_b32_e32 v45, v61
 ; GFX9-NEXT:    v_mov_b32_e32 v61, v42
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_mov_b32_e32 v38, v0
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
@@ -161647,10 +161636,9 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff, v27
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v15, 16, v12
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v14, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v117, 16, v19
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v167, 16, v24
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v45, 16, v32
@@ -162015,7 +162003,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v16, v16, v161
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v19, 0xffff, v13
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v166
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xff, v144
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v134
@@ -162026,12 +162013,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v17, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xff, v151
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xff, v149
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v180
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v1, v1, v177
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -162475,9 +162463,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v34, 0xffff, v181
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v35, 0xffff, v28
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v2, 16, v0
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v133, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v27, v160, 16, v32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v178, 16, v33
@@ -163089,8 +163076,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v30
 ; SI-NEXT:    v_alignbit_b32 v4, v1, v9, 16
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v62
+; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
 ; SI-NEXT:    ; implicit-def: $vgpr30
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -170379,11 +170366,11 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
 ; SI-NEXT:    v_alignbit_b32 v8, v44, v36, 8
 ; SI-NEXT:    v_alignbit_b32 v58, v22, v9, 16
 ; SI-NEXT:    v_alignbit_b32 v40, v1, v37, 16
-; SI-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v8, v58, v6, 24
+; SI-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v8, v58, v6, 16
@@ -187589,14 +187576,15 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, 3, v3
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; VI-NEXT:    s_waitcnt vmcnt(3)
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, 3, v2
 ; VI-NEXT:    v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    v_add_u32_e32 v34, vcc, 0x300, v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
@@ -188334,16 +188322,16 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
 ; GFX9-NEXT:    s_waitcnt vmcnt(4)
 ; GFX9-NEXT:    v_mov_b32_e32 v46, v0
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_lshl_or_b32 v24, v1, 16, v0
 ; GFX9-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_mov_b32_e32 v35, v45
 ; GFX9-NEXT:    v_mov_b32_e32 v45, v61
 ; GFX9-NEXT:    v_mov_b32_e32 v61, v42
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_mov_b32_e32 v38, v0
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
@@ -189693,10 +189681,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff, v27
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v15, 16, v12
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v14, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v117, 16, v19
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v167, 16, v24
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v45, 16, v32
@@ -190061,7 +190048,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v16, v16, v161
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v19, 0xffff, v13
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v166
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xff, v144
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v134
@@ -190072,12 +190058,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v17, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xff, v151
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xff, v149
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v180
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v1, v1, v177
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -190521,9 +190508,8 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v34, 0xffff, v181
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v35, 0xffff, v28
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v2, 16, v0
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v133, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v27, v160, 16, v32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v178, 16, v33
@@ -196600,9 +196586,9 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
 ; SI-NEXT:    v_mov_b32_e32 v50, v2
 ; SI-NEXT:    v_mov_b32_e32 v53, v40
 ; SI-NEXT:    v_mov_b32_e32 v40, v28
-; SI-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(11)
+; SI-NEXT:    s_waitcnt vmcnt(10)
 ; SI-NEXT:    v_mov_b32_e32 v2, v48
+; SI-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
 ; SI-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
 ; SI-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v11, v27
@@ -202779,14 +202765,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:236
+; SI-NEXT:    s_waitcnt expcnt(1)
 ; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:260
 ; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:284
 ; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:256
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:236
+; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_lshlrev_b32_e32 v6, 24, v6
-; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:280
@@ -202804,16 +202791,17 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:268
+; SI-NEXT:    s_waitcnt expcnt(1)
 ; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:292
 ; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:316
 ; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:288
-; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:268
+; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_lshlrev_b32_e32 v6, 24, v6
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_lshlrev_b32_e32 v34, 8, v7
-; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:312
@@ -211245,14 +211233,15 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, 3, v3
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
 ; VI-NEXT:    s_waitcnt vmcnt(3)
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, 3, v2
 ; VI-NEXT:    v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    v_add_u32_e32 v34, vcc, 0x300, v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
 ; VI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
 ; VI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
@@ -211990,16 +211979,16 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
 ; GFX9-NEXT:    s_waitcnt vmcnt(4)
 ; GFX9-NEXT:    v_mov_b32_e32 v46, v0
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_lshl_or_b32 v24, v1, 16, v0
 ; GFX9-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
 ; GFX9-NEXT:    v_mov_b32_e32 v35, v45
 ; GFX9-NEXT:    v_mov_b32_e32 v45, v61
 ; GFX9-NEXT:    v_mov_b32_e32 v61, v42
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_mov_b32_e32 v38, v0
 ; GFX9-NEXT:    v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
@@ -213349,10 +213338,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v35, 0xffff, v27
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v15, 16, v12
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v14, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v117, 16, v19
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v167, 16, v24
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v45, 16, v32
@@ -213717,7 +213705,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v16, v16, v161
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v19, 0xffff, v13
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v166
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xff, v144
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v134
@@ -213728,12 +213715,13 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v21, 0xffff, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v17, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xff, v151
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xff, v149
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v180
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v1, v1, v177
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -214177,9 +214165,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v34, 0xffff, v181
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v35, 0xffff, v28
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v2, 16, v0
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v133, 16, v19
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v27, v160, 16, v32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v178, 16, v33
@@ -214289,11 +214276,9 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:124
 ; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:132
 ; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:128
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT:    ; implicit-def: $vgpr23
-; SI-NEXT:    v_lshlrev_b32_e32 v27, 16, v28
-; SI-NEXT:    ; kill: killed $vgpr23
-; SI-NEXT:    ; implicit-def: $vgpr23
+; SI-NEXT:    ; implicit-def: $vgpr27
+; SI-NEXT:    ; kill: killed $vgpr27
+; SI-NEXT:    ; implicit-def: $vgpr27
 ; SI-NEXT:    ; implicit-def: $vgpr46
 ; SI-NEXT:    ; implicit-def: $vgpr45
 ; SI-NEXT:    ; implicit-def: $vgpr44
@@ -214318,25 +214303,22 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    ; implicit-def: $vgpr33
 ; SI-NEXT:    ; implicit-def: $vgpr31
 ; SI-NEXT:    ; implicit-def: $vgpr32
-; SI-NEXT:    ; kill: killed $vgpr23
-; SI-NEXT:    ; implicit-def: $vgpr23
-; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:4
-; SI-NEXT:    s_waitcnt vmcnt(11)
-; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; SI-NEXT:    ; kill: killed $vgpr27
+; SI-NEXT:    ; implicit-def: $vgpr27
 ; SI-NEXT:    s_waitcnt vmcnt(10)
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    s_waitcnt vmcnt(6)
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:72
+; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT:    s_waitcnt vmcnt(7)
 ; SI-NEXT:    v_lshlrev_b32_e32 v25, 16, v11
-; SI-NEXT:    s_waitcnt vmcnt(5)
+; SI-NEXT:    s_waitcnt vmcnt(6)
 ; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v13
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v17
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:120
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
 ; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:116
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:112
@@ -214371,38 +214353,62 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:20
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:104
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:68
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:40
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v7
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v19, 16, v9
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:36
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v9
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:8
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_lshlrev_b32_e32 v29, 16, v59
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v6
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v10
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v6
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v14
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v10
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v18
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v14
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v22
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v18
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v26
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v22
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v30
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v26
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v30
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:100
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:96
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
@@ -214510,7 +214516,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:72
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    ; kill: killed $vgpr2
@@ -214519,54 +214524,54 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    s_waitcnt vmcnt(13)
-; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v19, off, s[0:3], s32 offset:104
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; kill: killed $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:64
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    ; kill: killed $vgpr2
@@ -214592,7 +214597,10 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v15
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v17
 ; SI-NEXT:    v_lshlrev_b32_e32 v15, 16, v58
+; SI-NEXT:    v_lshlrev_b32_e32 v17, 16, v63
+; SI-NEXT:    v_lshlrev_b32_e32 v9, 16, v61
 ; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v62
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
@@ -214605,33 +214613,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    ; implicit-def: $vgpr6
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:68
-; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:40
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_lshlrev_b32_e32 v17, 16, v63
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:36
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:8
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_lshlrev_b32_e32 v29, 16, v59
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:100
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:96
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_lshlrev_b32_e32 v9, 16, v61
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:64
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -214640,26 +214621,29 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32
-; SI-NEXT:    v_lshlrev_b32_e32 v19, 16, v60
+; SI-NEXT:    v_lshlrev_b32_e32 v23, 16, v60
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v12
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v20
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v24
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v28
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v47
@@ -214668,8 +214652,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    s_cbranch_execz .LBB98_2
 ; SI-NEXT:  ; %bb.1: ; %cmp.false
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT:    v_and_b32_e32 v23, 0xffff, v47
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v27, 0xffff, v47
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -214680,7 +214664,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v45, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -214691,7 +214675,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v43, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -214702,7 +214686,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v41, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -214713,7 +214697,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v55, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -214724,7 +214708,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v53, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -214735,28 +214719,29 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v51, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_or_b32_e32 v50, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 16, v28
+; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT:    v_or_b32_e32 v49, v2, v27
+; SI-NEXT:    v_or_b32_e32 v49, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_or_b32_e32 v48, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT:    v_or_b32_e32 v39, v2, v19
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; SI-NEXT:    ; implicit-def: $vgpr19
+; SI-NEXT:    v_or_b32_e32 v39, v2, v23
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT:    ; implicit-def: $vgpr23
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v38, v2, v6
@@ -214771,30 +214756,30 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v36, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v35, v2, v29
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; SI-NEXT:    ; implicit-def: $vgpr29
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v34, v2, v6
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v33, v2, v21
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
 ; SI-NEXT:    ; implicit-def: $vgpr21
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT:    v_or_b32_e32 v31, v2, v6
+; SI-NEXT:    v_or_b32_e32 v31, v2, v19
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT:    ; implicit-def: $vgpr19
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v32, v2, v17
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; SI-NEXT:    ; implicit-def: $vgpr17
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
@@ -214809,12 +214794,12 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v18, v2, v11
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
 ; SI-NEXT:    ; implicit-def: $vgpr11
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v22, v2, v9
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
 ; SI-NEXT:    ; implicit-def: $vgpr9
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
@@ -214824,7 +214809,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_or_b32_e32 v10, v2, v5
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; SI-NEXT:    ; implicit-def: $vgpr5
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
@@ -214865,10 +214850,10 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v57
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v63
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_bfe_u32 v1, v4, 8, 8
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
@@ -214913,22 +214898,22 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v61
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_bfe_u32 v1, v61, 8, 8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v62
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_bfe_u32 v1, v62, 8, 8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_bfe_u32 v1, v47, 8, 8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v45, v46, 24
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v45, v46, 16
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
@@ -215045,31 +215030,31 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v22, v18, 24
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v22, v18, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v22, v18, 8
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v10, v14, 24
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v10, v14, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v10, v14, 8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v2, v6, 24
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v2, v6, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_alignbit_b32 v1, v2, v6, 8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v45
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
@@ -215221,26 +215206,26 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    ; implicit-def: $vgpr1
 ; SI-NEXT:  .LBB98_2: ; %Flow
 ; SI-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
-; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
 ; SI-NEXT:    s_xor_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB98_4
 ; SI-NEXT:  ; %bb.3: ; %cmp.true
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b32 s6, 0x30000
 ; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
@@ -215256,24 +215241,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v3
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
 ; SI-NEXT:    v_alignbit_b32 v12, v2, v6, 24
 ; SI-NEXT:    v_alignbit_b32 v20, v2, v6, 16
 ; SI-NEXT:    v_alignbit_b32 v47, v2, v6, 8
 ; SI-NEXT:    v_lshrrev_b32_e32 v8, 24, v2
-; SI-NEXT:    v_lshrrev_b32_e32 v23, 16, v2
+; SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v2
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v11, v1
 ; SI-NEXT:    v_add_i32_e32 v18, vcc, s6, v1
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v9, v1
 ; SI-NEXT:    v_add_i32_e32 v22, vcc, s6, v1
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
 ; SI-NEXT:    v_alignbit_b32 v62, v22, v18, 24
 ; SI-NEXT:    v_alignbit_b32 v63, v22, v18, 16
 ; SI-NEXT:    v_lshrrev_b32_e32 v59, 24, v22
@@ -215288,7 +215273,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v3
 ; SI-NEXT:    v_or_b32_e32 v3, v7, v3
 ; SI-NEXT:    v_add_i32_e32 v14, vcc, s6, v3
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215298,16 +215283,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT:    v_or_b32_e32 v1, v3, v1
+; SI-NEXT:    v_or_b32_e32 v1, v19, v1
 ; SI-NEXT:    v_add_i32_e32 v31, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v17, v1
 ; SI-NEXT:    v_add_i32_e32 v32, vcc, s6, v1
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v32
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
@@ -215329,20 +215313,20 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v36, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v29, v1
 ; SI-NEXT:    v_add_i32_e32 v35, vcc, s6, v1
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v38, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215355,11 +215339,11 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v48, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT:    v_or_b32_e32 v1, v19, v1
+; SI-NEXT:    v_or_b32_e32 v1, v23, v1
 ; SI-NEXT:    v_add_i32_e32 v39, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -215368,6 +215352,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v50, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, 3, v4
 ; SI-NEXT:    v_and_b32_e32 v4, 0xffff, v4
 ; SI-NEXT:    v_or_b32_e32 v4, v5, v4
@@ -215377,20 +215362,22 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_alignbit_b32 v61, v10, v14, 8
 ; SI-NEXT:    v_lshrrev_b32_e32 v24, 24, v10
 ; SI-NEXT:    v_lshrrev_b32_e32 v28, 16, v10
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT:    v_or_b32_e32 v1, v27, v1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v49, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v52, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215398,7 +215385,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v51, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215406,7 +215393,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v54, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215414,7 +215401,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v53, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215422,7 +215409,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v40, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215430,7 +215417,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v55, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215438,7 +215425,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v42, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215446,7 +215433,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v41, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215454,7 +215441,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v44, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215462,7 +215449,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v43, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -215470,7 +215457,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_add_i32_e32 v46, vcc, s6, v1
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -216252,7 +216239,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
-; SI-NEXT:    v_and_b32_e32 v2, 0xff, v23
+; SI-NEXT:    v_and_b32_e32 v2, 0xff, v27
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    v_or_b32_e32 v2, v3, v2
@@ -225758,6 +225745,13 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:80
 ; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:84
 ; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:92
+; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:88
+; SI-NEXT:    s_waitcnt expcnt(6)
+; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:100
+; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:96
+; SI-NEXT:    s_waitcnt expcnt(4)
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:108
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:104
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -225794,13 +225788,8 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v31
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:88
-; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:100
-; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:96
-; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:108
-; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:104
-; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:112
-; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:116
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:112
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:116
 ; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:120
 ; SI-NEXT:    v_mul_f32_e32 v32, 1.0, v32
 ; SI-NEXT:    v_mul_f32_e32 v33, 1.0, v33
@@ -225813,12 +225802,12 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_mul_f32_e32 v48, 1.0, v48
 ; SI-NEXT:    v_mul_f32_e32 v49, 1.0, v49
 ; SI-NEXT:    v_mul_f32_e32 v50, 1.0, v50
-; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_mul_f32_e32 v51, 1.0, v51
 ; SI-NEXT:    v_mul_f32_e32 v52, 1.0, v52
 ; SI-NEXT:    v_mul_f32_e32 v53, 1.0, v53
 ; SI-NEXT:    v_mul_f32_e32 v54, 1.0, v54
 ; SI-NEXT:    v_mul_f32_e32 v55, 1.0, v55
+; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_mul_f32_e32 v40, 1.0, v40
 ; SI-NEXT:    v_mul_f32_e32 v41, 1.0, v41
 ; SI-NEXT:    v_mul_f32_e32 v42, 1.0, v42
@@ -225828,24 +225817,24 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_mul_f32_e32 v44, 1.0, v44
 ; SI-NEXT:    s_waitcnt vmcnt(11)
 ; SI-NEXT:    v_mul_f32_e32 v45, 1.0, v45
-; SI-NEXT:    s_waitcnt vmcnt(10)
+; SI-NEXT:    s_waitcnt vmcnt(9)
+; SI-NEXT:    v_mul_f32_e32 v46, 1.0, v46
 ; SI-NEXT:    v_mul_f32_e32 v47, 1.0, v47
 ; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_mul_f32_e32 v46, 1.0, v1
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_mul_f32_e32 v57, 1.0, v31
+; SI-NEXT:    v_mul_f32_e32 v56, 1.0, v56
+; SI-NEXT:    v_mul_f32_e32 v57, 1.0, v57
+; SI-NEXT:    s_waitcnt vmcnt(5)
+; SI-NEXT:    v_mul_f32_e32 v58, 1.0, v58
+; SI-NEXT:    v_mul_f32_e32 v59, 1.0, v59
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_mul_f32_e32 v60, 1.0, v1
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_mul_f32_e32 v61, 1.0, v31
 ; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:124
 ; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:128
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
-; SI-NEXT:    s_waitcnt vmcnt(8)
-; SI-NEXT:    v_mul_f32_e32 v56, 1.0, v56
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_mul_f32_e32 v58, 1.0, v58
 ; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v62
-; SI-NEXT:    v_mul_f32_e32 v59, 1.0, v59
-; SI-NEXT:    v_mul_f32_e32 v60, 1.0, v60
-; SI-NEXT:    v_mul_f32_e32 v61, 1.0, v61
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_mul_f32_e32 v62, 1.0, v31
 ; SI-NEXT:    s_waitcnt vmcnt(1)
@@ -233269,29 +233258,32 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:80
 ; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:84
 ; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:92
+; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:88
+; SI-NEXT:    s_waitcnt expcnt(6)
+; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:100
+; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:96
+; SI-NEXT:    s_waitcnt expcnt(4)
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:108
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:104
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v7
 ; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
 ; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v3
 ; SI-NEXT:    v_cvt_f16_f32_e32 v11, v11
 ; SI-NEXT:    v_cvt_f16_f32_e32 v12, v12
 ; SI-NEXT:    v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v4
 ; SI-NEXT:    v_cvt_f16_f32_e32 v14, v14
 ; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
 ; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
 ; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
 ; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
 ; SI-NEXT:    v_cvt_f16_f32_e32 v19, v19
@@ -233306,26 +233298,11 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
 ; SI-NEXT:    v_cvt_f16_f32_e32 v29, v29
 ; SI-NEXT:    v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT:    ; implicit-def: $vgpr3
-; SI-NEXT:    ; implicit-def: $vgpr4
-; SI-NEXT:    ; kill: killed $vgpr3
-; SI-NEXT:    ; implicit-def: $vgpr3
-; SI-NEXT:    ; kill: killed $vgpr4
-; SI-NEXT:    ; implicit-def: $vgpr4
-; SI-NEXT:    ; kill: killed $vgpr3
-; SI-NEXT:    ; implicit-def: $vgpr3
-; SI-NEXT:    ; kill: killed $vgpr4
-; SI-NEXT:    ; implicit-def: $vgpr4
 ; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v31
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:88
-; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:100
-; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:96
-; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:108
-; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:104
-; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:112
-; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:116
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:112
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:116
 ; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:120
 ; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
 ; SI-NEXT:    v_cvt_f16_f32_e32 v33, v33
@@ -233340,35 +233317,35 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f16_f32_e32 v50, v50
 ; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
 ; SI-NEXT:    v_cvt_f16_f32_e32 v52, v52
-; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v53, v53
 ; SI-NEXT:    v_cvt_f16_f32_e32 v54, v54
 ; SI-NEXT:    v_cvt_f16_f32_e32 v55, v55
+; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v40, v40
 ; SI-NEXT:    v_cvt_f16_f32_e32 v41, v41
 ; SI-NEXT:    v_cvt_f16_f32_e32 v42, v42
+; SI-NEXT:    s_waitcnt vmcnt(13)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v43, v43
+; SI-NEXT:    s_waitcnt vmcnt(12)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v44, v44
-; SI-NEXT:    s_waitcnt vmcnt(13)
+; SI-NEXT:    s_waitcnt vmcnt(11)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v45, v45
-; SI-NEXT:    s_waitcnt vmcnt(12)
+; SI-NEXT:    s_waitcnt vmcnt(9)
+; SI-NEXT:    v_cvt_f16_f32_e32 v46, v46
 ; SI-NEXT:    v_cvt_f16_f32_e32 v47, v47
 ; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_cvt_f16_f32_e32 v46, v1
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cvt_f16_f32_e32 v57, v31
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
-; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:128
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
-; SI-NEXT:    s_waitcnt vmcnt(8)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v56, v56
-; SI-NEXT:    s_waitcnt vmcnt(6)
+; SI-NEXT:    v_cvt_f16_f32_e32 v57, v57
+; SI-NEXT:    s_waitcnt vmcnt(5)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v58, v58
 ; SI-NEXT:    v_cvt_f16_f32_e32 v59, v59
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_cvt_f16_f32_e32 v60, v60
-; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_cvt_f16_f32_e32 v61, v61
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_cvt_f16_f32_e32 v60, v1
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cvt_f16_f32_e32 v61, v31
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:124
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:128
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:132
 ; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v62, v62
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -233493,216 +233470,228 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    ; implicit-def: $vgpr2
 ; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; kill: killed $vgpr2
 ; SI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; SI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB102_2
 ; SI-NEXT:  ; %bb.1: ; %cmp.false
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v63
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v31
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
-; SI-NEXT:    ; kill: killed $vgpr1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v8
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; SI-NEXT:    ; kill: killed $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v7
-; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr5
-; SI-NEXT:    ; implicit-def: $vgpr6
-; SI-NEXT:    ; implicit-def: $vgpr7
-; SI-NEXT:    ; implicit-def: $vgpr8
+; SI-NEXT:    ; implicit-def: $vgpr3
 ; SI-NEXT:    ; implicit-def: $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr63
 ; SI-NEXT:    ; implicit-def: $vgpr31
-; SI-NEXT:    s_waitcnt vmcnt(6)
+; SI-NEXT:    s_waitcnt vmcnt(4)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(1) expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
 ; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(1) expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v9
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v4
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v5
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v6
 ; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v10
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v7
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v8
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v9
 ; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v10
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v11
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v12
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v13
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v14
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v15
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v16
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v17
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v18
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v19
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v20
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v21
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v22
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v23
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v24
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v25
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v26
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v27
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v28
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v29
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v30
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v32
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v33
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v34
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v35
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v36
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v37
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v38
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v39
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v48
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v49
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v50
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v51
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v52
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v53
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v54
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v55
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v40
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v41
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v42
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v43
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v44
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v45
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v46
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v47
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v56
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v57
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v58
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v59
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v60
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v61
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v62
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT:    ; implicit-def: $vgpr4
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr6
+; SI-NEXT:    ; implicit-def: $vgpr7
+; SI-NEXT:    ; implicit-def: $vgpr8
 ; SI-NEXT:    ; implicit-def: $vgpr9
 ; SI-NEXT:    ; implicit-def: $vgpr10
 ; SI-NEXT:    ; implicit-def: $vgpr11
@@ -233760,17 +233749,23 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    s_andn2_saveexec_b64 s[4:5], s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB102_4
 ; SI-NEXT:  ; %bb.3: ; %cmp.true
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v2, v31
 ; SI-NEXT:    v_cvt_f32_f16_e32 v31, v63
 ; SI-NEXT:    v_cvt_f32_f16_e32 v63, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v62
-; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
 ; SI-NEXT:    v_add_f32_e32 v62, 0x38000000, v63
 ; SI-NEXT:    v_add_f32_e32 v63, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v61
+; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
 ; SI-NEXT:    v_cvt_f32_f16_e32 v60, v60
 ; SI-NEXT:    v_cvt_f32_f16_e32 v61, v59
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v58
@@ -233778,6 +233773,14 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v57, v57
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v46
+; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
 ; SI-NEXT:    v_add_f32_e32 v59, 0x38000000, v60
 ; SI-NEXT:    v_add_f32_e32 v60, 0x38000000, v61
 ; SI-NEXT:    v_add_f32_e32 v61, 0x38000000, v58
@@ -233786,6 +233789,16 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_f32_e32 v46, 0x38000000, v57
 ; SI-NEXT:    v_add_f32_e32 v57, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v45
+; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v5
+; SI-NEXT:    v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v6
+; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v7
+; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v8
 ; SI-NEXT:    v_add_f32_e32 v47, 0x38000000, v56
 ; SI-NEXT:    v_add_f32_e32 v56, 0x38000000, v58
 ; SI-NEXT:    v_cvt_f32_f16_e32 v44, v44
@@ -233793,6 +233806,20 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v42
 ; SI-NEXT:    v_add_f32_e32 v42, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v41
+; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
 ; SI-NEXT:    v_add_f32_e32 v43, 0x38000000, v44
 ; SI-NEXT:    v_add_f32_e32 v44, 0x38000000, v45
 ; SI-NEXT:    v_add_f32_e32 v45, 0x38000000, v58
@@ -233801,6 +233828,16 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v54
 ; SI-NEXT:    v_add_f32_e32 v54, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v53
+; SI-NEXT:    v_add_f32_e32 v10, 0x38000000, v10
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v9
+; SI-NEXT:    v_add_f32_e32 v11, 0x38000000, v11
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v10
+; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v11
+; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v12
 ; SI-NEXT:    v_add_f32_e32 v55, 0x38000000, v40
 ; SI-NEXT:    v_add_f32_e32 v40, 0x38000000, v41
 ; SI-NEXT:    v_add_f32_e32 v41, 0x38000000, v58
@@ -233809,6 +233846,20 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v50
 ; SI-NEXT:    v_add_f32_e32 v50, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v49
+; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
 ; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v52
 ; SI-NEXT:    v_add_f32_e32 v52, 0x38000000, v53
 ; SI-NEXT:    v_add_f32_e32 v53, 0x38000000, v58
@@ -233817,6 +233868,16 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v38
 ; SI-NEXT:    v_add_f32_e32 v38, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v37
+; SI-NEXT:    v_add_f32_e32 v14, 0x38000000, v14
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v13
+; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v14
+; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v15
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v16
 ; SI-NEXT:    v_add_f32_e32 v39, 0x38000000, v48
 ; SI-NEXT:    v_add_f32_e32 v48, 0x38000000, v49
 ; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v58
@@ -233825,6 +233886,20 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v34
 ; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v33
+; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
 ; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v36
 ; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v37
 ; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v58
@@ -233833,330 +233908,242 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v29
 ; SI-NEXT:    v_add_f32_e32 v29, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v28
+; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v17
+; SI-NEXT:    v_add_f32_e32 v19, 0x38000000, v19
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v18
+; SI-NEXT:    v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v19
+; SI-NEXT:    v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v20
 ; SI-NEXT:    v_add_f32_e32 v30, 0x38000000, v32
 ; SI-NEXT:    v_add_f32_e32 v32, 0x38000000, v33
 ; SI-NEXT:    v_add_f32_e32 v33, 0x38000000, v58
 ; SI-NEXT:    v_cvt_f32_f16_e32 v58, v25
 ; SI-NEXT:    v_add_f32_e32 v25, 0x38000000, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v24
-; SI-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; SI-NEXT:    v_cvt_f32_f16_e32 v28, v26
-; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f32_f16_e32 v4, v3
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v27
-; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v28
-; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v58
-; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT:    v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
-; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
-; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
-; SI-NEXT:    v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
-; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
-; SI-NEXT:    v_add_f32_e32 v10, 0x38000000, v10
-; SI-NEXT:    v_add_f32_e32 v11, 0x38000000, v11
-; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
-; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
-; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
-; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT:    v_add_f32_e32 v14, 0x38000000, v14
-; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
-; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
-; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
-; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT:    v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT:    v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT:    v_cvt_f32_f16_e32 v21, v21
 ; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT:    v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT:    v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT:    v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT:    v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT:    v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT:    v_add_f32_e32 v31, 0x38000000, v31
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v28
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT:    v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v5
-; SI-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v24, 16, v4
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v8
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v7
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v9
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v10
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v5
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v11
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v12
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v13
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v14
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v15
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v16
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v17
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v18
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v20
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f32_f16_e32 v23, v23
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v6
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT:    v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v21
+; SI-NEXT:    v_cvt_f32_f16_e32 v27, v27
+; SI-NEXT:    v_cvt_f32_f16_e32 v28, v26
+; SI-NEXT:    v_add_f32_e32 v23, 0x38000000, v23
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v22
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v23
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v27
+; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v28
+; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v58
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v28
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v27
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v26
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v25
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v33
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v32
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v30
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v29
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v37
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v36
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v35
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v34
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v49
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v48
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v39
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v38
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v53
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v52
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v51
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v50
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v41
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v40
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v55
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v54
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v45
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v44
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v43
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v42
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v57
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v56
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v47
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v46
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v61
+; SI-NEXT:    buffer_load_dword v24, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
 ; SI-NEXT:    v_cvt_f16_f32_e32 v3, v60
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v59
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(14)
-; SI-NEXT:    v_cvt_f32_f16_e32 v58, v58
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v31
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v63
 ; SI-NEXT:    v_cvt_f16_f32_e32 v2, v62
-; SI-NEXT:    v_add_f32_e32 v58, 0x38000000, v58
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v31
-; SI-NEXT:    v_cvt_f16_f32_e32 v58, v58
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT:    v_lshlrev_b32_e32 v58, 16, v58
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT:    v_mov_b32_e32 v3, v24
-; SI-NEXT:    buffer_store_dword v58, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(6)
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt vmcnt(9)
+; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
+; SI-NEXT:    s_waitcnt vmcnt(8)
+; SI-NEXT:    v_cvt_f32_f16_e32 v58, v58
+; SI-NEXT:    v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT:    v_add_f32_e32 v58, 0x38000000, v58
+; SI-NEXT:    v_cvt_f16_f32_e32 v58, v58
+; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT:    s_waitcnt vmcnt(5)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v58, 16, v58
+; SI-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT:    buffer_store_dword v58, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
-; SI-NEXT:    v_mov_b32_e32 v4, v7
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
 ; SI-NEXT:  .LBB102_4: ; %end
 ; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234164,9 +234151,11 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v2
 ; SI-NEXT:    v_alignbit_b32 v1, v1, v2, 16
 ; SI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v3
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v2
@@ -234174,8 +234163,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234185,17 +234174,19 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 8, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
-; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v2
 ; SI-NEXT:    v_alignbit_b32 v1, v1, v2, 16
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 12, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234205,8 +234196,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 16, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234216,8 +234207,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 20, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234227,8 +234218,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 24, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234238,8 +234229,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 28, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234249,8 +234240,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 32, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234260,8 +234251,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 36, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234271,8 +234262,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 40, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234282,8 +234273,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 44, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234293,8 +234284,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 48, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234304,8 +234295,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 52, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234315,8 +234306,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 56, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234326,8 +234317,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 60, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234337,8 +234328,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 64, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234348,8 +234339,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x44, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234359,8 +234350,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x48, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234370,8 +234361,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x4c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234381,8 +234372,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x50, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234392,8 +234383,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x54, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234403,8 +234394,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x58, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234414,8 +234405,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x5c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234425,8 +234416,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x60, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234436,8 +234427,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x64, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234447,8 +234438,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x68, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234458,8 +234449,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x6c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234469,8 +234460,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x70, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234480,8 +234471,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x74, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -234491,8 +234482,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x78, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_i32_e32 v0, vcc, 0x7c, v0
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
@@ -236252,8 +236243,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:20
 ; SI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:24
 ; SI-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:28
-; SI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:32
-; SI-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:36
+; SI-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:32
+; SI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:36
 ; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:40
 ; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:44
 ; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:48
@@ -236268,849 +236259,871 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:72
 ; SI-NEXT:    s_waitcnt expcnt(3)
 ; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:76
+; SI-NEXT:    s_waitcnt expcnt(2)
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:80
 ; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:80
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:84
-; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:92
-; SI-NEXT:    v_mul_f32_e32 v33, 1.0, v1
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v3
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:84
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:92
+; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:88
+; SI-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:100
+; SI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:96
+; SI-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:108
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:104
+; SI-NEXT:    v_mul_f32_e32 v40, 1.0, v5
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v15
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v7
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v16
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v8
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v19
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v11
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v20
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v12
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v23
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v15
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v24
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v27
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v19
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v28
+; SI-NEXT:    v_mul_f32_e32 v33, 1.0, v1
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v3
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v20
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v4
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v23
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v7
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v24
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v8
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v27
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v11
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v28
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v12
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
 ; SI-NEXT:    v_mul_f32_e32 v43, 1.0, v2
-; SI-NEXT:    v_mul_f32_e32 v54, 1.0, v13
-; SI-NEXT:    v_mul_f32_e32 v51, 1.0, v14
-; SI-NEXT:    v_mul_f32_e32 v14, 1.0, v18
-; SI-NEXT:    v_mul_f32_e32 v19, 1.0, v21
+; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v13
 ; SI-NEXT:    v_mul_f32_e32 v13, 1.0, v22
-; SI-NEXT:    v_mul_f32_e32 v18, 1.0, v25
-; SI-NEXT:    v_mul_f32_e32 v12, 1.0, v26
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    v_mul_f32_e32 v15, 1.0, v17
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v14
+; SI-NEXT:    v_mul_f32_e32 v14, 1.0, v18
+; SI-NEXT:    v_mul_f32_e32 v18, 1.0, v26
+; SI-NEXT:    v_mul_f32_e32 v15, 1.0, v21
+; SI-NEXT:    v_mul_f32_e32 v32, 1.0, v6
+; SI-NEXT:    v_mul_f32_e32 v4, 1.0, v9
+; SI-NEXT:    v_mul_f32_e32 v3, 1.0, v10
+; SI-NEXT:    v_mul_f32_e32 v16, 1.0, v17
+; SI-NEXT:    v_mul_f32_e32 v19, 1.0, v25
 ; SI-NEXT:    v_mul_f32_e32 v17, 1.0, v29
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v35
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v11, 1.0, v30
+; SI-NEXT:    s_waitcnt vmcnt(14)
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v34
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v35
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v36
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v36
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v39
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v39
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v48
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v48
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v53
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:88
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:100
-; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:96
-; SI-NEXT:    buffer_load_dword v21, off, s[0:3], s32 offset:108
-; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:104
-; SI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:112
-; SI-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:116
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:120
-; SI-NEXT:    v_mul_f32_e32 v20, 1.0, v44
-; SI-NEXT:    v_mul_f32_e32 v44, 1.0, v46
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    v_mul_f32_e32 v46, 1.0, v58
-; SI-NEXT:    v_mul_f32_e32 v61, 1.0, v59
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    v_mul_f32_e32 v32, 1.0, v5
-; SI-NEXT:    v_mul_f32_e32 v31, 1.0, v6
-; SI-NEXT:    v_mul_f32_e32 v40, 1.0, v9
-; SI-NEXT:    v_mul_f32_e32 v55, 1.0, v10
-; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v34
-; SI-NEXT:    v_mul_f32_e32 v11, 1.0, v30
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v51
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v52
+; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:112
+; SI-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:116
+; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:120
+; SI-NEXT:    ; implicit-def: $vgpr35
+; SI-NEXT:    ; kill: killed $vgpr35
+; SI-NEXT:    ; implicit-def: $vgpr35
+; SI-NEXT:    v_mul_f32_e32 v34, 1.0, v47
+; SI-NEXT:    ; kill: killed $vgpr35
+; SI-NEXT:    ; implicit-def: $vgpr35
+; SI-NEXT:    ; kill: killed $vgpr35
+; SI-NEXT:    ; implicit-def: $vgpr35
+; SI-NEXT:    v_mul_f32_e32 v47, 1.0, v57
 ; SI-NEXT:    v_mul_f32_e32 v10, 1.0, v37
 ; SI-NEXT:    v_mul_f32_e32 v9, 1.0, v38
+; SI-NEXT:    v_mul_f32_e32 v27, 1.0, v60
+; SI-NEXT:    v_mul_f32_e32 v57, 1.0, v61
 ; SI-NEXT:    v_mul_f32_e32 v8, 1.0, v49
 ; SI-NEXT:    v_mul_f32_e32 v7, 1.0, v50
-; SI-NEXT:    v_mul_f32_e32 v30, 1.0, v52
 ; SI-NEXT:    v_mul_f32_e32 v6, 1.0, v41
-; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v42
-; SI-NEXT:    v_mul_f32_e32 v3, 1.0, v47
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v56
-; SI-NEXT:    v_mul_f32_e32 v47, 1.0, v57
-; SI-NEXT:    v_mul_f32_e32 v60, 1.0, v60
+; SI-NEXT:    v_mul_f32_e32 v5, 1.0, v42
+; SI-NEXT:    v_mul_f32_e32 v30, 1.0, v44
+; SI-NEXT:    v_mul_f32_e32 v21, 1.0, v55
+; SI-NEXT:    v_mul_f32_e32 v23, 1.0, v31
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
+; SI-NEXT:    v_mul_f32_e32 v44, 1.0, v46
+; SI-NEXT:    v_mul_f32_e32 v29, 1.0, v56
+; SI-NEXT:    v_mul_f32_e32 v46, 1.0, v58
+; SI-NEXT:    v_mul_f32_e32 v28, 1.0, v59
 ; SI-NEXT:    v_mul_f32_e32 v56, 1.0, v62
-; SI-NEXT:    v_mul_f32_e32 v57, 1.0, v63
-; SI-NEXT:    v_mul_f32_e32 v28, 1.0, v45
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
+; SI-NEXT:    v_mul_f32_e32 v24, 1.0, v45
+; SI-NEXT:    v_mul_f32_e32 v25, 1.0, v63
+; SI-NEXT:    v_mul_f32_e32 v59, 1.0, v54
+; SI-NEXT:    v_mul_f32_e32 v58, 1.0, v53
+; SI-NEXT:    ; kill: killed $vgpr35
+; SI-NEXT:    ; implicit-def: $vgpr35
+; SI-NEXT:    ; implicit-def: $vgpr53
+; SI-NEXT:    ; implicit-def: $vgpr45
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
 ; SI-NEXT:    ; implicit-def: $vgpr62
 ; SI-NEXT:    ; implicit-def: $vgpr63
 ; SI-NEXT:    ; implicit-def: $vgpr41
 ; SI-NEXT:    ; implicit-def: $vgpr42
+; SI-NEXT:    ; implicit-def: $vgpr51
 ; SI-NEXT:    ; implicit-def: $vgpr52
-; SI-NEXT:    ; implicit-def: $vgpr53
 ; SI-NEXT:    ; implicit-def: $vgpr49
 ; SI-NEXT:    ; implicit-def: $vgpr50
 ; SI-NEXT:    ; implicit-def: $vgpr39
 ; SI-NEXT:    ; implicit-def: $vgpr48
 ; SI-NEXT:    ; implicit-def: $vgpr37
 ; SI-NEXT:    ; implicit-def: $vgpr38
+; SI-NEXT:    ; kill: killed $vgpr35
 ; SI-NEXT:    ; implicit-def: $vgpr35
 ; SI-NEXT:    ; implicit-def: $vgpr36
-; SI-NEXT:    ; implicit-def: $vgpr34
+; SI-NEXT:    ; kill: killed $vgpr53
+; SI-NEXT:    ; implicit-def: $vgpr53
+; SI-NEXT:    ; implicit-def: $vgpr54
+; SI-NEXT:    ; kill: killed $vgpr45
 ; SI-NEXT:    ; implicit-def: $vgpr45
-; SI-NEXT:    ; kill: killed $vgpr27
-; SI-NEXT:    ; implicit-def: $vgpr27
-; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_mul_f32_e32 v24, 1.0, v2
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_mul_f32_e32 v58, 1.0, v4
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_mul_f32_e32 v59, 1.0, v16
-; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_mul_f32_e32 v16, 1.0, v21
+; SI-NEXT:    ; kill: killed $vgpr55
+; SI-NEXT:    ; implicit-def: $vgpr55
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_mul_f32_e32 v4, 1.0, v23
+; SI-NEXT:    v_mul_f32_e32 v61, 1.0, v12
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v25
+; SI-NEXT:    v_mul_f32_e32 v60, 1.0, v20
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v21, 1.0, v26
-; SI-NEXT:    buffer_load_dword v23, off, s[0:3], s32 offset:124
-; SI-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:128
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:132
-; SI-NEXT:    v_mul_f32_e32 v22, 1.0, v22
+; SI-NEXT:    v_mul_f32_e32 v20, 1.0, v22
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:124
+; SI-NEXT:    buffer_load_dword v22, off, s[0:3], s32 offset:128
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:132
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_mul_f32_e32 v29, 1.0, v23
+; SI-NEXT:    v_mul_f32_e32 v26, 1.0, v12
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_mul_f32_e32 v25, 1.0, v25
+; SI-NEXT:    v_mul_f32_e32 v22, 1.0, v22
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v23, 1.0, v26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
-; SI-NEXT:    ; kill: killed $vgpr26
-; SI-NEXT:    ; implicit-def: $vgpr26
+; SI-NEXT:    v_mul_f32_e32 v12, 1.0, v31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
+; SI-NEXT:    ; kill: killed $vgpr31
+; SI-NEXT:    ; implicit-def: $vgpr31
 ; SI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; SI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB104_2
 ; SI-NEXT:  ; %bb.1: ; %cmp.false
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v33
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v31, 16, v33
+; SI-NEXT:    buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v43
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v31, 16, v43
+; SI-NEXT:    buffer_store_dword v31, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT:    v_lshrrev_b32_e32 v14, 16, v14
-; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v4, 16, v4
+; SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
+; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v55, 16, v22
+; SI-NEXT:    ; implicit-def: $vgpr33
+; SI-NEXT:    ; implicit-def: $vgpr43
+; SI-NEXT:    ; implicit-def: $vgpr4
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr22
+; SI-NEXT:    s_waitcnt expcnt(2)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(6)
+; SI-NEXT:    v_lshrrev_b32_e32 v62, 16, v31
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(5)
+; SI-NEXT:    v_lshrrev_b32_e32 v51, 16, v3
+; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_lshrrev_b32_e32 v49, 16, v1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_lshrrev_b32_e32 v63, 16, v31
+; SI-NEXT:    v_lshrrev_b32_e32 v31, 16, v40
+; SI-NEXT:    buffer_store_dword v31, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v30
-; SI-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v31, 16, v32
+; SI-NEXT:    buffer_store_dword v31, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(4)
+; SI-NEXT:    v_lshrrev_b32_e32 v52, 16, v3
+; SI-NEXT:    ; implicit-def: $vgpr40
+; SI-NEXT:    ; implicit-def: $vgpr32
+; SI-NEXT:    ; implicit-def: $vgpr3
+; SI-NEXT:    s_waitcnt vmcnt(3)
+; SI-NEXT:    v_lshrrev_b32_e32 v50, 16, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v16
+; SI-NEXT:    ; implicit-def: $vgpr16
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
-; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT:    v_lshrrev_b32_e32 v13, 16, v13
-; SI-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
-; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v11
-; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v14
+; SI-NEXT:    ; implicit-def: $vgpr14
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(3)
+; SI-NEXT:    v_lshrrev_b32_e32 v41, 16, v31
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_lshrrev_b32_e32 v39, 16, v1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_lshrrev_b32_e32 v42, 16, v31
+; SI-NEXT:    v_lshrrev_b32_e32 v31, 16, v61
+; SI-NEXT:    ; implicit-def: $vgpr61
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v48, 16, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v15
+; SI-NEXT:    ; implicit-def: $vgpr15
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v13
+; SI-NEXT:    ; implicit-def: $vgpr13
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v37, 16, v1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v38, 16, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v19
+; SI-NEXT:    ; implicit-def: $vgpr19
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v18
+; SI-NEXT:    ; implicit-def: $vgpr18
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v35, 16, v1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v36, 16, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v17
+; SI-NEXT:    ; implicit-def: $vgpr17
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v11
+; SI-NEXT:    ; implicit-def: $vgpr11
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v53, 16, v1
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v54, 16, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v10
+; SI-NEXT:    ; implicit-def: $vgpr10
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v9
+; SI-NEXT:    ; implicit-def: $vgpr9
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v45, 16, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v8
+; SI-NEXT:    ; implicit-def: $vgpr8
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v7
+; SI-NEXT:    ; implicit-def: $vgpr7
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v47
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v46
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v6
+; SI-NEXT:    ; implicit-def: $vgpr6
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v5
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v30
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr30
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v61
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v44
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr44
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v60
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v34
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr34
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v56
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v29
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr29
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v57
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v47
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr47
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v24
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v46
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr46
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v28
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr28
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v59
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v27
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr27
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v58
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v57
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr57
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v22
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v56
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr56
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v24
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr24
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v4
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v25
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr25
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v59
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; kill: killed $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr59
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v21
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v58
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr58
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v23
+; SI-NEXT:    ; implicit-def: $vgpr23
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v29
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v21
+; SI-NEXT:    ; implicit-def: $vgpr21
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v23
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; kill: killed $vgpr1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v60
+; SI-NEXT:    ; implicit-def: $vgpr60
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v20
+; SI-NEXT:    ; implicit-def: $vgpr20
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v26
+; SI-NEXT:    ; implicit-def: $vgpr26
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v12
+; SI-NEXT:    ; implicit-def: $vgpr12
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
 ; SI-NEXT:    ; implicit-def: $vgpr1
 ; SI-NEXT:    ; kill: killed $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr1
 ; SI-NEXT:    ; kill: killed $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
 ; SI-NEXT:    ; kill: killed $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v5, 16, v20
 ; SI-NEXT:    ; kill: killed $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    v_lshrrev_b32_e32 v15, 16, v15
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
-; SI-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
-; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v5, 16, v44
-; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
 ; SI-NEXT:    ; kill: killed $vgpr1
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(14)
-; SI-NEXT:    v_lshrrev_b32_e32 v62, 16, v26
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT:    v_lshrrev_b32_e32 v39, 16, v14
-; SI-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v25
-; SI-NEXT:    ; implicit-def: $vgpr33
-; SI-NEXT:    ; implicit-def: $vgpr43
-; SI-NEXT:    ; implicit-def: $vgpr15
-; SI-NEXT:    ; implicit-def: $vgpr10
-; SI-NEXT:    ; implicit-def: $vgpr8
-; SI-NEXT:    ; implicit-def: $vgpr7
-; SI-NEXT:    ; implicit-def: $vgpr30
 ; SI-NEXT:    ; kill: killed $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr6
-; SI-NEXT:    ; implicit-def: $vgpr5
-; SI-NEXT:    ; implicit-def: $vgpr20
-; SI-NEXT:    ; implicit-def: $vgpr44
-; SI-NEXT:    ; implicit-def: $vgpr3
 ; SI-NEXT:    ; implicit-def: $vgpr1
-; SI-NEXT:    ; implicit-def: $vgpr47
-; SI-NEXT:    ; implicit-def: $vgpr46
-; SI-NEXT:    ; implicit-def: $vgpr61
-; SI-NEXT:    ; implicit-def: $vgpr60
-; SI-NEXT:    ; implicit-def: $vgpr56
-; SI-NEXT:    ; implicit-def: $vgpr57
-; SI-NEXT:    ; implicit-def: $vgpr24
-; SI-NEXT:    ; implicit-def: $vgpr28
-; SI-NEXT:    ; implicit-def: $vgpr59
-; SI-NEXT:    ; implicit-def: $vgpr58
-; SI-NEXT:    ; implicit-def: $vgpr22
-; SI-NEXT:    ; implicit-def: $vgpr16
-; SI-NEXT:    ; implicit-def: $vgpr4
-; SI-NEXT:    ; implicit-def: $vgpr2
-; SI-NEXT:    ; implicit-def: $vgpr21
-; SI-NEXT:    ; implicit-def: $vgpr29
-; SI-NEXT:    ; implicit-def: $vgpr25
-; SI-NEXT:    ; implicit-def: $vgpr23
-; SI-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(12)
-; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
-; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(12)
-; SI-NEXT:    v_lshrrev_b32_e32 v63, 16, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v32
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v31
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(10)
-; SI-NEXT:    v_lshrrev_b32_e32 v48, 16, v14
-; SI-NEXT:    v_lshrrev_b32_e32 v14, 16, v19
-; SI-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT:    ; implicit-def: $vgpr32
-; SI-NEXT:    ; implicit-def: $vgpr31
-; SI-NEXT:    ; implicit-def: $vgpr14
-; SI-NEXT:    ; implicit-def: $vgpr19
-; SI-NEXT:    s_waitcnt vmcnt(8)
-; SI-NEXT:    v_lshrrev_b32_e32 v37, 16, v13
-; SI-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(8)
-; SI-NEXT:    v_lshrrev_b32_e32 v35, 16, v12
-; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_lshrrev_b32_e32 v45, 16, v9
-; SI-NEXT:    ; implicit-def: $vgpr9
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_lshrrev_b32_e32 v41, 16, v26
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_lshrrev_b32_e32 v38, 16, v13
-; SI-NEXT:    v_lshrrev_b32_e32 v13, 16, v18
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_lshrrev_b32_e32 v36, 16, v12
-; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v17
-; SI-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT:    ; implicit-def: $vgpr13
-; SI-NEXT:    ; implicit-def: $vgpr18
-; SI-NEXT:    ; implicit-def: $vgpr12
-; SI-NEXT:    ; implicit-def: $vgpr17
-; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_lshrrev_b32_e32 v42, 16, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v40
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v55
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT:    ; implicit-def: $vgpr40
-; SI-NEXT:    ; implicit-def: $vgpr55
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v52, 16, v26
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v53, 16, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v54
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v51
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT:    ; implicit-def: $vgpr54
-; SI-NEXT:    ; implicit-def: $vgpr51
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v49, 16, v26
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v50, 16, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v11
-; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v34, 16, v11
-; SI-NEXT:    ; implicit-def: $vgpr11
 ; SI-NEXT:  .LBB104_2: ; %Flow
 ; SI-NEXT:    s_andn2_saveexec_b64 s[4:5], s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB104_4
 ; SI-NEXT:  ; %bb.3: ; %cmp.true
-; SI-NEXT:    v_and_b32_e32 v27, 0xffff0000, v43
-; SI-NEXT:    v_and_b32_e32 v26, 0xffff0000, v33
-; SI-NEXT:    v_add_f32_e32 v33, 0x40c00000, v27
-; SI-NEXT:    v_add_f32_e32 v26, 0x40c00000, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v33
-; SI-NEXT:    v_alignbit_b32 v26, v27, v26, 16
-; SI-NEXT:    v_and_b32_e32 v27, 0xffff0000, v31
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v26, 0xffff0000, v32
-; SI-NEXT:    v_add_f32_e32 v31, 0x40c00000, v27
-; SI-NEXT:    v_add_f32_e32 v26, 0x40c00000, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v31
-; SI-NEXT:    v_alignbit_b32 v26, v27, v26, 16
-; SI-NEXT:    v_and_b32_e32 v27, 0xffff0000, v55
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; SI-NEXT:    v_and_b32_e32 v31, 0xffff0000, v33
+; SI-NEXT:    v_and_b32_e32 v33, 0xffff0000, v43
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; SI-NEXT:    v_add_f32_e32 v51, 0x40c00000, v1
+; SI-NEXT:    v_add_f32_e32 v33, 0x40c00000, v33
+; SI-NEXT:    v_add_f32_e32 v2, 0x40c00000, v2
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v51
+; SI-NEXT:    v_add_f32_e32 v31, 0x40c00000, v31
+; SI-NEXT:    v_lshrrev_b32_e32 v35, 16, v33
+; SI-NEXT:    v_alignbit_b32 v1, v1, v2, 16
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v14
+; SI-NEXT:    v_alignbit_b32 v31, v35, v31, 16
+; SI-NEXT:    v_and_b32_e32 v32, 0xffff0000, v32
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v26, 0xffff0000, v40
-; SI-NEXT:    v_add_f32_e32 v32, 0x40c00000, v27
-; SI-NEXT:    v_add_f32_e32 v26, 0x40c00000, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v32
-; SI-NEXT:    v_alignbit_b32 v26, v27, v26, 16
-; SI-NEXT:    v_and_b32_e32 v27, 0xffff0000, v51
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v16
+; SI-NEXT:    v_add_f32_e32 v14, 0x40c00000, v2
+; SI-NEXT:    buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v26, 0xffff0000, v54
-; SI-NEXT:    v_add_f32_e32 v51, 0x40c00000, v27
-; SI-NEXT:    v_add_f32_e32 v26, 0x40c00000, v26
-; SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v51
-; SI-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT:    v_alignbit_b32 v26, v27, v26, 16
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; SI-NEXT:    v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT:    buffer_store_dword v26, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
+; SI-NEXT:    v_and_b32_e32 v31, 0xffff0000, v40
+; SI-NEXT:    v_add_f32_e32 v32, 0x40c00000, v32
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v14
+; SI-NEXT:    v_add_f32_e32 v31, 0x40c00000, v31
+; SI-NEXT:    v_lshrrev_b32_e32 v35, 16, v32
+; SI-NEXT:    v_alignbit_b32 v1, v2, v1, 16
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v13
+; SI-NEXT:    v_alignbit_b32 v31, v35, v31, 16
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v26, 16, v14
-; SI-NEXT:    v_alignbit_b32 v15, v26, v15, 16
-; SI-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
-; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v15
+; SI-NEXT:    v_add_f32_e32 v13, 0x40c00000, v2
+; SI-NEXT:    buffer_store_dword v31, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v19
-; SI-NEXT:    v_add_f32_e32 v13, 0x40c00000, v13
-; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
-; SI-NEXT:    v_lshrrev_b32_e32 v19, 16, v13
-; SI-NEXT:    v_alignbit_b32 v15, v19, v15, 16
-; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
-; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v31, 0x40c00000, v3
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v13
+; SI-NEXT:    v_add_f32_e32 v4, 0x40c00000, v4
+; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v31
+; SI-NEXT:    v_alignbit_b32 v1, v2, v1, 16
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v18
+; SI-NEXT:    v_alignbit_b32 v3, v3, v4, 16
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v18
-; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
-; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
-; SI-NEXT:    v_lshrrev_b32_e32 v18, 16, v12
-; SI-NEXT:    v_alignbit_b32 v15, v18, v15, 16
-; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
-; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v19
+; SI-NEXT:    v_add_f32_e32 v2, 0x40c00000, v2
+; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v17
-; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
-; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
-; SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v11
-; SI-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
-; SI-NEXT:    v_alignbit_b32 v15, v17, v15, 16
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; SI-NEXT:    v_add_f32_e32 v9, 0x40c00000, v9
-; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
+; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; SI-NEXT:    v_alignbit_b32 v1, v3, v1, 16
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v11
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v17
+; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v4, 16, v3
+; SI-NEXT:    v_alignbit_b32 v1, v4, v1, 16
+; SI-NEXT:    v_and_b32_e32 v4, 0xffff0000, v9
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v15, 16, v9
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v10
+; SI-NEXT:    v_add_f32_e32 v4, 0x40c00000, v4
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v4
+; SI-NEXT:    v_alignbit_b32 v1, v9, v1, 16
 ; SI-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; SI-NEXT:    v_alignbit_b32 v10, v15, v10, 16
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
-; SI-NEXT:    v_add_f32_e32 v7, 0x40c00000, v7
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v7
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v8
+; SI-NEXT:    v_add_f32_e32 v7, 0x40c00000, v7
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v8, 16, v7
+; SI-NEXT:    v_alignbit_b32 v1, v8, v1, 16
 ; SI-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; SI-NEXT:    v_alignbit_b32 v8, v10, v8, 16
-; SI-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; SI-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
-; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v8, 16, v5
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; SI-NEXT:    v_alignbit_b32 v6, v8, v6, 16
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v6
+; SI-NEXT:    v_add_f32_e32 v5, 0x40c00000, v5
 ; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v6, 16, v1
-; SI-NEXT:    v_alignbit_b32 v3, v6, v3, 16
-; SI-NEXT:    v_and_b32_e32 v6, 0xffff0000, v60
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshrrev_b32_e32 v6, 16, v5
+; SI-NEXT:    v_alignbit_b32 v1, v6, v1, 16
+; SI-NEXT:    v_and_b32_e32 v6, 0xffff0000, v29
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v61
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v34
 ; SI-NEXT:    v_add_f32_e32 v6, 0x40c00000, v6
-; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v8, 16, v6
-; SI-NEXT:    v_alignbit_b32 v3, v8, v3, 16
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v28
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v1, v8, v1, 16
+; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v27
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v24
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v28
 ; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v8
-; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; SI-NEXT:    v_alignbit_b32 v3, v10, v3, 16
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v16
-; SI-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; SI-NEXT:    v_add_f32_e32 v2, 0x40c00000, v2
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_add_f32_e32 v4, 0x40c00000, v4
-; SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
-; SI-NEXT:    v_lshrrev_b32_e32 v15, 16, v10
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v18, v2, v4, 16
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v8
+; SI-NEXT:    v_alignbit_b32 v1, v9, v1, 16
+; SI-NEXT:    v_and_b32_e32 v9, 0xffff0000, v25
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v24
+; SI-NEXT:    v_add_f32_e32 v9, 0x40c00000, v9
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v9
+; SI-NEXT:    v_alignbit_b32 v1, v10, v1, 16
+; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v21
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v10
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v58
-; SI-NEXT:    v_and_b32_e32 v4, 0xffff0000, v59
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v23
 ; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_add_f32_e32 v4, 0x40c00000, v4
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v10
+; SI-NEXT:    v_alignbit_b32 v1, v11, v1, 16
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v26
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v22
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v19, v10, v4, 16
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v20
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v15, 16, v11
+; SI-NEXT:    v_alignbit_b32 v1, v15, v1, 16
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v57
-; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; SI-NEXT:    v_and_b32_e32 v4, 0xffff0000, v8
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v56
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_alignbit_b32 v3, v15, v3, 16
-; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v22
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v18, v12, v1, 16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v21
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v21, v10, v8, 16
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v60
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v11
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v61
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v16, v12, v11, 16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v46
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v47
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v22, v10, v8, 16
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v58
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v59
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v19, v12, v11, 16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v44
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v20
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v20, v10, v8, 16
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v56
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v57
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v20, v12, v11, 16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v29
-; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v30
-; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v15
-; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; SI-NEXT:    v_alignbit_b32 v3, v16, v3, 16
-; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v23
-; SI-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v46
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v47
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v21, v12, v11, 16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v25
-; SI-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
-; SI-NEXT:    v_add_f32_e32 v3, 0x40c00000, v3
-; SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v16
-; SI-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v27, v16, v3, 16
-; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v15
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; SI-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v44
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v30
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v22, v12, v11, 16
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; SI-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; SI-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
+; SI-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; SI-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; SI-NEXT:    v_alignbit_b32 v5, v20, v5, 16
-; SI-NEXT:    v_alignbit_b32 v1, v22, v1, 16
-; SI-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(5)
+; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
+; SI-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v23, v10, v8, 16
-; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT:    v_mov_b32_e32 v55, v18
+; SI-NEXT:    v_alignbit_b32 v1, v18, v1, 16
+; SI-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v17, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(3)
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; SI-NEXT:    s_waitcnt vmcnt(2)
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v23, v12, v11, 16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v7, v23, v7, 16
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
-; SI-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
-; SI-NEXT:    v_lshrrev_b32_e32 v53, 16, v15
-; SI-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshrrev_b32_e32 v42, 16, v16
-; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(5)
+; SI-NEXT:    v_lshrrev_b32_e32 v52, 16, v15
+; SI-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(4)
 ; SI-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; SI-NEXT:    v_add_f32_e32 v17, 0x40c00000, v17
 ; SI-NEXT:    v_lshrrev_b32_e32 v63, 16, v17
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_add_f32_e32 v8, 0x40c00000, v8
-; SI-NEXT:    v_lshrrev_b32_e32 v45, 16, v10
-; SI-NEXT:    v_alignbit_b32 v24, v45, v8, 16
-; SI-NEXT:    v_and_b32_e32 v8, 0xffff0000, v9
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v8, v24, v8, 16
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
-; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
-; SI-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
-; SI-NEXT:    v_alignbit_b32 v41, v42, v15, 16
-; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v31
-; SI-NEXT:    v_alignbit_b32 v62, v63, v16, 16
-; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v33
-; SI-NEXT:    v_alignbit_b32 v15, v41, v15, 16
-; SI-NEXT:    v_alignbit_b32 v16, v62, v16, 16
-; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_lshrrev_b32_e32 v45, 16, v12
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_add_f32_e32 v9, 0x40c00000, v9
-; SI-NEXT:    v_lshrrev_b32_e32 v34, 16, v10
-; SI-NEXT:    v_alignbit_b32 v26, v34, v9, 16
-; SI-NEXT:    v_and_b32_e32 v9, 0xffff0000, v11
-; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v9, v26, v9, 16
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_alignbit_b32 v24, v45, v11, 16
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_lshrrev_b32_e32 v54, 16, v12
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
-; SI-NEXT:    v_lshrrev_b32_e32 v36, 16, v11
-; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT:    v_alignbit_b32 v53, v54, v11, 16
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
-; SI-NEXT:    v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT:    v_alignbit_b32 v35, v36, v10, 16
-; SI-NEXT:    v_and_b32_e32 v10, 0xffff0000, v12
-; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v10, v35, v10, 16
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
+; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
+; SI-NEXT:    v_lshrrev_b32_e32 v36, 16, v12
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
+; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT:    v_alignbit_b32 v35, v36, v11, 16
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT:    v_alignbit_b32 v2, v35, v2, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_alignbit_b32 v2, v53, v3, 16
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
 ; SI-NEXT:    v_lshrrev_b32_e32 v38, 16, v12
-; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; SI-NEXT:    v_add_f32_e32 v11, 0x40c00000, v11
 ; SI-NEXT:    v_alignbit_b32 v37, v38, v11, 16
 ; SI-NEXT:    v_and_b32_e32 v11, 0xffff0000, v13
-; SI-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
 ; SI-NEXT:    v_alignbit_b32 v11, v37, v11, 16
-; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; SI-NEXT:    v_add_f32_e32 v13, 0x40c00000, v13
 ; SI-NEXT:    v_lshrrev_b32_e32 v48, 16, v13
-; SI-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
 ; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; SI-NEXT:    v_add_f32_e32 v12, 0x40c00000, v12
 ; SI-NEXT:    v_alignbit_b32 v39, v48, v12, 16
 ; SI-NEXT:    v_and_b32_e32 v12, 0xffff0000, v14
-; SI-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
 ; SI-NEXT:    v_alignbit_b32 v12, v39, v12, 16
-; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v12, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; SI-NEXT:    v_add_f32_e32 v14, 0x40c00000, v14
 ; SI-NEXT:    v_lshrrev_b32_e32 v50, 16, v14
-; SI-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
 ; SI-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; SI-NEXT:    v_add_f32_e32 v13, 0x40c00000, v13
 ; SI-NEXT:    v_alignbit_b32 v49, v50, v13, 16
 ; SI-NEXT:    v_and_b32_e32 v13, 0xffff0000, v51
 ; SI-NEXT:    v_alignbit_b32 v13, v49, v13, 16
-; SI-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v13, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; SI-NEXT:    v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT:    v_alignbit_b32 v52, v53, v14, 16
-; SI-NEXT:    v_and_b32_e32 v14, 0xffff0000, v32
-; SI-NEXT:    v_alignbit_b32 v14, v52, v14, 16
-; SI-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v51, v52, v14, 16
+; SI-NEXT:    v_and_b32_e32 v14, 0xffff0000, v31
+; SI-NEXT:    v_mov_b32_e32 v31, v16
+; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT:    v_alignbit_b32 v14, v51, v14, 16
+; SI-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; SI-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
+; SI-NEXT:    v_lshrrev_b32_e32 v42, 16, v16
+; SI-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; SI-NEXT:    v_add_f32_e32 v15, 0x40c00000, v15
+; SI-NEXT:    v_alignbit_b32 v41, v42, v15, 16
+; SI-NEXT:    v_and_b32_e32 v15, 0xffff0000, v32
+; SI-NEXT:    v_alignbit_b32 v15, v41, v15, 16
+; SI-NEXT:    buffer_store_dword v15, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
+; SI-NEXT:    v_add_f32_e32 v16, 0x40c00000, v16
+; SI-NEXT:    v_alignbit_b32 v62, v63, v16, 16
+; SI-NEXT:    v_and_b32_e32 v16, 0xffff0000, v33
+; SI-NEXT:    v_alignbit_b32 v16, v62, v16, 16
+; SI-NEXT:    buffer_store_dword v16, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v24, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    v_alignbit_b32 v1, v21, v6, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v2, v24, v4, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v23, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    v_alignbit_b32 v1, v19, v4, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v2, v23, v7, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v22, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    v_alignbit_b32 v1, v18, v2, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v2, v22, v5, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v21, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(1)
+; SI-NEXT:    v_alignbit_b32 v2, v21, v6, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v20, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(1)
+; SI-NEXT:    v_alignbit_b32 v2, v20, v8, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v19, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(1)
+; SI-NEXT:    v_alignbit_b32 v2, v19, v9, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_alignbit_b32 v1, v27, v3, 16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v2, v31, v10, 16
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
 ; SI-NEXT:  .LBB104_4: ; %end
 ; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237124,8 +237137,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237140,8 +237153,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 12, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237150,14 +237163,14 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 16, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v52
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v53
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v51
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v52
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 20, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237172,8 +237185,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 28, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237188,8 +237201,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 36, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237204,8 +237217,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 44, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237220,8 +237233,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 52, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237230,14 +237243,14 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 56, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v26
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v34
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v53
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v54
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 60, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237246,7 +237259,7 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 64, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v45
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
@@ -237254,8 +237267,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x44, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237264,8 +237277,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x48, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237274,8 +237287,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x4c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237284,8 +237297,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x50, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237294,8 +237307,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x54, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237304,8 +237317,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x58, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237314,8 +237327,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x5c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237324,8 +237337,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x60, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237334,8 +237347,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x64, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237344,8 +237357,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x68, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237354,8 +237367,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x6c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237363,19 +237376,17 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x70, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v31
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x74, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -237383,9 +237394,9 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x78, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v27
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v55
 ; SI-NEXT:    v_add_i32_e32 v0, vcc, 0x7c, v0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
@@ -242274,10 +242285,10 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
 ; GFX11-NEXT:    v_add_f32_e32 v4, 0x40c00000, v6
 ; GFX11-NEXT:    v_and_b32_e32 v6, 0xffff0000, v29
 ; GFX11-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v29
 ; GFX11-NEXT:    v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_dual_cndmask_b32 v28, v1, v3 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-NEXT:    v_dual_cndmask_b32 v28, v1, v3 :: v_dual_lshlrev_b32 v5, 16, v29
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_add_f32_e32 v3, 0x40c00000, v6
 ; GFX11-NEXT:    v_add_nc_u32_e32 v0, v0, v2
 ; GFX11-NEXT:    v_bfe_u32 v1, v4, 16, 1
 ; GFX11-NEXT:    v_or_b32_e32 v6, 0x400000, v2
@@ -242299,11 +242310,11 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v29, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v30
 ; GFX11-NEXT:    v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT:    v_add_f32_e32 v2, 0x40c00000, v7
 ; GFX11-NEXT:    v_or_b32_e32 v1, 0x400000, v3
+; GFX11-NEXT:    v_add_f32_e32 v2, 0x40c00000, v7
 ; GFX11-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-NEXT:    v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v52, v0, v1 :: v_dual_add_nc_u32 v1, 0x7fff, v4
 ; GFX11-NEXT:    v_bfe_u32 v0, v2, 16, 1
 ; GFX11-NEXT:    v_add_f32_e32 v4, 0x40c00000, v6
@@ -245085,659 +245096,663 @@ define <64 x i16> @bitcast_v64f16_to_v64i16(<64 x half> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
 ; SI-NEXT:    buffer_store_dword v63, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
 ; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:136
-; SI-NEXT:    buffer_load_dword v35, off, s[0:3], s32
-; SI-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:4
-; SI-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:8
-; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:12
-; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:16
-; SI-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:20
-; SI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:24
-; SI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:28
-; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:32
-; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:36
-; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:40
-; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:44
+; SI-NEXT:    buffer_load_dword v39, off, s[0:3], s32
+; SI-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:4
+; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:8
+; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:12
+; SI-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:16
+; SI-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:20
+; SI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:24
+; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:28
+; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:32
+; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:36
+; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:40
 ; SI-NEXT:    s_waitcnt expcnt(6)
-; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:48
+; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:44
 ; SI-NEXT:    s_waitcnt expcnt(5)
-; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:52
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:48
 ; SI-NEXT:    s_waitcnt expcnt(4)
-; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:56
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:52
 ; SI-NEXT:    s_waitcnt expcnt(3)
-; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:60
+; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:56
 ; SI-NEXT:    s_waitcnt expcnt(2)
-; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:64
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:60
 ; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:68
-; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:72
-; SI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:76
+; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:64
+; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:68
+; SI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:72
+; SI-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:76
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:80
 ; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:84
 ; SI-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:92
+; SI-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:88
+; SI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:100
+; SI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:96
+; SI-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:108
+; SI-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:104
+; SI-NEXT:    v_cvt_f16_f32_e32 v55, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v17
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v9
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v14
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v43, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v44, v4
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v5
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v18
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v18
-; SI-NEXT:    v_cvt_f16_f32_e32 v55, v7
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v5
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v22
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT:    v_cvt_f16_f32_e32 v40, v8
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v10
-; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v22
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v6
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v30
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v21
-; SI-NEXT:    v_cvt_f16_f32_e32 v43, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v48, v16
-; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v17
-; SI-NEXT:    v_cvt_f16_f32_e32 v44, v4
-; SI-NEXT:    v_cvt_f16_f32_e32 v52, v11
-; SI-NEXT:    v_cvt_f16_f32_e32 v53, v12
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v13
-; SI-NEXT:    v_cvt_f16_f32_e32 v39, v15
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v13, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v9
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v30
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v12
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v13
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v19
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
 ; SI-NEXT:    v_cvt_f16_f32_e32 v19, v23
-; SI-NEXT:    v_cvt_f16_f32_e32 v20, v24
 ; SI-NEXT:    v_cvt_f16_f32_e32 v23, v25
-; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
+; SI-NEXT:    v_cvt_f16_f32_e32 v40, v8
+; SI-NEXT:    v_cvt_f16_f32_e32 v8, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v11
+; SI-NEXT:    v_cvt_f16_f32_e32 v10, v14
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v15
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v11, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v20, v24
+; SI-NEXT:    v_cvt_f16_f32_e32 v24, v26
 ; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v31
+; SI-NEXT:    v_cvt_f16_f32_e32 v25, v39
 ; SI-NEXT:    v_cvt_f16_f32_e32 v31, v27
-; SI-NEXT:    v_cvt_f16_f32_e32 v27, v29
-; SI-NEXT:    v_cvt_f16_f32_e32 v24, v35
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v42
-; SI-NEXT:    v_cvt_f16_f32_e32 v21, v47
-; SI-NEXT:    v_cvt_f16_f32_e32 v25, v50
-; SI-NEXT:    v_cvt_f16_f32_e32 v29, v51
-; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:88
-; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:100
-; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:96
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:108
-; SI-NEXT:    buffer_load_dword v30, off, s[0:3], s32 offset:104
-; SI-NEXT:    v_cvt_f16_f32_e32 v47, v54
-; SI-NEXT:    v_cvt_f16_f32_e32 v54, v46
-; SI-NEXT:    v_cvt_f16_f32_e32 v42, v56
-; SI-NEXT:    v_cvt_f16_f32_e32 v16, v41
+; SI-NEXT:    v_cvt_f16_f32_e32 v27, v28
+; SI-NEXT:    v_cvt_f16_f32_e32 v28, v29
+; SI-NEXT:    v_cvt_f16_f32_e32 v26, v51
+; SI-NEXT:    v_cvt_f16_f32_e32 v29, v42
+; SI-NEXT:    v_cvt_f16_f32_e32 v42, v47
+; SI-NEXT:    v_cvt_f16_f32_e32 v21, v38
+; SI-NEXT:    v_cvt_f16_f32_e32 v22, v50
+; SI-NEXT:    v_cvt_f16_f32_e32 v38, v54
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v56
+; SI-NEXT:    v_cvt_f16_f32_e32 v47, v41
 ; SI-NEXT:    v_cvt_f16_f32_e32 v17, v45
-; SI-NEXT:    v_cvt_f16_f32_e32 v22, v38
-; SI-NEXT:    v_cvt_f16_f32_e32 v38, v49
-; SI-NEXT:    v_cvt_f16_f32_e32 v11, v57
-; SI-NEXT:    v_cvt_f16_f32_e32 v12, v58
+; SI-NEXT:    v_cvt_f16_f32_e32 v12, v59
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v46
+; SI-NEXT:    v_cvt_f16_f32_e32 v46, v57
+; SI-NEXT:    v_cvt_f16_f32_e32 v41, v58
+; SI-NEXT:    v_cvt_f16_f32_e32 v50, v60
+; SI-NEXT:    v_cvt_f16_f32_e32 v30, v34
+; SI-NEXT:    v_cvt_f16_f32_e32 v51, v61
+; SI-NEXT:    v_cvt_f16_f32_e32 v34, v36
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:112
+; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:116
+; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:120
 ; SI-NEXT:    s_waitcnt vmcnt(14)
-; SI-NEXT:    v_cvt_f16_f32_e32 v56, v32
-; SI-NEXT:    v_cvt_f16_f32_e32 v49, v59
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v60
-; SI-NEXT:    s_waitcnt vmcnt(13)
-; SI-NEXT:    v_cvt_f16_f32_e32 v14, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v58, v61
-; SI-NEXT:    v_cvt_f16_f32_e32 v61, v62
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v33
-; SI-NEXT:    buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:112
-; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:116
-; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:120
+; SI-NEXT:    v_cvt_f16_f32_e32 v39, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v57, v62
+; SI-NEXT:    v_cvt_f16_f32_e32 v13, v35
+; SI-NEXT:    v_cvt_f16_f32_e32 v58, v32
+; SI-NEXT:    v_cvt_f16_f32_e32 v61, v33
 ; SI-NEXT:    v_cvt_f16_f32_e32 v33, v63
-; SI-NEXT:    s_waitcnt vmcnt(14)
-; SI-NEXT:    v_cvt_f16_f32_e32 v50, v37
-; SI-NEXT:    s_waitcnt vmcnt(8)
-; SI-NEXT:    v_cvt_f16_f32_e32 v45, v6
-; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v7
-; SI-NEXT:    s_waitcnt vmcnt(6) expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v14, v8
-; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_cvt_f16_f32_e32 v41, v30
-; SI-NEXT:    v_cvt_f16_f32_e32 v30, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v45, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v14, v49
+; SI-NEXT:    s_waitcnt vmcnt(12)
+; SI-NEXT:    v_cvt_f16_f32_e32 v54, v53
+; SI-NEXT:    v_cvt_f16_f32_e32 v32, v52
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_cvt_f16_f32_e32 v7, v32
+; SI-NEXT:    v_cvt_f16_f32_e32 v35, v7
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f16_f32_e32 v8, v36
-; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:124
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:128
-; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:132
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_cvt_f16_f32_e32 v46, v46
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v9
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v59, v36
+; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:124
+; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:128
+; SI-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:132
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_cvt_f16_f32_e32 v32, v6
+; SI-NEXT:    v_cvt_f16_f32_e32 v56, v9
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v10, v36
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v37
 ; SI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; SI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
 ; SI-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v59, v29
-; SI-NEXT:    v_mov_b32_e32 v29, v27
-; SI-NEXT:    v_mov_b32_e32 v57, v23
-; SI-NEXT:    v_mov_b32_e32 v60, v2
-; SI-NEXT:    v_mov_b32_e32 v62, v3
-; SI-NEXT:    v_mov_b32_e32 v63, v4
+; SI-NEXT:    v_mov_b32_e32 v60, v29
+; SI-NEXT:    v_mov_b32_e32 v29, v28
+; SI-NEXT:    v_mov_b32_e32 v62, v23
+; SI-NEXT:    v_mov_b32_e32 v63, v11
 ; SI-NEXT:    s_xor_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB108_2
 ; SI-NEXT:  ; %bb.1: ; %cmp.true
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT:    v_cvt_f32_f16_e32 v36, v36
 ; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
-; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT:    v_cvt_f32_f16_e32 v35, v35
 ; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
-; SI-NEXT:    v_add_f32_e32 v10, 0x38000000, v10
-; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
-; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT:    v_cvt_f32_f16_e32 v34, v34
+; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT:    v_cvt_f32_f16_e32 v33, v33
+; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
 ; SI-NEXT:    v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
+; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
 ; SI-NEXT:    v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
 ; SI-NEXT:    v_add_f32_e32 v14, 0x38000000, v14
+; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
 ; SI-NEXT:    v_cvt_f16_f32_e32 v14, v14
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v10
-; SI-NEXT:    v_or_b32_e32 v6, v6, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v8
-; SI-NEXT:    v_or_b32_e32 v7, v7, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v15
-; SI-NEXT:    v_or_b32_e32 v14, v14, v36
-; SI-NEXT:    v_cvt_f32_f16_e32 v33, v33
-; SI-NEXT:    v_cvt_f32_f16_e32 v4, v61
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v58
-; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
-; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
-; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
+; SI-NEXT:    v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
 ; SI-NEXT:    v_add_f32_e32 v33, 0x38000000, v33
-; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT:    v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v33, v33
-; SI-NEXT:    v_cvt_f16_f32_e32 v61, v4
-; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT:    v_or_b32_e32 v36, v36, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v7
+; SI-NEXT:    v_or_b32_e32 v35, v35, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v14
+; SI-NEXT:    v_or_b32_e32 v13, v13, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v34
+; SI-NEXT:    v_or_b32_e32 v33, v33, v37
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v58
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v57
+; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT:    v_cvt_f32_f16_e32 v11, v41
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v58, v37
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v48
 ; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT:    v_cvt_f32_f16_e32 v28, v28
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
 ; SI-NEXT:    v_cvt_f16_f32_e32 v12, v12
 ; SI-NEXT:    v_add_f32_e32 v11, 0x38000000, v11
-; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT:    v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT:    v_lshlrev_b32_e32 v48, 16, v58
+; SI-NEXT:    v_or_b32_e32 v57, v37, v48
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v12
+; SI-NEXT:    v_or_b32_e32 v41, v11, v37
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT:    v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT:    v_cvt_f32_f16_e32 v26, v26
+; SI-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT:    v_cvt_f32_f16_e32 v27, v27
+; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
 ; SI-NEXT:    v_cvt_f32_f16_e32 v31, v31
 ; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT:    v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
 ; SI-NEXT:    v_add_f32_e32 v22, 0x38000000, v22
 ; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
-; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
-; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
 ; SI-NEXT:    v_cvt_f16_f32_e32 v22, v22
 ; SI-NEXT:    v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT:    v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v26
+; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
 ; SI-NEXT:    v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
+; SI-NEXT:    v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
 ; SI-NEXT:    v_cvt_f16_f32_e32 v25, v25
-; SI-NEXT:    v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v28
-; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
+; SI-NEXT:    v_cvt_f16_f32_e32 v27, v27
 ; SI-NEXT:    v_add_f32_e32 v31, 0x38000000, v31
 ; SI-NEXT:    v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v18
 ; SI-NEXT:    v_cvt_f16_f32_e32 v31, v31
 ; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
 ; SI-NEXT:    v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
+; SI-NEXT:    v_or_b32_e32 v17, v17, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v22
 ; SI-NEXT:    v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v39
-; SI-NEXT:    v_cvt_f32_f16_e32 v34, v34
-; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
-; SI-NEXT:    v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
+; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT:    v_or_b32_e32 v21, v21, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v26
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
 ; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
+; SI-NEXT:    v_or_b32_e32 v25, v25, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v27
 ; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v2
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT:    v_or_b32_e32 v33, v33, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v61
-; SI-NEXT:    v_or_b32_e32 v58, v3, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v12
-; SI-NEXT:    v_or_b32_e32 v11, v11, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v17
-; SI-NEXT:    v_or_b32_e32 v16, v16, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v22
-; SI-NEXT:    v_or_b32_e32 v21, v21, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v25
-; SI-NEXT:    v_or_b32_e32 v24, v24, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v28
-; SI-NEXT:    v_or_b32_e32 v31, v31, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v20
-; SI-NEXT:    v_or_b32_e32 v19, v19, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v13
-; SI-NEXT:    v_or_b32_e32 v18, v18, v36
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v48
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v48, v36
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v37
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v9, 16, v9
-; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
 ; SI-NEXT:    v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT:    v_or_b32_e32 v39, v36, v37
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v53
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v52
+; SI-NEXT:    v_or_b32_e32 v31, v31, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v20
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT:    v_cvt_f32_f16_e32 v26, v26
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v53, v36
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v37
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v53
-; SI-NEXT:    v_cvt_f32_f16_e32 v57, v57
-; SI-NEXT:    v_or_b32_e32 v52, v36, v37
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v40
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v55
-; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v26
-; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v40, v36
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v37
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v26
-; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v40
-; SI-NEXT:    v_cvt_f32_f16_e32 v47, v47
-; SI-NEXT:    v_or_b32_e32 v55, v36, v37
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v44
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v43
-; SI-NEXT:    v_cvt_f32_f16_e32 v42, v42
-; SI-NEXT:    v_cvt_f32_f16_e32 v51, v51
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v44, v36
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v37
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v35
-; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v44
-; SI-NEXT:    v_cvt_f32_f16_e32 v50, v50
-; SI-NEXT:    v_or_b32_e32 v43, v36, v37
+; SI-NEXT:    v_or_b32_e32 v19, v19, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v15
+; SI-NEXT:    v_or_b32_e32 v16, v16, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v3
+; SI-NEXT:    v_or_b32_e32 v2, v2, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v6
+; SI-NEXT:    v_or_b32_e32 v5, v5, v37
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v40
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v55
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v40, v37
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v48, 16, v40
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_or_b32_e32 v55, v37, v48
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v44
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v43
+; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v44, v37
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v48, 16, v44
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT:    v_or_b32_e32 v43, v37, v48
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v11
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT:    v_add_f32_e32 v10, 0x38000000, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
+; SI-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v52, v62
+; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
+; SI-NEXT:    v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT:    v_cvt_f32_f16_e32 v28, v42
 ; SI-NEXT:    v_cvt_f32_f16_e32 v30, v30
+; SI-NEXT:    v_cvt_f32_f16_e32 v39, v39
+; SI-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
+; SI-NEXT:    v_cvt_f32_f16_e32 v42, v45
+; SI-NEXT:    v_add_f32_e32 v30, 0x38000000, v30
+; SI-NEXT:    v_cvt_f16_f32_e32 v30, v30
+; SI-NEXT:    v_lshlrev_b32_e32 v28, 16, v28
+; SI-NEXT:    v_add_f32_e32 v39, 0x38000000, v39
+; SI-NEXT:    v_cvt_f16_f32_e32 v39, v39
+; SI-NEXT:    v_add_f32_e32 v42, 0x38000000, v42
+; SI-NEXT:    v_cvt_f16_f32_e32 v42, v42
+; SI-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
 ; SI-NEXT:    v_cvt_f32_f16_e32 v32, v32
+; SI-NEXT:    v_cvt_f32_f16_e32 v51, v51
+; SI-NEXT:    v_lshlrev_b32_e32 v39, 16, v39
 ; SI-NEXT:    v_cvt_f32_f16_e32 v29, v29
 ; SI-NEXT:    v_cvt_f32_f16_e32 v38, v38
-; SI-NEXT:    v_cvt_f32_f16_e32 v54, v54
-; SI-NEXT:    v_cvt_f32_f16_e32 v49, v49
-; SI-NEXT:    v_cvt_f32_f16_e32 v45, v45
-; SI-NEXT:    v_cvt_f32_f16_e32 v41, v41
-; SI-NEXT:    v_cvt_f32_f16_e32 v46, v46
-; SI-NEXT:    v_add_f32_e32 v47, 0x38000000, v47
-; SI-NEXT:    v_add_f32_e32 v42, 0x38000000, v42
-; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v51
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
-; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v2
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT:    v_add_f32_e32 v50, 0x38000000, v50
-; SI-NEXT:    v_add_f32_e32 v30, 0x38000000, v30
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
+; SI-NEXT:    v_cvt_f32_f16_e32 v50, v50
+; SI-NEXT:    v_or_b32_e32 v45, v42, v39
+; SI-NEXT:    v_cvt_f32_f16_e32 v42, v59
 ; SI-NEXT:    v_add_f32_e32 v32, 0x38000000, v32
+; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v51
+; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
 ; SI-NEXT:    v_add_f32_e32 v29, 0x38000000, v29
-; SI-NEXT:    v_cvt_f16_f32_e32 v47, v47
 ; SI-NEXT:    v_add_f32_e32 v38, 0x38000000, v38
-; SI-NEXT:    v_cvt_f16_f32_e32 v42, v42
-; SI-NEXT:    v_add_f32_e32 v54, 0x38000000, v54
 ; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
-; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v50, v50
-; SI-NEXT:    v_add_f32_e32 v45, 0x38000000, v45
-; SI-NEXT:    v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT:    v_add_f32_e32 v41, 0x38000000, v41
-; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
-; SI-NEXT:    v_add_f32_e32 v46, 0x38000000, v46
+; SI-NEXT:    v_add_f32_e32 v50, 0x38000000, v50
 ; SI-NEXT:    v_cvt_f16_f32_e32 v29, v29
 ; SI-NEXT:    v_cvt_f16_f32_e32 v38, v38
-; SI-NEXT:    v_cvt_f16_f32_e32 v54, v54
-; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
-; SI-NEXT:    v_cvt_f16_f32_e32 v45, v45
-; SI-NEXT:    v_cvt_f16_f32_e32 v41, v41
-; SI-NEXT:    v_cvt_f16_f32_e32 v46, v46
-; SI-NEXT:    v_lshlrev_b32_e32 v47, 16, v47
-; SI-NEXT:    v_lshlrev_b32_e32 v42, 16, v42
-; SI-NEXT:    v_lshlrev_b32_e32 v51, 16, v51
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
-; SI-NEXT:    v_lshlrev_b32_e32 v50, 16, v50
-; SI-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
+; SI-NEXT:    v_cvt_f16_f32_e32 v50, v50
 ; SI-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
-; SI-NEXT:    v_or_b32_e32 v38, v38, v47
-; SI-NEXT:    v_or_b32_e32 v54, v54, v42
-; SI-NEXT:    v_or_b32_e32 v49, v49, v51
-; SI-NEXT:    v_or_b32_e32 v45, v45, v50
-; SI-NEXT:    v_or_b32_e32 v41, v41, v30
-; SI-NEXT:    v_or_b32_e32 v46, v46, v32
-; SI-NEXT:    v_alignbit_b32 v47, v16, v47, 16
-; SI-NEXT:    v_alignbit_b32 v42, v11, v42, 16
-; SI-NEXT:    v_alignbit_b32 v51, v58, v51, 16
-; SI-NEXT:    v_alignbit_b32 v50, v14, v50, 16
-; SI-NEXT:    v_alignbit_b32 v30, v7, v30, 16
-; SI-NEXT:    v_alignbit_b32 v32, v6, v32, 16
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v2
-; SI-NEXT:    v_or_b32_e32 v2, v36, v34
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT:    v_alignbit_b32 v34, v43, v34, 16
-; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
-; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v51, 16, v51
+; SI-NEXT:    v_alignbit_b32 v39, v13, v39, 16
+; SI-NEXT:    v_or_b32_e32 v50, v50, v51
+; SI-NEXT:    v_alignbit_b32 v51, v57, v51, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v11
+; SI-NEXT:    v_or_b32_e32 v11, v37, v1
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v1, v43, v1, 16
+; SI-NEXT:    v_add_f32_e32 v48, 0x38000000, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v48, v48
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_or_b32_e32 v2, v37, v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v63
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    v_or_b32_e32 v11, v48, v4
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v1, v55, v1, 16
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT:    v_alignbit_b32 v4, v55, v4, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v11
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
 ; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
-; SI-NEXT:    v_or_b32_e32 v63, v37, v9
-; SI-NEXT:    v_cvt_f32_f16_e32 v37, v3
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v9, v39, v9, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v11
+; SI-NEXT:    v_or_b32_e32 v11, v37, v8
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT:    v_add_f32_e32 v48, 0x38000000, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v48, v48
+; SI-NEXT:    v_alignbit_b32 v8, v5, v8, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v11
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
 ; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
 ; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v37
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v23, v3
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v49, v11
+; SI-NEXT:    v_or_b32_e32 v11, v48, v10
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    v_alignbit_b32 v10, v2, v10, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v11
+; SI-NEXT:    v_or_b32_e32 v11, v49, v37
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT:    v_cvt_f32_f16_e32 v49, v63
+; SI-NEXT:    v_add_f32_e32 v48, 0x38000000, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v48, v48
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    v_lshlrev_b32_e32 v48, 16, v48
+; SI-NEXT:    v_or_b32_e32 v63, v49, v48
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v52
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    v_cvt_f32_f16_e32 v52, v47
+; SI-NEXT:    v_or_b32_e32 v62, v49, v24
+; SI-NEXT:    v_cvt_f32_f16_e32 v49, v60
+; SI-NEXT:    v_add_f32_e32 v52, 0x38000000, v52
+; SI-NEXT:    v_cvt_f16_f32_e32 v52, v52
+; SI-NEXT:    v_alignbit_b32 v24, v31, v24, 16
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    v_or_b32_e32 v60, v49, v28
+; SI-NEXT:    v_lshlrev_b32_e32 v49, 16, v52
+; SI-NEXT:    v_cvt_f32_f16_e32 v52, v46
+; SI-NEXT:    v_or_b32_e32 v38, v38, v49
+; SI-NEXT:    v_add_f32_e32 v52, 0x38000000, v52
+; SI-NEXT:    v_cvt_f16_f32_e32 v52, v52
+; SI-NEXT:    v_lshlrev_b32_e32 v52, 16, v52
+; SI-NEXT:    v_alignbit_b32 v46, v41, v52, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v23, v11
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_f32_e32 v23, 0x38000000, v23
 ; SI-NEXT:    v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
 ; SI-NEXT:    v_lshlrev_b32_e32 v23, 16, v23
 ; SI-NEXT:    v_or_b32_e32 v29, v29, v23
-; SI-NEXT:    v_or_b32_e32 v2, v36, v5
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT:    v_alignbit_b32 v5, v52, v5, 16
-; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_cvt_f32_f16_e32 v27, v3
-; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v27
-; SI-NEXT:    v_cvt_f16_f32_e32 v27, v27
-; SI-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v36, v2
-; SI-NEXT:    v_mov_b32_e32 v2, v56
-; SI-NEXT:    v_cvt_f32_f16_e32 v56, v62
-; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT:    v_add_f32_e32 v56, 0x38000000, v56
-; SI-NEXT:    v_cvt_f16_f32_e32 v56, v56
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v36
-; SI-NEXT:    v_or_b32_e32 v62, v56, v36
-; SI-NEXT:    v_cvt_f32_f16_e32 v56, v60
-; SI-NEXT:    v_add_f32_e32 v56, 0x38000000, v56
-; SI-NEXT:    v_cvt_f16_f32_e32 v56, v56
-; SI-NEXT:    v_or_b32_e32 v60, v56, v37
-; SI-NEXT:    v_add_f32_e32 v56, 0x38000000, v57
-; SI-NEXT:    v_cvt_f16_f32_e32 v56, v56
-; SI-NEXT:    v_or_b32_e32 v57, v56, v26
-; SI-NEXT:    v_cvt_f32_f16_e32 v56, v59
-; SI-NEXT:    v_alignbit_b32 v26, v31, v26, 16
-; SI-NEXT:    v_add_f32_e32 v56, 0x38000000, v56
-; SI-NEXT:    v_cvt_f16_f32_e32 v56, v56
-; SI-NEXT:    v_or_b32_e32 v59, v56, v27
-; SI-NEXT:    v_cvt_f32_f16_e32 v56, v2
-; SI-NEXT:    v_alignbit_b32 v2, v18, v36, 16
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f32_f16_e32 v53, v11
+; SI-NEXT:    v_add_f32_e32 v53, 0x38000000, v53
+; SI-NEXT:    v_cvt_f16_f32_e32 v53, v53
+; SI-NEXT:    v_or_b32_e32 v11, v53, v52
+; SI-NEXT:    v_cvt_f32_f16_e32 v53, v61
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_alignbit_b32 v2, v19, v37, 16
-; SI-NEXT:    v_add_f32_e32 v56, 0x38000000, v56
-; SI-NEXT:    v_cvt_f16_f32_e32 v56, v56
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v11, v16, v37, 16
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v53, 0x38000000, v53
+; SI-NEXT:    v_cvt_f16_f32_e32 v53, v53
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_alignbit_b32 v2, v24, v23, 16
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT:    v_or_b32_e32 v56, v56, v35
+; SI-NEXT:    v_alignbit_b32 v11, v19, v48, 16
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_alignbit_b32 v2, v21, v27, 16
-; SI-NEXT:    v_alignbit_b32 v35, v33, v35, 16
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v11, v25, v23, 16
+; SI-NEXT:    v_or_b32_e32 v61, v53, v30
+; SI-NEXT:    v_cvt_f32_f16_e32 v53, v54
+; SI-NEXT:    v_cvt_f32_f16_e32 v54, v56
+; SI-NEXT:    v_alignbit_b32 v30, v33, v30, 16
+; SI-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v53, 0x38000000, v53
+; SI-NEXT:    v_add_f32_e32 v54, 0x38000000, v54
+; SI-NEXT:    v_cvt_f16_f32_e32 v53, v53
+; SI-NEXT:    v_cvt_f16_f32_e32 v47, v54
+; SI-NEXT:    v_add_f32_e32 v54, 0x38000000, v42
+; SI-NEXT:    v_cvt_f16_f32_e32 v42, v54
+; SI-NEXT:    v_or_b32_e32 v54, v53, v32
+; SI-NEXT:    v_lshlrev_b32_e32 v53, 16, v47
+; SI-NEXT:    v_alignbit_b32 v47, v17, v49, 16
+; SI-NEXT:    v_or_b32_e32 v59, v42, v53
+; SI-NEXT:    v_alignbit_b32 v42, v21, v28, 16
+; SI-NEXT:    v_alignbit_b32 v32, v35, v32, 16
+; SI-NEXT:    v_alignbit_b32 v56, v36, v53, 16
 ; SI-NEXT:  .LBB108_2: ; %end
 ; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
+; SI-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v61
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v36, 0xffff, v2
-; SI-NEXT:    v_or_b32_e32 v34, v36, v34
-; SI-NEXT:    buffer_store_dword v34, v0, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v37, 0xffff, v11
+; SI-NEXT:    v_or_b32_e32 v1, v37, v1
+; SI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v34, 0xffff, v43
-; SI-NEXT:    v_lshlrev_b32_e32 v36, 16, v44
-; SI-NEXT:    v_or_b32_e32 v34, v34, v36
-; SI-NEXT:    v_add_i32_e32 v36, vcc, 4, v0
-; SI-NEXT:    buffer_store_dword v34, v36, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v34, 0xffff, v2
-; SI-NEXT:    v_or_b32_e32 v1, v34, v1
-; SI-NEXT:    v_add_i32_e32 v34, vcc, 8, v0
-; SI-NEXT:    buffer_store_dword v1, v34, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v43
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v44
+; SI-NEXT:    v_or_b32_e32 v1, v1, v37
+; SI-NEXT:    v_add_i32_e32 v37, vcc, 4, v0
+; SI-NEXT:    buffer_store_dword v1, v37, s[0:3], 0 offen
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_add_i32_e32 v4, vcc, 8, v0
+; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v55
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v40
-; SI-NEXT:    v_or_b32_e32 v1, v1, v34
-; SI-NEXT:    v_add_i32_e32 v34, vcc, 12, v0
-; SI-NEXT:    buffer_store_dword v1, v34, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v40
+; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_add_i32_e32 v4, vcc, 12, v0
+; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v8
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 16, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_add_i32_e32 v4, vcc, 16, v0
+; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v52
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v53
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 20, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v5
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
+; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_add_i32_e32 v4, vcc, 20, v0
+; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v63
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v9
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 24, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v10
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_add_i32_e32 v4, vcc, 24, v0
+; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v39
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v48
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 28, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 28, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v62
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 32, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 32, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v18
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v13
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 36, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v16
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v15
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 36, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v60
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v63
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 40, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 40, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v19
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v20
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 44, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v20
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 44, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v57
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v26
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 48, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v62
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v24
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 48, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v31
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v28
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 52, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v27
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 52, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v29
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 56, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 56, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v24
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v25
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 60, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v25
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v26
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 60, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v59
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 64, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v60
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v42
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 64, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v21
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v22
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x44, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v22
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x44, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v38
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v47
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x48, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v47
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x48, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v16
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v17
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x4c, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v17
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v18
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x4c, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v54
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v42
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x50, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v46
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x50, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v11
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v12
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x54, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v41
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v12
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x54, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v49
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v51
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x58, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v50
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v51
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x58, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v58
-; SI-NEXT:    v_or_b32_e32 v1, v1, v3
-; SI-NEXT:    v_add_i32_e32 v3, vcc, 0x5c, v0
-; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v57
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v58
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x5c, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v56
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v35
-; SI-NEXT:    v_or_b32_e32 v1, v1, v3
-; SI-NEXT:    v_add_i32_e32 v3, vcc, 0x60, v0
-; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v61
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v30
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x60, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v33
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v34
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x64, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v45
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v50
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v39
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x68, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v14
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v15
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v13
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v14
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x6c, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v41
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v30
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v54
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v32
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x70, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v7
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v8
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v35
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v7
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x74, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v46
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v32
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v59
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v56
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x78, v0
 ; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v6
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v10
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v36
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v9
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v0, vcc, 0x7c, v0
 ; SI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index 81d4c9f2c82e6..178718a338432 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -7476,11 +7476,10 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v22, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB27_3
 ; GFX11-TRUE16-NEXT:  .LBB27_2: ; %cmp.true
@@ -7586,11 +7585,10 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s5, s5, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s5, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v9
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB27_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB27_4:
@@ -7664,7 +7662,7 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v6, v13
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v22, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v20
 ; GFX11-FAKE16-NEXT:    s_or_b32 s9, s9, s10
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s28, 0xff
@@ -7677,9 +7675,9 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11-FAKE16-NEXT:    s_or_b32 s10, s10, s11
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v7, v22, v23
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
@@ -14856,11 +14854,10 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v22, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB51_3
 ; GFX11-TRUE16-NEXT:  .LBB51_2: ; %cmp.true
@@ -14966,11 +14963,10 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s5, s5, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s5, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v9
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB51_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB51_4:
@@ -15044,7 +15040,7 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v6, v13
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v22, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v20
 ; GFX11-FAKE16-NEXT:    s_or_b32 s9, s9, s10
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s28, 0xff
@@ -15057,9 +15053,9 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11-FAKE16-NEXT:    s_or_b32 s10, s10, s11
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v7, v22, v23
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
@@ -21743,11 +21739,10 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v22, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB71_3
 ; GFX11-TRUE16-NEXT:  .LBB71_2: ; %cmp.true
@@ -21853,11 +21848,10 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s5, s5, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s5, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v9
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB71_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB71_4:
@@ -21931,7 +21925,7 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v6, v13
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v22, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v20
 ; GFX11-FAKE16-NEXT:    s_or_b32 s9, s9, s10
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s28, 0xff
@@ -21944,9 +21938,9 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11-FAKE16-NEXT:    s_or_b32 s10, s10, s11
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v7, v22, v23
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
@@ -28128,11 +28122,10 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v22, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB87_3
 ; GFX11-TRUE16-NEXT:  .LBB87_2: ; %cmp.true
@@ -28238,11 +28231,10 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
 ; GFX11-TRUE16-NEXT:    s_and_b32 s5, s5, 0xffff
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v4, s5, v0
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v9
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB87_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB87_4:
@@ -28316,7 +28308,7 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v6, v13
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v22, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v20
 ; GFX11-FAKE16-NEXT:    s_or_b32 s9, s9, s10
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s28, 0xff
@@ -28329,9 +28321,9 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11-FAKE16-NEXT:    s_or_b32 s10, s10, s11
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v6, v3, v7
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v7, v22, v23
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v4, s10, v0
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
@@ -34188,9 +34180,9 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v23, v24
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v23, 0xffff, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v0, 16, v1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v3, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v14, 16, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
@@ -34290,9 +34282,8 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v3, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v9, 16, v7
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v2, 16, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB99_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB99_4:
@@ -34460,12 +34451,11 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v4, v6, 16, v7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v7, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB99_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB99_4:
@@ -39390,9 +39380,9 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v23, v24
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v23, 0xffff, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v0, 16, v1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v3, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v14, 16, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
@@ -39492,9 +39482,8 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v3, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v9, 16, v7
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v2, 16, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB107_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB107_4:
@@ -39662,12 +39651,11 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v4, v6, 16, v7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v7, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB107_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB107_4:
@@ -43740,9 +43728,9 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v23, v24
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v23, 0xffff, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v0, 16, v1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v3, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v14, 16, v23
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
@@ -43842,9 +43830,8 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v3, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v9, 16, v7
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v2, 16, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB111_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB111_4:
@@ -44012,12 +43999,11 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v4, v6, 16, v7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v7, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB111_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB111_4:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index 35ab38c67b1ec..d966d136d75b6 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -6571,12 +6571,11 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v7, v33
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v34, v8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v32
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB15_3
 ; GFX11-TRUE16-NEXT:  .LBB15_2: ; %cmp.true
@@ -6706,13 +6705,12 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v9, 0xffff, v9
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v0, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v11
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v12, v13
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB15_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB15_4:
@@ -14058,12 +14056,11 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v7, v33
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v34, v8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v32
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB35_3
 ; GFX11-TRUE16-NEXT:  .LBB35_2: ; %cmp.true
@@ -14193,13 +14190,12 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v9, 0xffff, v9
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v0, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v11
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v12, v13
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB35_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB35_4:
@@ -21124,12 +21120,11 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v13, 0xffff, v8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v7, 16, v9
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v8, v10, 16, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v12, 16, v13
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB51_3
 ; GFX11-TRUE16-NEXT:  .LBB51_2: ; %cmp.true
@@ -21248,7 +21243,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v5, 0xffff, v8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v9, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -21456,10 +21451,9 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v6, v11, 16, v10
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v7, v8, 16, v9
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v9, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB51_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB51_4:
@@ -27674,12 +27668,11 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v13, 0xffff, v8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v7, v7, 16, v9
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v8, v10, 16, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v12, 16, v13
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB63_3
 ; GFX11-TRUE16-NEXT:  .LBB63_2: ; %cmp.true
@@ -27798,7 +27791,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v5, 0xffff, v8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v9, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -28006,10 +27999,9 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v6, v11, 16, v10
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v7, v8, 16, v9
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v9, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB63_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB63_4:
@@ -32105,12 +32097,11 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v7, v11
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v12, v8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB73_3
 ; GFX11-TRUE16-NEXT:  .LBB73_2: ; %cmp.true
@@ -32240,13 +32231,12 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v9, 0xffff, v9
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v0, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v11
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v12, v13
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB73_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB73_4:
@@ -32479,8 +32469,8 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v7, v7, v11
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v0, v1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v8, v12, v8
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
 ; GFX11-FAKE16-NEXT:  .LBB73_3: ; %end
@@ -34417,8 +34407,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v1, v1, v6
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v2, v3, v2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v7, v5
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX11-TRUE16-NEXT:    s_or_b32 s1, s1, s2
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s0, s21
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s2, s22
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s3, s15
@@ -36751,12 +36741,11 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v7, v11
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v12, v8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB77_3
 ; GFX11-TRUE16-NEXT:  .LBB77_2: ; %cmp.true
@@ -36886,13 +36875,12 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v9, 0xffff, v9
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v6, v0, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v7, v8, v11
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v12, v13
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB77_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB77_4:
@@ -37125,8 +37113,8 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v7, v7, v11
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v0, v1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v8, v12, v8
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v9, v9, v10
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
 ; GFX11-FAKE16-NEXT:  .LBB77_3: ; %end
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index 3c82ae190c1a9..397955a8a8928 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -15305,12 +15305,11 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB27_3
 ; GFX11-TRUE16-NEXT:  .LBB27_2: ; %cmp.true
@@ -15522,13 +15521,12 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 0xffff, v15
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v14, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v18, v19
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB27_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -15717,12 +15715,11 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB27_3
 ; GFX11-FAKE16-NEXT:  .LBB27_2: ; %cmp.true
@@ -15910,12 +15907,11 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v17
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v18, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB27_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
@@ -30664,12 +30660,11 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB51_3
 ; GFX11-TRUE16-NEXT:  .LBB51_2: ; %cmp.true
@@ -30881,13 +30876,12 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 0xffff, v15
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v14, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v18, v19
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB51_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -31076,12 +31070,11 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB51_3
 ; GFX11-FAKE16-NEXT:  .LBB51_2: ; %cmp.true
@@ -31269,12 +31262,11 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v17
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v18, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB51_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
@@ -45294,12 +45286,11 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB71_3
 ; GFX11-TRUE16-NEXT:  .LBB71_2: ; %cmp.true
@@ -45511,13 +45502,12 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 0xffff, v15
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v14, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v18, v19
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB71_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -45706,12 +45696,11 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB71_3
 ; GFX11-FAKE16-NEXT:  .LBB71_2: ; %cmp.true
@@ -45899,12 +45888,11 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v17
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v18, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB71_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
@@ -55058,10 +55046,10 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s2, s2, 16
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v26, v27, v26
 ; GFX11-FAKE16-NEXT:    s_or_b32 s1, s1, s2
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v13, s1 :: v_dual_and_b32 v14, 0xff, v14
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v14, 0xff, v14
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v21, 8, v21
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v11, s0
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v13, s1
 ; GFX11-FAKE16-NEXT:    s_and_b32 s0, s25, 0xff
 ; GFX11-FAKE16-NEXT:    s_lshl_b32 s1, s47, 8
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v3, v3, v8
@@ -59078,12 +59066,11 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB87_3
 ; GFX11-TRUE16-NEXT:  .LBB87_2: ; %cmp.true
@@ -59295,13 +59282,12 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 0xffff, v15
 ; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v2, v0
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v1, v3
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v14, v17
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v14, v18, v19
 ; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB87_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -59490,12 +59476,11 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v86, 16, v86
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v87
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v96, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v86
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB87_3
 ; GFX11-FAKE16-NEXT:  .LBB87_2: ; %cmp.true
@@ -59683,12 +59668,11 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v16, 16, v16
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v0, v1
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v2, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, v13, v17
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v18, v14
 ; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v16
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB87_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
@@ -64406,9 +64390,8 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
 ; GFX11-NEXT:    v_bfe_u32 v9, v12, 16, 1
 ; GFX11-NEXT:    v_cndmask_b32_e32 v20, v5, v7, vcc_lo
 ; GFX11-NEXT:    v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_add_nc_u32_e32 v7, 0x7fff, v8
 ; GFX11-NEXT:    s_lshl_b32 s0, s18, 16
+; GFX11-NEXT:    v_add_nc_u32_e32 v7, 0x7fff, v8
 ; GFX11-NEXT:    v_or_b32_e32 v8, 0x400000, v6
 ; GFX11-NEXT:    v_add_nc_u32_e32 v9, v9, v12
 ; GFX11-NEXT:    v_bfe_u32 v10, v5, 16, 1
@@ -73071,12 +73054,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v97, 0xffff, v14
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v15
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v86, 16, v87
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v96, 16, v97
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB99_3
 ; GFX11-TRUE16-NEXT:  .LBB99_2: ; %cmp.true
@@ -73268,11 +73250,10 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v37, 16, v15
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v14, 16, v12
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v2, 16, v16
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB99_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -73441,12 +73422,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v11, v0, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v15, 16, v87
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v96, 16, v97
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v86, 16, v98
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB99_3
 ; GFX11-FAKE16-NEXT:  .LBB99_2: ; %cmp.true
@@ -73616,10 +73596,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v14, 16, v12
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v16
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB99_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
@@ -86107,12 +86086,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v97, 0xffff, v14
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v15
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v86, 16, v87
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v96, 16, v97
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB107_3
 ; GFX11-TRUE16-NEXT:  .LBB107_2: ; %cmp.true
@@ -86304,11 +86282,10 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v37, 16, v15
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v14, 16, v12
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v2, 16, v16
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB107_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -86477,12 +86454,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v11, v0, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v15, 16, v87
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v96, 16, v97
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v86, 16, v98
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB107_3
 ; GFX11-FAKE16-NEXT:  .LBB107_2: ; %cmp.true
@@ -86652,10 +86628,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v14, 16, v12
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v16
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB107_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
@@ -97481,12 +97456,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v97, 0xffff, v14
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v3, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v15
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v86, 16, v87
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v96, 16, v97
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-TRUE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-TRUE16-NEXT:    s_cbranch_vccnz .LBB111_3
 ; GFX11-TRUE16-NEXT:  .LBB111_2: ; %cmp.true
@@ -97678,11 +97652,10 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v37, 16, v15
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v14, 16, v12
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v14, v2, 16, v16
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-TRUE16-NEXT:  .LBB111_3: ; %end
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -97851,12 +97824,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v98, 0xffff, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v11, v0, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s8
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v15, 16, v87
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v96, 16, v97
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v86, 16, v98
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
 ; GFX11-FAKE16-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
 ; GFX11-FAKE16-NEXT:    s_cbranch_vccnz .LBB111_3
 ; GFX11-FAKE16-NEXT:  .LBB111_2: ; %cmp.true
@@ -98026,10 +97998,9 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v14, 16, v12
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v13, 16, v16
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v1, 16, v0
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, s3
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GFX11-FAKE16-NEXT:  .LBB111_3: ; %end
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index 0ac06bbd1b996..4a8000ba52752 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -5117,9 +5117,9 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -8737,9 +8737,9 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -13219,9 +13219,9 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -16827,9 +16827,9 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -20633,9 +20633,9 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -24263,9 +24263,9 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -25652,7 +25652,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v17, v25, 16, v19
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v24, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v23, 16, v1
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, v21 :: v_dual_mov_b32 v0, v20
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB49_4:
 ; GFX11-TRUE16-NEXT:    ; implicit-def: $vgpr39_lo16
@@ -27274,9 +27274,9 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -28948,7 +28948,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v17, v25, 16, v19
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v24, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v23, 16, v1
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, v21 :: v_dual_mov_b32 v0, v20
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB53_4:
 ; GFX11-TRUE16-NEXT:    ; implicit-def: $vgpr39_lo16
@@ -30814,9 +30814,9 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s88, s88, s4
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, s57
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v3, s58
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v5, s60
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, s63 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, s73 :: v_dual_mov_b32 v9, s72
@@ -32784,7 +32784,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v17, v18, 16, v23
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v24, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v25, 16, v1
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, v21 :: v_dual_mov_b32 v0, v20
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v40f16_scalar:
@@ -32943,7 +32943,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v17, v22, 16, v25
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v18, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, v21 :: v_dual_mov_b32 v0, v20
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34603,7 +34603,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v17, v18, 16, v23
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v24, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v25, 16, v1
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v1, v21 :: v_dual_mov_b32 v0, v20
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v40i16_scalar:
@@ -34762,7 +34762,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v17, v22, 16, v25
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v18, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v1, v21 :: v_dual_mov_b32 v0, v20
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index a6e041b2d8300..b73870977c429 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -17499,9 +17499,9 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 8, v9
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b64 v[3:4], 24, v[9:10]
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v8
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v7, 24, v10
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 8, v10
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v8
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB109_3:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $sgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index d8fe5f27e9ac8..4141c33eca786 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -5545,13 +5545,13 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -9558,13 +9558,13 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -12617,9 +12617,9 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, v25
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v23
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v20
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v38, 16, v9
@@ -12767,9 +12767,9 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, v25
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, v23
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v20
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v9, v38, 16, v9
@@ -14433,13 +14433,13 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -16355,9 +16355,9 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, v25
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v23
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v20
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v38, 16, v9
@@ -16505,9 +16505,9 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, v25
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, v23
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v20
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v9, v38, 16, v9
@@ -18446,13 +18446,13 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -22611,13 +22611,13 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -26636,13 +26636,13 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -29951,13 +29951,13 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -33887,13 +33887,13 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, s47 :: v_dual_mov_b32 v1, s56
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, s63
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, s57 :: v_dual_mov_b32 v3, s58
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, s73
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, s59 :: v_dual_mov_b32 v5, s60
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v12, s75 :: v_dual_mov_b32 v7, s62
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v12, s75
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, s61 :: v_dual_mov_b32 v7, s62
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_mov_b32 v9, s72
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_mov_b32 v11, s74
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v13, s76
@@ -36130,11 +36130,10 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v20, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v21, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v23
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v22 :: v_dual_mov_b32 v3, v23
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v35, 16, v6
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v37, 16, v38
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v22
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v44f16_scalar:
@@ -36332,157 +36331,162 @@ define <44 x i16> @bitcast_v44f16_to_v44i16(<44 x half> %a, i32 %b) {
 ; SI-LABEL: bitcast_v44f16_to_v44i16:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v46, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v47, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v56, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v42, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v43, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v44, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v46, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v47, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v56, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v58, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v59, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v60, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v61, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v62, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
 ; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:56
-; SI-NEXT:    s_waitcnt expcnt(6)
 ; SI-NEXT:    buffer_load_dword v43, off, s[0:3], s32
-; SI-NEXT:    s_waitcnt expcnt(5)
 ; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:4
-; SI-NEXT:    s_waitcnt expcnt(4)
 ; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:8
-; SI-NEXT:    s_waitcnt expcnt(3)
 ; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:12
-; SI-NEXT:    s_waitcnt expcnt(2)
 ; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:16
-; SI-NEXT:    s_waitcnt expcnt(1)
+; SI-NEXT:    s_waitcnt expcnt(6)
 ; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:20
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt expcnt(5)
 ; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:28
+; SI-NEXT:    s_waitcnt expcnt(4)
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:24
+; SI-NEXT:    s_waitcnt expcnt(3)
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:36
+; SI-NEXT:    s_waitcnt expcnt(2)
+; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:32
+; SI-NEXT:    s_waitcnt expcnt(1)
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:44
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:40
+; SI-NEXT:    v_cvt_f16_f32_e32 v51, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v41, v2
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:48
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:52
 ; SI-NEXT:    v_cvt_f16_f32_e32 v48, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v38, v8
-; SI-NEXT:    v_cvt_f16_f32_e32 v55, v10
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v11
-; SI-NEXT:    v_cvt_f16_f32_e32 v33, v17
-; SI-NEXT:    v_cvt_f16_f32_e32 v53, v18
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v25
-; SI-NEXT:    v_cvt_f16_f32_e32 v25, v26
-; SI-NEXT:    v_cvt_f16_f32_e32 v11, v27
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:24
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:36
-; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:32
-; SI-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:44
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:40
-; SI-NEXT:    buffer_load_dword v26, off, s[0:3], s32 offset:48
-; SI-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:52
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v49, v4
 ; SI-NEXT:    v_cvt_f16_f32_e32 v50, v5
 ; SI-NEXT:    v_cvt_f16_f32_e32 v40, v6
 ; SI-NEXT:    v_cvt_f16_f32_e32 v37, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v38, v8
 ; SI-NEXT:    v_cvt_f16_f32_e32 v39, v9
+; SI-NEXT:    v_cvt_f16_f32_e32 v55, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v34, v11
 ; SI-NEXT:    v_cvt_f16_f32_e32 v35, v12
 ; SI-NEXT:    v_cvt_f16_f32_e32 v36, v13
 ; SI-NEXT:    v_cvt_f16_f32_e32 v54, v14
 ; SI-NEXT:    v_cvt_f16_f32_e32 v31, v15
 ; SI-NEXT:    v_cvt_f16_f32_e32 v32, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v33, v17
+; SI-NEXT:    v_cvt_f16_f32_e32 v53, v18
 ; SI-NEXT:    v_cvt_f16_f32_e32 v19, v19
 ; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
 ; SI-NEXT:    v_cvt_f16_f32_e32 v21, v21
 ; SI-NEXT:    v_cvt_f16_f32_e32 v52, v22
-; SI-NEXT:    v_cvt_f16_f32_e32 v14, v23
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v24
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v23
+; SI-NEXT:    v_cvt_f16_f32_e32 v17, v24
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v25
+; SI-NEXT:    v_cvt_f16_f32_e32 v25, v26
+; SI-NEXT:    v_cvt_f16_f32_e32 v11, v27
 ; SI-NEXT:    v_cvt_f16_f32_e32 v12, v28
-; SI-NEXT:    v_cvt_f16_f32_e32 v13, v29
+; SI-NEXT:    v_cvt_f16_f32_e32 v14, v29
 ; SI-NEXT:    v_cvt_f16_f32_e32 v24, v30
 ; SI-NEXT:    s_waitcnt vmcnt(14)
 ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v42
 ; SI-NEXT:    s_waitcnt vmcnt(13)
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v43
+; SI-NEXT:    v_cvt_f16_f32_e32 v8, v43
 ; SI-NEXT:    s_waitcnt vmcnt(12)
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v44
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v44
 ; SI-NEXT:    s_waitcnt vmcnt(11)
-; SI-NEXT:    v_cvt_f16_f32_e32 v7, v45
+; SI-NEXT:    v_cvt_f16_f32_e32 v10, v45
 ; SI-NEXT:    s_waitcnt vmcnt(10)
-; SI-NEXT:    v_cvt_f16_f32_e32 v22, v46
+; SI-NEXT:    v_cvt_f16_f32_e32 v23, v46
 ; SI-NEXT:    s_waitcnt vmcnt(9)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v47
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v47
 ; SI-NEXT:    s_waitcnt vmcnt(8)
 ; SI-NEXT:    v_cvt_f16_f32_e32 v4, v56
 ; SI-NEXT:    s_waitcnt vmcnt(7)
-; SI-NEXT:    v_cvt_f16_f32_e32 v23, v57
+; SI-NEXT:    v_cvt_f16_f32_e32 v22, v57
 ; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cvt_f16_f32_e32 v16, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v13, v58
 ; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v59
 ; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v60
 ; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v61
 ; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v62
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v26
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v27
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; SI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; SI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
 ; SI-NEXT:    s_andn2_saveexec_b64 s[4:5], s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB58_2
 ; SI-NEXT:  ; %bb.1: ; %cmp.true
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
 ; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
-; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
-; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
 ; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
 ; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
-; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
-; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
-; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
 ; SI-NEXT:    v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
 ; SI-NEXT:    v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v3
+; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
 ; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
+; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
 ; SI-NEXT:    v_cvt_f16_f32_e32 v12, v12
 ; SI-NEXT:    v_add_f32_e32 v11, 0x38000000, v11
-; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
-; SI-NEXT:    v_or_b32_e32 v2, v2, v26
-; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v9
+; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT:    v_or_b32_e32 v1, v1, v26
+; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v6
 ; SI-NEXT:    v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT:    v_add_f32_e32 v14, 0x38000000, v14
+; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
 ; SI-NEXT:    v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT:    v_or_b32_e32 v8, v8, v26
+; SI-NEXT:    v_or_b32_e32 v5, v5, v26
 ; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v4
-; SI-NEXT:    v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
 ; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
 ; SI-NEXT:    v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT:    v_or_b32_e32 v1, v1, v26
-; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v6
+; SI-NEXT:    v_or_b32_e32 v3, v3, v26
+; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT:    v_or_b32_e32 v5, v5, v26
+; SI-NEXT:    v_or_b32_e32 v8, v8, v26
 ; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v12
 ; SI-NEXT:    v_or_b32_e32 v11, v11, v26
-; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v15
-; SI-NEXT:    v_or_b32_e32 v14, v14, v26
+; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v17
+; SI-NEXT:    v_or_b32_e32 v16, v16, v26
 ; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v20
 ; SI-NEXT:    v_or_b32_e32 v19, v19, v26
 ; SI-NEXT:    v_cvt_f32_f16_e32 v26, v32
@@ -36523,36 +36527,36 @@ define <44 x i16> @bitcast_v44f16_to_v44i16(<44 x half> %a, i32 %b) {
 ; SI-NEXT:    v_or_b32_e32 v37, v26, v27
 ; SI-NEXT:    v_cvt_f32_f16_e32 v26, v49
 ; SI-NEXT:    v_cvt_f32_f16_e32 v27, v48
-; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
 ; SI-NEXT:    v_cvt_f32_f16_e32 v23, v23
+; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
 ; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v26
 ; SI-NEXT:    v_cvt_f16_f32_e32 v49, v26
 ; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v27
 ; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
 ; SI-NEXT:    v_lshlrev_b32_e32 v27, 16, v49
 ; SI-NEXT:    v_cvt_f32_f16_e32 v33, v33
 ; SI-NEXT:    v_or_b32_e32 v48, v26, v27
 ; SI-NEXT:    v_cvt_f32_f16_e32 v26, v41
 ; SI-NEXT:    v_cvt_f32_f16_e32 v27, v51
 ; SI-NEXT:    v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v17
+; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
 ; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v26
 ; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
 ; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v27
 ; SI-NEXT:    v_cvt_f16_f32_e32 v27, v27
-; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
 ; SI-NEXT:    v_lshlrev_b32_e32 v26, 16, v26
-; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
 ; SI-NEXT:    v_or_b32_e32 v51, v27, v26
 ; SI-NEXT:    v_lshlrev_b32_e32 v27, 16, v28
 ; SI-NEXT:    v_cvt_f32_f16_e32 v28, v55
 ; SI-NEXT:    v_or_b32_e32 v50, v29, v27
 ; SI-NEXT:    v_cvt_f32_f16_e32 v29, v54
-; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
 ; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v28
 ; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
-; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
 ; SI-NEXT:    v_add_f32_e32 v29, 0x38000000, v29
 ; SI-NEXT:    v_cvt_f16_f32_e32 v29, v29
 ; SI-NEXT:    v_lshlrev_b32_e32 v28, 16, v28
@@ -36564,58 +36568,58 @@ define <44 x i16> @bitcast_v44f16_to_v44i16(<44 x half> %a, i32 %b) {
 ; SI-NEXT:    v_add_f32_e32 v30, 0x38000000, v30
 ; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
 ; SI-NEXT:    v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT:    v_add_f32_e32 v22, 0x38000000, v22
 ; SI-NEXT:    v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT:    v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
 ; SI-NEXT:    v_cvt_f16_f32_e32 v30, v30
 ; SI-NEXT:    v_cvt_f16_f32_e32 v53, v36
 ; SI-NEXT:    v_add_f32_e32 v33, 0x38000000, v33
 ; SI-NEXT:    v_cvt_f16_f32_e32 v52, v52
 ; SI-NEXT:    v_add_f32_e32 v21, 0x38000000, v21
 ; SI-NEXT:    v_cvt_f16_f32_e32 v25, v25
-; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
 ; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT:    v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT:    v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT:    v_add_f32_e32 v14, 0x38000000, v14
 ; SI-NEXT:    v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
 ; SI-NEXT:    v_add_f32_e32 v10, 0x38000000, v10
+; SI-NEXT:    v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT:    v_add_f32_e32 v7, 0x38000000, v7
 ; SI-NEXT:    v_cvt_f16_f32_e32 v33, v33
 ; SI-NEXT:    v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT:    v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
 ; SI-NEXT:    v_cvt_f16_f32_e32 v13, v13
 ; SI-NEXT:    v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
 ; SI-NEXT:    v_lshlrev_b32_e32 v29, 16, v29
 ; SI-NEXT:    v_or_b32_e32 v36, v30, v29
 ; SI-NEXT:    v_lshlrev_b32_e32 v30, 16, v53
 ; SI-NEXT:    v_lshlrev_b32_e32 v52, 16, v52
 ; SI-NEXT:    v_lshlrev_b32_e32 v25, 16, v25
 ; SI-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
-; SI-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
 ; SI-NEXT:    v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT:    v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT:    v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT:    v_lshlrev_b32_e32 v15, 16, v15
 ; SI-NEXT:    v_or_b32_e32 v33, v33, v30
 ; SI-NEXT:    v_or_b32_e32 v21, v21, v52
-; SI-NEXT:    v_or_b32_e32 v17, v17, v25
-; SI-NEXT:    v_or_b32_e32 v13, v13, v24
-; SI-NEXT:    v_or_b32_e32 v7, v7, v22
-; SI-NEXT:    v_or_b32_e32 v16, v16, v23
-; SI-NEXT:    v_or_b32_e32 v10, v10, v18
+; SI-NEXT:    v_or_b32_e32 v18, v18, v25
+; SI-NEXT:    v_or_b32_e32 v14, v14, v24
+; SI-NEXT:    v_or_b32_e32 v10, v10, v23
+; SI-NEXT:    v_or_b32_e32 v13, v13, v22
+; SI-NEXT:    v_or_b32_e32 v7, v7, v15
 ; SI-NEXT:    v_alignbit_b32 v41, v48, v26, 16
 ; SI-NEXT:    v_alignbit_b32 v40, v37, v27, 16
 ; SI-NEXT:    v_alignbit_b32 v55, v34, v28, 16
 ; SI-NEXT:    v_alignbit_b32 v54, v31, v29, 16
 ; SI-NEXT:    v_alignbit_b32 v53, v19, v30, 16
-; SI-NEXT:    v_alignbit_b32 v52, v14, v52, 16
+; SI-NEXT:    v_alignbit_b32 v52, v16, v52, 16
 ; SI-NEXT:    v_alignbit_b32 v25, v11, v25, 16
-; SI-NEXT:    v_alignbit_b32 v24, v5, v24, 16
-; SI-NEXT:    v_alignbit_b32 v22, v1, v22, 16
-; SI-NEXT:    v_alignbit_b32 v23, v8, v23, 16
-; SI-NEXT:    v_alignbit_b32 v18, v2, v18, 16
+; SI-NEXT:    v_alignbit_b32 v24, v8, v24, 16
+; SI-NEXT:    v_alignbit_b32 v23, v3, v23, 16
+; SI-NEXT:    v_alignbit_b32 v22, v5, v22, 16
+; SI-NEXT:    v_alignbit_b32 v15, v1, v15, 16
 ; SI-NEXT:  .LBB58_2: ; %end
 ; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SI-NEXT:    v_and_b32_e32 v26, 0xffff, v51
@@ -36678,81 +36682,85 @@ define <44 x i16> @bitcast_v44f16_to_v44i16(<44 x half> %a, i32 %b) {
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v19, 0xffff, v21
 ; SI-NEXT:    v_lshlrev_b32_e32 v20, 16, v52
-; SI-NEXT:    v_and_b32_e32 v14, 0xffff, v14
-; SI-NEXT:    v_lshlrev_b32_e32 v15, 16, v15
+; SI-NEXT:    v_and_b32_e32 v16, 0xffff, v16
+; SI-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
 ; SI-NEXT:    v_or_b32_e32 v19, v19, v20
 ; SI-NEXT:    v_add_i32_e32 v20, vcc, 40, v0
-; SI-NEXT:    v_or_b32_e32 v14, v14, v15
-; SI-NEXT:    v_add_i32_e32 v15, vcc, 44, v0
+; SI-NEXT:    v_or_b32_e32 v16, v16, v17
+; SI-NEXT:    v_add_i32_e32 v17, vcc, 44, v0
 ; SI-NEXT:    buffer_store_dword v19, v20, s[0:3], 0 offen
-; SI-NEXT:    buffer_store_dword v14, v15, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v16, v17, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v14, 0xffff, v17
-; SI-NEXT:    v_lshlrev_b32_e32 v15, 16, v25
+; SI-NEXT:    v_and_b32_e32 v16, 0xffff, v18
+; SI-NEXT:    v_lshlrev_b32_e32 v17, 16, v25
 ; SI-NEXT:    v_and_b32_e32 v11, 0xffff, v11
 ; SI-NEXT:    v_lshlrev_b32_e32 v12, 16, v12
-; SI-NEXT:    v_or_b32_e32 v14, v14, v15
-; SI-NEXT:    v_add_i32_e32 v15, vcc, 48, v0
+; SI-NEXT:    v_or_b32_e32 v16, v16, v17
+; SI-NEXT:    v_add_i32_e32 v17, vcc, 48, v0
 ; SI-NEXT:    v_or_b32_e32 v11, v11, v12
 ; SI-NEXT:    v_add_i32_e32 v12, vcc, 52, v0
-; SI-NEXT:    buffer_store_dword v14, v15, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v16, v17, s[0:3], 0 offen
 ; SI-NEXT:    buffer_store_dword v11, v12, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v11, 0xffff, v13
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff, v14
 ; SI-NEXT:    v_lshlrev_b32_e32 v12, 16, v24
-; SI-NEXT:    v_and_b32_e32 v5, 0xffff, v5
-; SI-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT:    v_and_b32_e32 v8, 0xffff, v8
+; SI-NEXT:    v_lshlrev_b32_e32 v9, 16, v9
 ; SI-NEXT:    v_or_b32_e32 v11, v11, v12
 ; SI-NEXT:    v_add_i32_e32 v12, vcc, 56, v0
-; SI-NEXT:    v_or_b32_e32 v5, v5, v6
-; SI-NEXT:    v_add_i32_e32 v6, vcc, 60, v0
+; SI-NEXT:    v_or_b32_e32 v8, v8, v9
+; SI-NEXT:    v_add_i32_e32 v9, vcc, 60, v0
 ; SI-NEXT:    buffer_store_dword v11, v12, s[0:3], 0 offen
-; SI-NEXT:    buffer_store_dword v5, v6, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v8, v9, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v5, 0xffff, v7
-; SI-NEXT:    v_lshlrev_b32_e32 v6, 16, v22
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_and_b32_e32 v8, 0xffff, v10
+; SI-NEXT:    v_lshlrev_b32_e32 v9, 16, v23
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v3
 ; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT:    v_or_b32_e32 v5, v5, v6
-; SI-NEXT:    v_add_i32_e32 v6, vcc, 64, v0
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_or_b32_e32 v8, v8, v9
+; SI-NEXT:    v_add_i32_e32 v9, vcc, 64, v0
+; SI-NEXT:    v_or_b32_e32 v3, v3, v4
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x44, v0
-; SI-NEXT:    buffer_store_dword v5, v6, s[0:3], 0 offen
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v8, v9, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v3, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v16
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v23
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v13
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v22
+; SI-NEXT:    v_or_b32_e32 v3, v3, v4
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x48, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v3, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v8
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v9
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v5
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
+; SI-NEXT:    v_or_b32_e32 v3, v3, v4
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x4c, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    buffer_store_dword v3, v4, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v10
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v18
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v7
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v15
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v3, v3, v4
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x50, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v2
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v0, vcc, 0x54, v0
+; SI-NEXT:    buffer_store_dword v3, v4, s[0:3], 0 offen
 ; SI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
-; SI-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v43, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -38109,11 +38117,10 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v20, 16, v1
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v21, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v23
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v22 :: v_dual_mov_b32 v3, v23
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v4, v35, 16, v6
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v6, v37, 16, v38
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v22
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v44i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index 79adc25903ac7..7ec521c4ed8f7 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -30981,10 +30981,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v34, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v32, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v27
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v30, 16, v4
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v24
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v26
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB49_4:
@@ -31143,10 +31142,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v34, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v32, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, v27
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v30, 16, v4
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, v24
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, v26
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB49_4:
@@ -35085,10 +35083,9 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v34, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v32, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v27
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v30, 16, v4
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v24
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v26
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB53_4:
@@ -35247,10 +35244,9 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v34, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v32, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v3, v27
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v30, 16, v4
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, v24
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, v26
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB53_4:
@@ -40122,7 +40118,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 0xffff, v16
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v34, 0xffff, v15
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v35, 0xffff, v14
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v11, v36, 16, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v33, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v32, 16, v16
@@ -40134,7 +40130,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v23, 16, v4
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, v26
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v4, v28 :: v_dual_mov_b32 v5, v29
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
@@ -42317,7 +42313,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 0xffff, v16
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v34, 0xffff, v15
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v35, 0xffff, v14
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v11, v36, 16, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v33, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v32, 16, v16
@@ -42329,7 +42325,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v23, 16, v4
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, v26
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v4, v28 :: v_dual_mov_b32 v5, v29
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index e19eba6270957..028e61a1ef687 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -33826,10 +33826,10 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v49, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v39, 16, v2
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff, v3
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v5
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v6
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v37, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, v33
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v32, v32, 16, v26
@@ -33999,10 +33999,10 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v49, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v20, v39, 16, v2
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v3
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v37, 16, v1
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, v33
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v32, v32, 16, v26
@@ -38343,10 +38343,10 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v18, v49, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v39, 16, v2
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff, v3
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v5
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v6
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v37, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, v33
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v32, v32, 16, v26
@@ -38516,10 +38516,10 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v49, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v20, v39, 16, v2
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v3
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v5
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff, v6
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v28, v28, 16, v22
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v37, 16, v1
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v1, v33
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v32, v32, 16, v26
@@ -43834,10 +43834,10 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v5, v29
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v49, 16, v52
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v30
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v37, 16, v9
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v32
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v26
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v30
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v37, 16, v9
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -46268,10 +46268,10 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v5, v29
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v49, 16, v52
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v6
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v30
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v37, 16, v9
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v32
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v26
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v30
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v9, v37, 16, v9
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index 66242a3cf45d8..f3ef202a22f31 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -16176,15 +16176,16 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v17, v64, 16, v20
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v53, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v8
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v7, v37
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v36, v36, 16, v22
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v8
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v9
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v50, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v30
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v30
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v38, 16, v4
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v34
@@ -16365,7 +16366,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v36, v36, 16, v22
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v9, v29 :: v_dual_and_b32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v9
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v67, 16, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v50, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v25, v48, 16, v2
@@ -16375,7 +16376,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v34
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v6, v36
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v28
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB29_4:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $vgpr30
@@ -21196,15 +21197,16 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v17, v64, 16, v20
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v53, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v8
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v7, v37
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v36, v36, 16, v22
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v8
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v9
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v50, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v30
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v30
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v38, 16, v4
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v34
@@ -21385,7 +21387,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v36, v36, 16, v22
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v9, v29 :: v_dual_and_b32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v9
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v67, 16, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v50, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v25, v48, 16, v2
@@ -21395,7 +21397,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v34
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v6, v36
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v28
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB33_4:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $vgpr30
@@ -47677,19 +47679,19 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v6
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v11, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v6.l, v8.h
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, v28
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v0, 16, v1
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, v31
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v30, v51, 16, v32
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v33
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v32, v53, 16, v54
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v48, 16, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v26, v6, 16, v7
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v5, v35
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v36
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v32
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v7, v37
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, v28
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v48, 16, v11
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v56f16_scalar:
@@ -47897,7 +47899,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v20, v20, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v22, 16, v4
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v7
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v9, v29 :: v_dual_and_b32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v9
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v50, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v49, 16, v16
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v48, 16, v15
@@ -47910,7 +47912,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, v32
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v34
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v28
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
@@ -50343,19 +50345,19 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v6
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v11, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v6.l, v8.h
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, v28
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v0, 16, v1
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, v31
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v30, v51, 16, v32
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v2, 16, v3
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v3, v33
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v32, v53, 16, v54
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v48, 16, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v26, v6, 16, v7
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v5, v35
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v36
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v2, v32
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v7, v37
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v8, v28
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v48, 16, v11
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v56i16_scalar:
@@ -50563,7 +50565,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v20, v20, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v22, 16, v4
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v7
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v9, v29 :: v_dual_and_b32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v9
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v50, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v49, 16, v16
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v48, 16, v15
@@ -50576,7 +50578,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v2, v32
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v34
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v28
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index b480e89dfcc30..0a7790a27f5ae 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -7445,14 +7445,14 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -13313,14 +13313,14 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -17421,18 +17421,17 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v7, v31
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v30, v83, 16, v24
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v25, v54, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v29, v50, 16, v4
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v36
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v29, v50, 16, v4
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v48
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v30
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB29_4:
 ; GFX11-TRUE16-NEXT:    ; implicit-def: $vgpr36_lo16
@@ -17620,7 +17619,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v7, v31
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v30, v83, 16, v24
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v80, 16, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v25, v54, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
@@ -17631,7 +17630,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v48
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v6, v30
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB29_4:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $vgpr36
@@ -20001,14 +20000,14 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -22847,18 +22846,17 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v7, v31
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v30, v83, 16, v24
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v25, v54, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v29, v50, 16, v4
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, v36
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v29, v50, 16, v4
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, v48
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, v30
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT:  .LBB33_4:
 ; GFX11-TRUE16-NEXT:    ; implicit-def: $vgpr36_lo16
@@ -23046,7 +23044,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v7, v31
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v30, v83, 16, v24
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v80, 16, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v25, v54, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
@@ -23057,7 +23055,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v48
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v6, v30
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB33_4:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $vgpr36
@@ -25852,14 +25850,14 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -31659,14 +31657,14 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -37544,14 +37542,14 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -39736,17 +39734,17 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v67, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v64, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v0, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v10
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v33, v33, 16, v22
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v65, 16, v2
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v9
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v10
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v25, v54, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v51, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v36 :: v_dual_mov_b32 v1, v37
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v9, v33
@@ -39935,7 +39933,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v9, v33
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v30, v83, 16, v24
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v81, 16, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v80, 16, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v71, 16, v16
@@ -39949,7 +39947,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v6, v30
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v32
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB49_4:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $vgpr36
@@ -42319,14 +42317,14 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -45065,17 +45063,17 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v67, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v23, v64, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v0, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff, v7
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v10
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v33, v33, 16, v22
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v22, v65, 16, v2
 ; GFX11-TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff, v9
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v3, 0xffff, v10
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v25, v54, 16, v0
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v51, 16, v3
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v52, 16, v2
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v0, v36 :: v_dual_mov_b32 v1, v37
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v9, v33
@@ -45264,7 +45262,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v9, v33
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v30, v83, 16, v24
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v81, 16, v14
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v80, 16, v15
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v71, 16, v16
@@ -45278,7 +45276,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v6, v30
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v8, v32
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ; GFX11-FAKE16-NEXT:  .LBB53_4:
 ; GFX11-FAKE16-NEXT:    ; implicit-def: $vgpr36
@@ -48073,14 +48071,14 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
 ; GFX11-TRUE16-NEXT:    s_pack_ll_b32_b16 s79, s79, s5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v27, v4, 16, v5
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v28, v6, 16, v7
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
-; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v4, s59
 ; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v20, v0, 16, v1
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.l, v36.h
 ; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v14, s77 :: v_dual_and_b32 v1, 0xffff, v36
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v6, s61
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v21, v2, 16, v3
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v2.l, v35.h
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v16, s79 :: v_dual_and_b32 v3, 0xffff, v35
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s56, s1
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s58, s3
 ; GFX11-TRUE16-NEXT:    s_mov_b32 s60, s17
@@ -51839,15 +51837,15 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v19, 16, v1
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff, v5
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v5, v49
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v37, v82, 16, v39
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v39, v80, 16, v84
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v38, 0xffff, v38
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v18, 16, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v4
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v23, 16, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v5, v49
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v39, v80, 16, v84
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v38, 0xffff, v38
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v48, 0xffff, v48
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v21, 16, v1
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v22, 16, v2
@@ -51886,7 +51884,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v20, v20, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v4
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v7
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v54, 16, v13
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v52, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v51, 16, v16
@@ -51897,7 +51895,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v48
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
@@ -51936,593 +51934,608 @@ define <60 x i16> @bitcast_v60f16_to_v60i16(<60 x half> %a, i32 %b) {
 ; SI-NEXT:    buffer_store_dword v61, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
 ; SI-NEXT:    buffer_store_dword v62, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
 ; SI-NEXT:    buffer_store_dword v63, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; SI-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:120
-; SI-NEXT:    buffer_load_dword v43, off, s[0:3], s32
-; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:4
+; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:120
+; SI-NEXT:    buffer_load_dword v51, off, s[0:3], s32
+; SI-NEXT:    buffer_load_dword v43, off, s[0:3], s32 offset:4
+; SI-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:8
 ; SI-NEXT:    s_waitcnt expcnt(2)
-; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:8
+; SI-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:12
 ; SI-NEXT:    s_waitcnt expcnt(1)
-; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:12
+; SI-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:16
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:16
-; SI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:20
-; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:24
-; SI-NEXT:    buffer_load_dword v39, off, s[0:3], s32 offset:28
-; SI-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:32
-; SI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:36
-; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:40
-; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:44
-; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:48
-; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:52
-; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:56
-; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:60
-; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:64
-; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:68
-; SI-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:72
+; SI-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:20
+; SI-NEXT:    buffer_load_dword v33, off, s[0:3], s32 offset:24
+; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:28
+; SI-NEXT:    buffer_load_dword v39, off, s[0:3], s32 offset:32
+; SI-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:36
+; SI-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:40
+; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:44
+; SI-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:48
+; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:52
+; SI-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:56
+; SI-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:60
+; SI-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:64
+; SI-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:68
+; SI-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:72
 ; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:76
 ; SI-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:80
 ; SI-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:84
 ; SI-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:92
-; SI-NEXT:    v_cvt_f16_f32_e32 v60, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v18
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v55, v3
+; SI-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:88
+; SI-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:100
+; SI-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:96
+; SI-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:108
+; SI-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:104
 ; SI-NEXT:    v_cvt_f16_f32_e32 v53, v8
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v8, v9
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v22
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v52, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v18
+; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v22
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v26
 ; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v5
-; SI-NEXT:    v_cvt_f16_f32_e32 v48, v11
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT:    v_cvt_f16_f32_e32 v38, v16
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT:    v_cvt_f16_f32_e32 v40, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v5
+; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v6
-; SI-NEXT:    v_cvt_f16_f32_e32 v52, v7
-; SI-NEXT:    v_cvt_f16_f32_e32 v6, v9
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v30
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v12
+; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v55, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v40, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v6
 ; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
-; SI-NEXT:    v_cvt_f16_f32_e32 v49, v12
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v13
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v11
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v13
 ; SI-NEXT:    v_cvt_f16_f32_e32 v13, v14
-; SI-NEXT:    v_cvt_f16_f32_e32 v37, v15
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v17
-; SI-NEXT:    v_cvt_f16_f32_e32 v14, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
-; SI-NEXT:    v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT:    v_cvt_f16_f32_e32 v7, v23
+; SI-NEXT:    v_cvt_f16_f32_e32 v14, v15
+; SI-NEXT:    s_waitcnt expcnt(1)
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v19
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v20
 ; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT:    v_cvt_f16_f32_e32 v21, v25
-; SI-NEXT:    v_cvt_f16_f32_e32 v25, v27
-; SI-NEXT:    v_cvt_f16_f32_e32 v29, v29
-; SI-NEXT:    v_cvt_f16_f32_e32 v27, v30
-; SI-NEXT:    s_waitcnt vmcnt(14)
-; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v51
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v26
-; SI-NEXT:    v_cvt_f16_f32_e32 v26, v28
-; SI-NEXT:    v_cvt_f16_f32_e32 v22, v43
-; SI-NEXT:    v_cvt_f16_f32_e32 v23, v44
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v63
-; SI-NEXT:    v_cvt_f16_f32_e32 v61, v61
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v36
-; SI-NEXT:    v_cvt_f16_f32_e32 v44, v62
-; SI-NEXT:    v_cvt_f16_f32_e32 v19, v33
-; SI-NEXT:    v_cvt_f16_f32_e32 v16, v54
-; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT:    v_cvt_f16_f32_e32 v54, v41
-; SI-NEXT:    v_cvt_f16_f32_e32 v11, v45
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:88
-; SI-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:100
-; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:96
-; SI-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:108
-; SI-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:104
-; SI-NEXT:    v_cvt_f16_f32_e32 v43, v39
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v50
+; SI-NEXT:    v_cvt_f16_f32_e32 v26, v27
+; SI-NEXT:    v_cvt_f16_f32_e32 v27, v28
+; SI-NEXT:    v_cvt_f16_f32_e32 v28, v29
+; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT:    v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT:    v_cvt_f16_f32_e32 v25, v25
 ; SI-NEXT:    s_waitcnt vmcnt(14)
-; SI-NEXT:    v_cvt_f16_f32_e32 v63, v57
-; SI-NEXT:    v_cvt_f16_f32_e32 v42, v42
-; SI-NEXT:    v_cvt_f16_f32_e32 v12, v46
-; SI-NEXT:    v_cvt_f16_f32_e32 v39, v47
-; SI-NEXT:    v_cvt_f16_f32_e32 v30, v56
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v31
+; SI-NEXT:    v_cvt_f16_f32_e32 v31, v23
+; SI-NEXT:    v_cvt_f16_f32_e32 v22, v51
+; SI-NEXT:    v_cvt_f16_f32_e32 v23, v43
+; SI-NEXT:    v_cvt_f16_f32_e32 v29, v44
+; SI-NEXT:    v_cvt_f16_f32_e32 v44, v61
+; SI-NEXT:    v_cvt_f16_f32_e32 v19, v62
+; SI-NEXT:    v_cvt_f16_f32_e32 v20, v63
+; SI-NEXT:    v_cvt_f16_f32_e32 v61, v33
+; SI-NEXT:    v_cvt_f16_f32_e32 v43, v36
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v39
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v54
+; SI-NEXT:    v_cvt_f16_f32_e32 v16, v50
+; SI-NEXT:    v_cvt_f16_f32_e32 v51, v41
+; SI-NEXT:    v_cvt_f16_f32_e32 v12, v45
+; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f16_f32_e32 v30, v47
+; SI-NEXT:    v_cvt_f16_f32_e32 v11, v42
+; SI-NEXT:    v_cvt_f16_f32_e32 v50, v46
+; SI-NEXT:    v_cvt_f16_f32_e32 v54, v56
+; SI-NEXT:    v_cvt_f16_f32_e32 v45, v32
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v57
+; SI-NEXT:    s_waitcnt vmcnt(13)
+; SI-NEXT:    v_cvt_f16_f32_e32 v47, v34
+; SI-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:112
+; SI-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:116
+; SI-NEXT:    v_cvt_f16_f32_e32 v36, v58
+; SI-NEXT:    v_cvt_f16_f32_e32 v46, v59
+; SI-NEXT:    s_waitcnt vmcnt(13)
+; SI-NEXT:    v_cvt_f16_f32_e32 v39, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v42, v35
 ; SI-NEXT:    s_waitcnt vmcnt(11)
-; SI-NEXT:    v_cvt_f16_f32_e32 v57, v34
-; SI-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:112
-; SI-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:116
-; SI-NEXT:    v_cvt_f16_f32_e32 v62, v58
-; SI-NEXT:    v_cvt_f16_f32_e32 v33, v31
-; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
-; SI-NEXT:    v_cvt_f16_f32_e32 v31, v59
-; SI-NEXT:    s_waitcnt vmcnt(12)
-; SI-NEXT:    v_cvt_f16_f32_e32 v28, v35
-; SI-NEXT:    s_waitcnt vmcnt(6)
-; SI-NEXT:    v_cvt_f16_f32_e32 v50, v2
-; SI-NEXT:    s_waitcnt vmcnt(5)
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v3
-; SI-NEXT:    s_waitcnt vmcnt(4)
-; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT:    s_waitcnt vmcnt(3)
-; SI-NEXT:    v_cvt_f16_f32_e32 v41, v41
-; SI-NEXT:    s_waitcnt vmcnt(2)
-; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
+; SI-NEXT:    v_cvt_f16_f32_e32 v35, v60
+; SI-NEXT:    v_cvt_f16_f32_e32 v62, v38
+; SI-NEXT:    s_waitcnt vmcnt(9)
+; SI-NEXT:    v_cvt_f16_f32_e32 v33, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v41, v48
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v34
+; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v45
+; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
 ; SI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; SI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
 ; SI-NEXT:    s_or_saveexec_b64 s[4:5], s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v46, v21
-; SI-NEXT:    v_mov_b32_e32 v47, v17
-; SI-NEXT:    v_mov_b32_e32 v56, v4
-; SI-NEXT:    v_mov_b32_e32 v58, v5
-; SI-NEXT:    v_mov_b32_e32 v59, v6
+; SI-NEXT:    v_mov_b32_e32 v56, v29
+; SI-NEXT:    v_mov_b32_e32 v29, v28
+; SI-NEXT:    v_mov_b32_e32 v57, v25
+; SI-NEXT:    v_mov_b32_e32 v58, v21
+; SI-NEXT:    v_mov_b32_e32 v59, v17
+; SI-NEXT:    v_mov_b32_e32 v60, v6
+; SI-NEXT:    v_mov_b32_e32 v63, v8
 ; SI-NEXT:    s_xor_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_cbranch_execz .LBB58_2
 ; SI-NEXT:  ; %bb.1: ; %cmp.true
-; SI-NEXT:    v_cvt_f32_f16_e32 v4, v57
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
-; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
-; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT:    v_cvt_f16_f32_e32 v57, v4
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
-; SI-NEXT:    v_cvt_f32_f16_e32 v31, v31
 ; SI-NEXT:    v_cvt_f32_f16_e32 v6, v62
-; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT:    v_cvt_f32_f16_e32 v5, v63
+; SI-NEXT:    v_cvt_f32_f16_e32 v34, v34
+; SI-NEXT:    v_cvt_f32_f16_e32 v32, v32
+; SI-NEXT:    v_cvt_f32_f16_e32 v35, v35
+; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
+; SI-NEXT:    v_cvt_f16_f32_e32 v62, v6
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
+; SI-NEXT:    v_add_f32_e32 v32, 0x38000000, v32
+; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
+; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
+; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
+; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v34
+; SI-NEXT:    v_or_b32_e32 v32, v32, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v62
+; SI-NEXT:    v_or_b32_e32 v35, v35, v37
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v47
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v46
+; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT:    v_cvt_f32_f16_e32 v8, v54
 ; SI-NEXT:    v_cvt_f32_f16_e32 v12, v12
-; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
 ; SI-NEXT:    v_cvt_f32_f16_e32 v11, v11
 ; SI-NEXT:    v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
 ; SI-NEXT:    v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v47, v37
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v38
+; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
 ; SI-NEXT:    v_cvt_f32_f16_e32 v19, v19
-; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT:    v_add_f32_e32 v31, 0x38000000, v31
-; SI-NEXT:    v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
 ; SI-NEXT:    v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT:    v_cvt_f16_f32_e32 v31, v31
-; SI-NEXT:    v_cvt_f16_f32_e32 v62, v6
-; SI-NEXT:    v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT:    v_add_f32_e32 v8, 0x38000000, v8
 ; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
 ; SI-NEXT:    v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT:    v_cvt_f32_f16_e32 v26, v26
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_cvt_f32_f16_e32 v27, v27
+; SI-NEXT:    v_cvt_f16_f32_e32 v8, v8
 ; SI-NEXT:    v_cvt_f16_f32_e32 v12, v12
 ; SI-NEXT:    v_add_f32_e32 v11, 0x38000000, v11
 ; SI-NEXT:    v_add_f32_e32 v16, 0x38000000, v16
-; SI-NEXT:    v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT:    v_cvt_f32_f16_e32 v26, v26
 ; SI-NEXT:    v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT:    v_or_b32_e32 v2, v2, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v11, v11
 ; SI-NEXT:    v_cvt_f16_f32_e32 v16, v16
 ; SI-NEXT:    v_add_f32_e32 v15, 0x38000000, v15
+; SI-NEXT:    v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT:    v_cvt_f32_f16_e32 v31, v31
+; SI-NEXT:    v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT:    v_lshlrev_b32_e32 v38, 16, v47
+; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
 ; SI-NEXT:    v_add_f32_e32 v19, 0x38000000, v19
+; SI-NEXT:    v_add_f32_e32 v23, 0x38000000, v23
 ; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
-; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT:    v_or_b32_e32 v8, v8, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v57
-; SI-NEXT:    v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_or_b32_e32 v46, v37, v38
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT:    v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
-; SI-NEXT:    v_or_b32_e32 v31, v31, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v62
-; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
 ; SI-NEXT:    v_cvt_f16_f32_e32 v23, v23
 ; SI-NEXT:    v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v26
-; SI-NEXT:    v_or_b32_e32 v63, v5, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v12
+; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT:    v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT:    v_or_b32_e32 v54, v8, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v12
 ; SI-NEXT:    v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT:    v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT:    v_cvt_f16_f32_e32 v27, v27
+; SI-NEXT:    v_add_f32_e32 v26, 0x38000000, v26
 ; SI-NEXT:    v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT:    v_or_b32_e32 v11, v11, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v16
-; SI-NEXT:    v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_or_b32_e32 v11, v11, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v16
+; SI-NEXT:    v_cvt_f16_f32_e32 v26, v26
 ; SI-NEXT:    v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT:    v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT:    v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT:    v_or_b32_e32 v15, v15, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v31, v31
+; SI-NEXT:    v_cvt_f16_f32_e32 v18, v18
 ; SI-NEXT:    v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT:    v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT:    v_or_b32_e32 v15, v15, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v19
+; SI-NEXT:    v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT:    v_or_b32_e32 v19, v19, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v23
 ; SI-NEXT:    v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT:    v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; SI-NEXT:    v_add_f32_e32 v14, 0x38000000, v14
-; SI-NEXT:    v_or_b32_e32 v18, v18, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v23
+; SI-NEXT:    v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT:    v_or_b32_e32 v22, v22, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v27
 ; SI-NEXT:    v_cvt_f16_f32_e32 v14, v14
-; SI-NEXT:    v_or_b32_e32 v22, v22, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v26
-; SI-NEXT:    v_or_b32_e32 v25, v25, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v24
-; SI-NEXT:    v_or_b32_e32 v7, v7, v34
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v20
-; SI-NEXT:    v_or_b32_e32 v14, v14, v34
-; SI-NEXT:    v_cvt_f32_f16_e32 v34, v38
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT:    v_or_b32_e32 v26, v26, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v24
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_or_b32_e32 v31, v31, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v18
+; SI-NEXT:    v_or_b32_e32 v7, v7, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v2
+; SI-NEXT:    v_or_b32_e32 v14, v14, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v5
+; SI-NEXT:    v_or_b32_e32 v4, v4, v37
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v53
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v52
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
-; SI-NEXT:    v_cvt_f16_f32_e32 v38, v34
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v53, v37
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v38
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
 ; SI-NEXT:    v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v38
+; SI-NEXT:    v_lshlrev_b32_e32 v38, 16, v53
 ; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_or_b32_e32 v37, v34, v35
-; SI-NEXT:    v_cvt_f32_f16_e32 v34, v49
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v48
+; SI-NEXT:    v_or_b32_e32 v52, v37, v38
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v40
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v55
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v45, v59
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
-; SI-NEXT:    v_cvt_f16_f32_e32 v49, v34
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
+; SI-NEXT:    v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v40, v37
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v38
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v38, 16, v40
+; SI-NEXT:    v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT:    v_or_b32_e32 v55, v37, v38
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v6
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v63
 ; SI-NEXT:    v_add_f32_e32 v10, 0x38000000, v10
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v49
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
 ; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
-; SI-NEXT:    v_or_b32_e32 v48, v34, v35
-; SI-NEXT:    v_cvt_f32_f16_e32 v34, v53
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v52
 ; SI-NEXT:    v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT:    v_cvt_f32_f16_e32 v48, v43
+; SI-NEXT:    v_or_b32_e32 v6, v37, v1
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
-; SI-NEXT:    v_cvt_f16_f32_e32 v53, v34
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
 ; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v53
 ; SI-NEXT:    v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT:    v_or_b32_e32 v52, v34, v35
-; SI-NEXT:    v_cvt_f32_f16_e32 v34, v40
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v55
+; SI-NEXT:    v_cvt_f32_f16_e32 v49, v61
+; SI-NEXT:    v_add_f32_e32 v48, 0x38000000, v48
+; SI-NEXT:    v_cvt_f16_f32_e32 v48, v48
 ; SI-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT:    v_cvt_f32_f16_e32 v42, v42
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
-; SI-NEXT:    v_cvt_f16_f32_e32 v40, v34
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
-; SI-NEXT:    v_cvt_f32_f16_e32 v27, v27
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v40
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    v_lshlrev_b32_e32 v48, 16, v48
 ; SI-NEXT:    v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT:    v_or_b32_e32 v55, v34, v35
-; SI-NEXT:    v_cvt_f32_f16_e32 v34, v60
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v4
-; SI-NEXT:    v_cvt_f32_f16_e32 v32, v32
-; SI-NEXT:    v_cvt_f32_f16_e32 v28, v28
-; SI-NEXT:    v_add_f32_e32 v34, 0x38000000, v34
-; SI-NEXT:    v_cvt_f16_f32_e32 v34, v34
-; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
+; SI-NEXT:    v_cvt_f32_f16_e32 v41, v41
+; SI-NEXT:    v_or_b32_e32 v61, v49, v48
 ; SI-NEXT:    v_cvt_f32_f16_e32 v29, v29
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v34
-; SI-NEXT:    v_cvt_f32_f16_e32 v39, v39
-; SI-NEXT:    v_or_b32_e32 v4, v35, v34
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT:    v_cvt_f32_f16_e32 v33, v33
 ; SI-NEXT:    v_cvt_f32_f16_e32 v50, v50
 ; SI-NEXT:    v_cvt_f32_f16_e32 v36, v36
-; SI-NEXT:    v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT:    v_cvt_f32_f16_e32 v39, v39
+; SI-NEXT:    v_cvt_f32_f16_e32 v33, v33
 ; SI-NEXT:    v_add_f32_e32 v30, 0x38000000, v30
-; SI-NEXT:    v_add_f32_e32 v32, 0x38000000, v32
-; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v28
-; SI-NEXT:    v_cvt_f16_f32_e32 v27, v27
+; SI-NEXT:    v_add_f32_e32 v41, 0x38000000, v41
 ; SI-NEXT:    v_add_f32_e32 v29, 0x38000000, v29
 ; SI-NEXT:    v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT:    v_add_f32_e32 v39, 0x38000000, v39
-; SI-NEXT:    v_cvt_f16_f32_e32 v32, v32
-; SI-NEXT:    v_add_f32_e32 v33, 0x38000000, v33
-; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
 ; SI-NEXT:    v_add_f32_e32 v50, 0x38000000, v50
 ; SI-NEXT:    v_add_f32_e32 v36, 0x38000000, v36
+; SI-NEXT:    v_add_f32_e32 v39, 0x38000000, v39
+; SI-NEXT:    v_cvt_f16_f32_e32 v41, v41
+; SI-NEXT:    v_add_f32_e32 v33, 0x38000000, v33
 ; SI-NEXT:    v_cvt_f16_f32_e32 v29, v29
-; SI-NEXT:    v_cvt_f16_f32_e32 v39, v39
-; SI-NEXT:    v_cvt_f16_f32_e32 v33, v33
 ; SI-NEXT:    v_cvt_f16_f32_e32 v50, v50
 ; SI-NEXT:    v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v27, 16, v27
+; SI-NEXT:    v_cvt_f16_f32_e32 v39, v39
+; SI-NEXT:    v_cvt_f16_f32_e32 v33, v33
 ; SI-NEXT:    v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT:    v_lshlrev_b32_e32 v32, 16, v32
-; SI-NEXT:    v_lshlrev_b32_e32 v28, 16, v28
-; SI-NEXT:    v_or_b32_e32 v29, v29, v27
-; SI-NEXT:    v_or_b32_e32 v39, v39, v30
-; SI-NEXT:    v_or_b32_e32 v33, v33, v32
-; SI-NEXT:    v_or_b32_e32 v50, v50, v28
-; SI-NEXT:    v_alignbit_b32 v60, v55, v34, 16
-; SI-NEXT:    v_alignbit_b32 v27, v22, v27, 16
-; SI-NEXT:    v_alignbit_b32 v30, v63, v30, 16
-; SI-NEXT:    v_alignbit_b32 v32, v31, v32, 16
-; SI-NEXT:    v_alignbit_b32 v28, v8, v28, 16
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v4
-; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
-; SI-NEXT:    v_or_b32_e32 v4, v35, v1
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT:    v_lshlrev_b32_e32 v41, 16, v41
+; SI-NEXT:    v_or_b32_e32 v50, v50, v30
+; SI-NEXT:    v_or_b32_e32 v33, v33, v41
+; SI-NEXT:    v_alignbit_b32 v1, v55, v1, 16
+; SI-NEXT:    v_alignbit_b32 v43, v15, v48, 16
+; SI-NEXT:    v_alignbit_b32 v30, v54, v30, 16
+; SI-NEXT:    v_alignbit_b32 v41, v32, v41, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v6
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_or_b32_e32 v6, v37, v3
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v45
-; SI-NEXT:    v_cvt_f32_f16_e32 v45, v58
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
-; SI-NEXT:    v_alignbit_b32 v1, v52, v1, 16
-; SI-NEXT:    v_add_f32_e32 v45, 0x38000000, v45
-; SI-NEXT:    v_or_b32_e32 v59, v35, v10
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v56
-; SI-NEXT:    v_cvt_f16_f32_e32 v45, v45
-; SI-NEXT:    v_alignbit_b32 v10, v48, v10, 16
-; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
-; SI-NEXT:    v_or_b32_e32 v58, v45, v13
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
-; SI-NEXT:    v_cvt_f32_f16_e32 v45, v47
-; SI-NEXT:    v_alignbit_b32 v13, v37, v13, 16
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v38
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v60
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_alignbit_b32 v3, v52, v3, 16
+; SI-NEXT:    v_add_f32_e32 v38, 0x38000000, v38
+; SI-NEXT:    v_or_b32_e32 v63, v37, v10
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v59
+; SI-NEXT:    v_cvt_f16_f32_e32 v38, v38
+; SI-NEXT:    v_alignbit_b32 v10, v4, v10, 16
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_or_b32_e32 v60, v38, v13
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v58
+; SI-NEXT:    v_alignbit_b32 v13, v14, v13, 16
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v17, v4
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT:    v_cvt_f32_f16_e32 v17, v6
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_f32_e32 v17, 0x38000000, v17
 ; SI-NEXT:    v_cvt_f16_f32_e32 v17, v17
 ; SI-NEXT:    v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT:    v_or_b32_e32 v56, v35, v17
-; SI-NEXT:    v_cvt_f32_f16_e32 v35, v51
-; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v45
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
-; SI-NEXT:    v_cvt_f32_f16_e32 v45, v46
-; SI-NEXT:    v_add_f32_e32 v35, 0x38000000, v35
-; SI-NEXT:    v_cvt_f16_f32_e32 v35, v35
-; SI-NEXT:    v_add_f32_e32 v45, 0x38000000, v45
-; SI-NEXT:    v_cvt_f16_f32_e32 v45, v45
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v35
-; SI-NEXT:    v_or_b32_e32 v46, v45, v35
+; SI-NEXT:    v_or_b32_e32 v59, v37, v17
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v38
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v57
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_add_f32_e32 v38, 0x38000000, v38
+; SI-NEXT:    v_cvt_f16_f32_e32 v38, v38
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v21, v4
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT:    v_cvt_f32_f16_e32 v21, v6
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
 ; SI-NEXT:    v_add_f32_e32 v21, 0x38000000, v21
 ; SI-NEXT:    v_cvt_f16_f32_e32 v21, v21
 ; SI-NEXT:    v_lshlrev_b32_e32 v21, 16, v21
-; SI-NEXT:    v_or_b32_e32 v47, v51, v21
-; SI-NEXT:    v_cvt_f32_f16_e32 v51, v44
-; SI-NEXT:    v_cvt_f32_f16_e32 v44, v61
-; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v51
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
-; SI-NEXT:    v_lshlrev_b32_e32 v45, 16, v51
-; SI-NEXT:    v_cvt_f32_f16_e32 v51, v43
-; SI-NEXT:    v_add_f32_e32 v43, 0x38000000, v44
-; SI-NEXT:    v_cvt_f16_f32_e32 v43, v43
-; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v51
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
-; SI-NEXT:    v_or_b32_e32 v61, v43, v45
-; SI-NEXT:    v_lshlrev_b32_e32 v43, 16, v51
-; SI-NEXT:    v_cvt_f32_f16_e32 v51, v54
-; SI-NEXT:    v_add_f32_e32 v54, 0x38000000, v42
-; SI-NEXT:    v_cvt_f16_f32_e32 v54, v54
-; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v51
-; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
-; SI-NEXT:    v_lshlrev_b32_e32 v42, 16, v54
-; SI-NEXT:    v_or_b32_e32 v54, v51, v42
-; SI-NEXT:    v_cvt_f32_f16_e32 v51, v41
-; SI-NEXT:    v_alignbit_b32 v42, v11, v42, 16
+; SI-NEXT:    v_or_b32_e32 v58, v37, v21
+; SI-NEXT:    v_cvt_f32_f16_e32 v37, v44
+; SI-NEXT:    v_add_f32_e32 v37, 0x38000000, v37
+; SI-NEXT:    v_cvt_f16_f32_e32 v37, v37
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v37
+; SI-NEXT:    v_alignbit_b32 v44, v19, v37, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v25, v6
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT:    v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT:    v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT:    v_lshlrev_b32_e32 v25, 16, v25
+; SI-NEXT:    v_or_b32_e32 v57, v38, v25
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v56
+; SI-NEXT:    v_add_f32_e32 v38, 0x38000000, v38
+; SI-NEXT:    v_cvt_f16_f32_e32 v38, v38
+; SI-NEXT:    v_or_b32_e32 v56, v38, v37
+; SI-NEXT:    v_cvt_f32_f16_e32 v38, v51
+; SI-NEXT:    v_cvt_f32_f16_e32 v51, v42
+; SI-NEXT:    v_add_f32_e32 v38, 0x38000000, v38
+; SI-NEXT:    v_cvt_f16_f32_e32 v38, v38
 ; SI-NEXT:    v_add_f32_e32 v51, 0x38000000, v51
 ; SI-NEXT:    v_cvt_f16_f32_e32 v51, v51
-; SI-NEXT:    v_lshlrev_b32_e32 v41, 16, v51
-; SI-NEXT:    v_or_b32_e32 v36, v36, v41
-; SI-NEXT:    v_alignbit_b32 v51, v25, v35, 16
-; SI-NEXT:    v_alignbit_b32 v41, v2, v41, 16
+; SI-NEXT:    v_lshlrev_b32_e32 v38, 16, v38
+; SI-NEXT:    v_lshlrev_b32_e32 v42, 16, v51
+; SI-NEXT:    v_or_b32_e32 v39, v39, v42
+; SI-NEXT:    v_alignbit_b32 v51, v11, v38, 16
+; SI-NEXT:    v_alignbit_b32 v42, v35, v42, 16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v28, v6
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT:    v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT:    v_cvt_f16_f32_e32 v28, v28
+; SI-NEXT:    v_lshlrev_b32_e32 v28, 16, v28
+; SI-NEXT:    v_or_b32_e32 v29, v29, v28
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v44, v4
-; SI-NEXT:    v_add_f32_e32 v44, 0x38000000, v44
-; SI-NEXT:    v_cvt_f16_f32_e32 v44, v44
-; SI-NEXT:    v_or_b32_e32 v4, v44, v43
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT:    v_cvt_f32_f16_e32 v49, v6
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    v_or_b32_e32 v6, v49, v38
+; SI-NEXT:    v_cvt_f32_f16_e32 v49, v45
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_alignbit_b32 v6, v7, v17, 16
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT:    v_add_f32_e32 v49, 0x38000000, v49
+; SI-NEXT:    v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_alignbit_b32 v6, v31, v21, 16
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_alignbit_b32 v4, v14, v17, 16
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v6, v26, v25, 16
+; SI-NEXT:    v_lshlrev_b32_e32 v49, 16, v49
+; SI-NEXT:    v_or_b32_e32 v36, v36, v49
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_alignbit_b32 v4, v7, v21, 16
-; SI-NEXT:    v_alignbit_b32 v44, v18, v45, 16
-; SI-NEXT:    v_alignbit_b32 v43, v15, v43, 16
-; SI-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT:    v_alignbit_b32 v6, v22, v28, 16
+; SI-NEXT:    v_alignbit_b32 v45, v46, v49, 16
+; SI-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
 ; SI-NEXT:  .LBB58_2: ; %end
 ; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v60
+; SI-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v62
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v34, 0xffff, v4
-; SI-NEXT:    v_or_b32_e32 v34, v34, v35
-; SI-NEXT:    buffer_store_dword v34, v0, s[0:3], 0 offen
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v34, 0xffff, v55
-; SI-NEXT:    v_lshlrev_b32_e32 v35, 16, v40
-; SI-NEXT:    v_or_b32_e32 v34, v34, v35
-; SI-NEXT:    v_add_i32_e32 v35, vcc, 4, v0
-; SI-NEXT:    buffer_store_dword v34, v35, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v34, 0xffff, v4
-; SI-NEXT:    v_or_b32_e32 v1, v34, v1
-; SI-NEXT:    v_add_i32_e32 v34, vcc, 8, v0
-; SI-NEXT:    buffer_store_dword v1, v34, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v37, 0xffff, v6
+; SI-NEXT:    v_or_b32_e32 v1, v37, v1
+; SI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v52
-; SI-NEXT:    v_lshlrev_b32_e32 v34, 16, v53
-; SI-NEXT:    v_or_b32_e32 v1, v1, v34
-; SI-NEXT:    v_add_i32_e32 v34, vcc, 12, v0
-; SI-NEXT:    buffer_store_dword v1, v34, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v55
+; SI-NEXT:    v_lshlrev_b32_e32 v37, 16, v40
+; SI-NEXT:    v_or_b32_e32 v1, v1, v37
+; SI-NEXT:    v_add_i32_e32 v37, vcc, 4, v0
+; SI-NEXT:    buffer_store_dword v1, v37, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v59
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 16, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v3
+; SI-NEXT:    v_add_i32_e32 v3, vcc, 8, v0
+; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v48
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v49
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 20, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v52
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v53
+; SI-NEXT:    v_or_b32_e32 v1, v1, v3
+; SI-NEXT:    v_add_i32_e32 v3, vcc, 12, v0
+; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v58
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v13
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 24, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v63
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v10
+; SI-NEXT:    v_or_b32_e32 v1, v1, v3
+; SI-NEXT:    v_add_i32_e32 v3, vcc, 16, v0
+; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v37
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v38
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 28, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; SI-NEXT:    v_or_b32_e32 v1, v1, v3
+; SI-NEXT:    v_add_i32_e32 v3, vcc, 20, v0
+; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v56
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v4
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 32, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v60
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v13
+; SI-NEXT:    v_or_b32_e32 v1, v1, v3
+; SI-NEXT:    v_add_i32_e32 v3, vcc, 24, v0
+; SI-NEXT:    buffer_store_dword v1, v3, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v14
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v20
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 36, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 28, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v47
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v59
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v4
-; SI-NEXT:    v_or_b32_e32 v1, v1, v10
-; SI-NEXT:    v_add_i32_e32 v10, vcc, 40, v0
-; SI-NEXT:    buffer_store_dword v1, v10, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 32, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v7
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v24
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 44, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v18
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 36, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v46
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v51
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 48, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v58
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 40, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v25
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v26
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 52, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v31
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v24
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 44, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v57
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 48, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v26
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v27
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 52, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v29
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v27
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 56, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 56, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v22
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v23
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 60, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v23
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 60, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v56
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v44
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 64, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v19
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v20
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x44, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v61
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v44
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 64, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v43
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x48, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v18
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v19
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 0x44, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v15
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v16
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x4c, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v43
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v57
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v51
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 0x48, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v15
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v16
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 0x4c, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v54
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v42
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 0x50, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x50, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v11
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v12
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 0x54, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v12
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x54, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v39
-; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v30
-; SI-NEXT:    v_or_b32_e32 v1, v1, v7
-; SI-NEXT:    v_add_i32_e32 v7, vcc, 0x58, v0
-; SI-NEXT:    buffer_store_dword v1, v7, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v50
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v30
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x58, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v63
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x5c, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v54
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v9
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x5c, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v33
-; SI-NEXT:    v_lshlrev_b32_e32 v5, 16, v32
-; SI-NEXT:    v_or_b32_e32 v1, v1, v5
-; SI-NEXT:    v_add_i32_e32 v5, vcc, 0x60, v0
-; SI-NEXT:    buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v36
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v45
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x60, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v31
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
-; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x64, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v46
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v47
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x64, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v50
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v28
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
-; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x68, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v39
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v42
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x68, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v8
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v9
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
-; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x6c, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v35
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v62
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x6c, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v36
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 16, v41
-; SI-NEXT:    v_or_b32_e32 v1, v1, v4
-; SI-NEXT:    v_add_i32_e32 v4, vcc, 0x70, v0
-; SI-NEXT:    buffer_store_dword v1, v4, s[0:3], 0 offen
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v33
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v41
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x70, v0
+; SI-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v2
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v32
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v34
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v0, vcc, 0x74, v0
 ; SI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
@@ -53320,13 +53333,13 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
 ; SI-NEXT:    v_cvt_f32_f16_e32 v20, v20
 ; SI-NEXT:    v_cvt_f32_f16_e32 v32, v32
 ; SI-NEXT:    v_or_b32_e32 v62, v1, v39
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
 ; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
 ; SI-NEXT:    v_cvt_f16_f32_e32 v10, v10
 ; SI-NEXT:    v_add_f32_e32 v9, 0x38000000, v9
 ; SI-NEXT:    v_add_f32_e32 v13, 0x38000000, v13
 ; SI-NEXT:    v_cvt_f32_f16_e32 v31, v31
 ; SI-NEXT:    v_cvt_f32_f16_e32 v35, v35
+; SI-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
 ; SI-NEXT:    v_cvt_f16_f32_e32 v9, v9
 ; SI-NEXT:    v_cvt_f16_f32_e32 v13, v13
 ; SI-NEXT:    v_add_f32_e32 v12, 0x38000000, v12
@@ -54727,15 +54740,15 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v19, v19, 16, v1
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff, v5
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v5, v49
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v37, v82, 16, v39
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v39, v80, 16, v84
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v38, 0xffff, v38
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v18, v18, 16, v0
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v2
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff, v4
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v23, v23, 16, v3
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v5, v49
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v39, v80, 16, v84
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v38, 0xffff, v38
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v48, 0xffff, v48
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v21, v21, 16, v1
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v22, v22, 16, v2
@@ -54774,7 +54787,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v20, v20, 16, v0
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v24, v24, 16, v4
 ; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff, v7
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v11, v35 :: v_dual_and_b32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff, v11
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v13, v54, 16, v13
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v52, 16, v17
 ; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v16, v51, 16, v16
@@ -54785,7 +54798,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v4, v48
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
 ; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v10, v34
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %b, 0
   br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
index 8508de491e880..0a67b2ecda30e 100644
--- a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -997,29 +997,28 @@ define amdgpu_ps float @ps_main(i32 %idx) {
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, 0x3efcd89c
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v13 :: v_dual_mov_b32 v33, v22
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
@@ -2021,29 +2020,28 @@ define amdgpu_vs float @vs_main(i32 %idx) {
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, 0x3efcd89c
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v13 :: v_dual_mov_b32 v33, v22
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
@@ -3045,29 +3043,28 @@ define amdgpu_cs float @cs_main(i32 %idx) {
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, 0x3efcd89c
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v13 :: v_dual_mov_b32 v33, v22
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
@@ -4066,29 +4063,28 @@ define amdgpu_hs float @hs_main(i32 %idx) {
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, 0x3efcd89c
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v13 :: v_dual_mov_b32 v33, v22
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
@@ -5087,29 +5083,28 @@ define amdgpu_gs float @gs_main(i32 %idx) {
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, 0x3efcd89c
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v13 :: v_dual_mov_b32 v33, v22
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
@@ -6120,29 +6115,28 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v20, 0x3efcd89c :: v_dual_mov_b32 v29, v15
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v33, v22 :: v_dual_mov_b32 v30, v13
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
@@ -7152,29 +7146,28 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v22, 0xbf638e39
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v20, 0x3efcd89c :: v_dual_mov_b32 v29, v15
-; GFX11-FLATSCR-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v33, v22 :: v_dual_mov_b32 v30, v13
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v30, v13
 ; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[0:3], off offset:272
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:256
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x3f20e7f4
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v9, v18
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v33, v22
+; GFX11-FLATSCR-NEXT:    s_clause 0x3
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[13:16], off offset:240
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
-; GFX11-FLATSCR-NEXT:    s_clause 0x1
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[21:24], off offset:208
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[24:27], off offset:192
 ; GFX11-FLATSCR-NEXT:    scratch_load_b32 v14, v37, off
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v20, v0
 ; GFX11-FLATSCR-NEXT:    s_clause 0x2
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[1:4], off offset:832
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[28:31], off offset:816
 ; GFX11-FLATSCR-NEXT:    scratch_store_b128 off, v[9:12], off offset:800
-; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v29, 0xbf523be1
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
-; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v30, v7 :: v_dual_mov_b32 v31, v17
+; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v29, 0xbf523be1 :: v_dual_mov_b32 v30, v7
+; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v31, v17
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v3, v12
 ; GFX11-FLATSCR-NEXT:    v_mov_b32_e32 v4, v28
 ; GFX11-FLATSCR-NEXT:    v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
diff --git a/llvm/test/CodeGen/RISCV/zilsd.ll b/llvm/test/CodeGen/RISCV/zilsd.ll
index eb5d8237bda8c..3eaaffba250dd 100644
--- a/llvm/test/CodeGen/RISCV/zilsd.ll
+++ b/llvm/test/CodeGen/RISCV/zilsd.ll
@@ -36,25 +36,25 @@ define i64 @load_unaligned(ptr %p) {
 ; SLOW-LABEL: load_unaligned:
 ; SLOW:       # %bb.0:
 ; SLOW-NEXT:    lbu a1, 1(a0)
-; SLOW-NEXT:    lbu a2, 2(a0)
-; SLOW-NEXT:    lbu a3, 3(a0)
-; SLOW-NEXT:    lbu a4, 0(a0)
+; SLOW-NEXT:    lbu a2, 0(a0)
+; SLOW-NEXT:    lbu a3, 2(a0)
+; SLOW-NEXT:    lbu a4, 3(a0)
 ; SLOW-NEXT:    slli a1, a1, 8
-; SLOW-NEXT:    slli a2, a2, 16
-; SLOW-NEXT:    slli a3, a3, 24
-; SLOW-NEXT:    or a1, a1, a4
-; SLOW-NEXT:    lbu a4, 4(a0)
+; SLOW-NEXT:    or a1, a1, a2
+; SLOW-NEXT:    lbu a2, 4(a0)
 ; SLOW-NEXT:    lbu a5, 5(a0)
-; SLOW-NEXT:    or a2, a3, a2
-; SLOW-NEXT:    lbu a3, 6(a0)
+; SLOW-NEXT:    slli a3, a3, 16
+; SLOW-NEXT:    slli a4, a4, 24
+; SLOW-NEXT:    or a3, a4, a3
+; SLOW-NEXT:    lbu a4, 6(a0)
 ; SLOW-NEXT:    lbu a0, 7(a0)
 ; SLOW-NEXT:    slli a5, a5, 8
-; SLOW-NEXT:    or a4, a5, a4
-; SLOW-NEXT:    slli a3, a3, 16
+; SLOW-NEXT:    or a2, a5, a2
+; SLOW-NEXT:    slli a4, a4, 16
 ; SLOW-NEXT:    slli a0, a0, 24
-; SLOW-NEXT:    or a3, a0, a3
-; SLOW-NEXT:    or a0, a2, a1
-; SLOW-NEXT:    or a1, a3, a4
+; SLOW-NEXT:    or a4, a0, a4
+; SLOW-NEXT:    or a0, a3, a1
+; SLOW-NEXT:    or a1, a4, a2
 ; SLOW-NEXT:    ret
 ;
 ; FAST-LABEL: load_unaligned:



More information about the llvm-commits mailing list