[llvm] [AMDGPU] Optimize S_OR_B32 to S_ADDK_I32 where possible (PR #177949)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 4 10:54:31 PST 2026
https://github.com/Iasonaskrpr updated https://github.com/llvm/llvm-project/pull/177949
>From 496cc100ff512dda3b772de414fd043adb80281e Mon Sep 17 00:00:00 2001
From: Iasonaskrpr <iaskarapro at gmail.com>
Date: Sun, 25 Jan 2026 14:42:41 +0200
Subject: [PATCH 1/6] [AMDGPU] Add conversion from disjoint s_or_b32 to
s_addk_i32
---
.../Target/AMDGPU/SIShrinkInstructions.cpp | 34 ++++++++++++++-----
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 5b32bd0b72a59..337b4cc4f4168 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -911,7 +911,8 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
// Try to use S_ADDK_I32 and S_MULK_I32.
if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
- MI.getOpcode() == AMDGPU::S_MUL_I32) {
+ MI.getOpcode() == AMDGPU::S_MUL_I32 ||
+ MI.getOpcode() == AMDGPU::S_OR_B32) {
const MachineOperand *Dest = &MI.getOperand(0);
MachineOperand *Src0 = &MI.getOperand(1);
MachineOperand *Src1 = &MI.getOperand(2);
@@ -931,16 +932,31 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
MRI->setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
continue;
}
-
+ unsigned Opc;
if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
if (Src1->isImm() && isKImmOperand(*Src1)) {
- unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
- AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
-
- Src1->setImm(SignExtend64(Src1->getImm(), 32));
- MI.setDesc(TII->get(Opc));
- MI.tieOperands(0, 1);
- Changed = true;
+ unsigned NewOpc = 0;
+ int64_t ImmVal = Src1->getImm();
+
+ if (MI.getOpcode() == AMDGPU::S_OR_B32) {
+ uint32_t Imm = static_cast<uint32_t>(ImmVal);
+ if (MI.getFlag(MachineInstr::MIFlag::Disjoint)) {
+ bool IsBitSetCandidate = isPowerOf2_32(Imm) && MI.findRegisterDefOperand(AMDGPU::SCC, nullptr)->isDead() && (llvm::countr_zero(Imm) != 0);
+ if (!IsBitSetCandidate) {
+ NewOpc = AMDGPU::S_ADDK_I32;
+ }
+ }
+ }
+ else {
+ NewOpc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ? AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
+ }
+
+ if (NewOpc) {
+ Src1->setImm(SignExtend64(ImmVal, 32));
+ MI.setDesc(TII->get(NewOpc));
+ MI.tieOperands(0, 1);
+ Changed = true;
+ }
}
}
}
>From aff4bba0477b562bf1b673b213d2e1a71ace446b Mon Sep 17 00:00:00 2001
From: Iasonaskrpr <iaskarapro at gmail.com>
Date: Sun, 25 Jan 2026 14:43:40 +0200
Subject: [PATCH 2/6] Formatted changes
---
.../Target/AMDGPU/SIShrinkInstructions.cpp | 30 +++++++++++--------
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 337b4cc4f4168..0b82f167a3eb0 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -939,23 +939,27 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
int64_t ImmVal = Src1->getImm();
if (MI.getOpcode() == AMDGPU::S_OR_B32) {
- uint32_t Imm = static_cast<uint32_t>(ImmVal);
- if (MI.getFlag(MachineInstr::MIFlag::Disjoint)) {
- bool IsBitSetCandidate = isPowerOf2_32(Imm) && MI.findRegisterDefOperand(AMDGPU::SCC, nullptr)->isDead() && (llvm::countr_zero(Imm) != 0);
- if (!IsBitSetCandidate) {
- NewOpc = AMDGPU::S_ADDK_I32;
- }
+ uint32_t Imm = static_cast<uint32_t>(ImmVal);
+ if (MI.getFlag(MachineInstr::MIFlag::Disjoint)) {
+ bool IsBitSetCandidate =
+ isPowerOf2_32(Imm) &&
+ MI.findRegisterDefOperand(AMDGPU::SCC, nullptr)->isDead() &&
+ (llvm::countr_zero(Imm) != 0);
+ if (!IsBitSetCandidate) {
+ NewOpc = AMDGPU::S_ADDK_I32;
}
- }
- else {
- NewOpc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ? AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
+ }
+ } else {
+ NewOpc = (MI.getOpcode() == AMDGPU::S_ADD_I32)
+ ? AMDGPU::S_ADDK_I32
+ : AMDGPU::S_MULK_I32;
}
if (NewOpc) {
- Src1->setImm(SignExtend64(ImmVal, 32));
- MI.setDesc(TII->get(NewOpc));
- MI.tieOperands(0, 1);
- Changed = true;
+ Src1->setImm(SignExtend64(ImmVal, 32));
+ MI.setDesc(TII->get(NewOpc));
+ MI.tieOperands(0, 1);
+ Changed = true;
}
}
}
>From 30eb40c442e2cca12e89b3d9110ac18d1ffada33 Mon Sep 17 00:00:00 2001
From: Iasonaskrpr <iaskarapro at gmail.com>
Date: Mon, 26 Jan 2026 13:33:22 +0200
Subject: [PATCH 3/6] Removed unused variable
---
.../lib/Target/AMDGPU/SIShrinkInstructions.cpp | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 0b82f167a3eb0..b1b2de5cdd178 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -932,32 +932,30 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
MRI->setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
continue;
}
- unsigned Opc;
if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
if (Src1->isImm() && isKImmOperand(*Src1)) {
- unsigned NewOpc = 0;
- int64_t ImmVal = Src1->getImm();
+ unsigned Opc = 0;
if (MI.getOpcode() == AMDGPU::S_OR_B32) {
- uint32_t Imm = static_cast<uint32_t>(ImmVal);
+ uint32_t Imm = static_cast<uint32_t>(Src1->getImm());
if (MI.getFlag(MachineInstr::MIFlag::Disjoint)) {
bool IsBitSetCandidate =
isPowerOf2_32(Imm) &&
- MI.findRegisterDefOperand(AMDGPU::SCC, nullptr)->isDead() &&
+ MI.registerDefIsDead(AMDGPU::SCC, /*TRI=*/nullptr) &&
(llvm::countr_zero(Imm) != 0);
if (!IsBitSetCandidate) {
- NewOpc = AMDGPU::S_ADDK_I32;
+ Opc = AMDGPU::S_ADDK_I32;
}
}
} else {
- NewOpc = (MI.getOpcode() == AMDGPU::S_ADD_I32)
+ Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32)
? AMDGPU::S_ADDK_I32
: AMDGPU::S_MULK_I32;
}
- if (NewOpc) {
- Src1->setImm(SignExtend64(ImmVal, 32));
- MI.setDesc(TII->get(NewOpc));
+ if (Opc != 0) {
+ Src1->setImm(SignExtend64(Src1->getImm(), 32));
+ MI.setDesc(TII->get(Opc));
MI.tieOperands(0, 1);
Changed = true;
}
>From 919b11d4324aa793ac90fe7f5c227762415c0b13 Mon Sep 17 00:00:00 2001
From: Iasonaskrpr <iaskarapro at gmail.com>
Date: Mon, 26 Jan 2026 14:45:25 +0200
Subject: [PATCH 4/6] Updated tests to work with transformation
---
llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll | 8 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll | 6 +-
.../GlobalISel/divergence-structurizer.ll | 60 +++---
.../test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll | 60 +++---
.../AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll | 6 +-
.../AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll | 10 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll | 178 +++++++++---------
.../CodeGen/AMDGPU/combine-reg-or-const.ll | 2 +-
llvm/test/CodeGen/AMDGPU/s-barrier.ll | 2 +-
9 files changed, 166 insertions(+), 166 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
index 5c60eb696f6b2..f7f86f01d2665 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -207,8 +207,8 @@ define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX7-NEXT: s_and_b32 s4, s4, 0xffff
; GFX7-NEXT: s_and_b32 s5, s16, 0xffff
; GFX7-NEXT: s_lshl_b32 s4, s4, 16
-; GFX7-NEXT: s_or_b32 s4, s5, s4
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: s_or_b32 s5, s5, s4
+; GFX7-NEXT: v_mov_b32_e32 v0, s5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_add_v2i16:
@@ -232,8 +232,8 @@ define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_and_b32 s5, 0xffff, s16
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
-; GFX8-NEXT: s_or_b32 s4, s5, s4
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_or_b32 s5, s5, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: s_add_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll
index 57755c6856858..856cb29e1ce4a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -o - %s | FileCheck -check-prefix=GFX7 %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
@@ -452,9 +452,9 @@ define amdgpu_ps i32 @s_bswap_v2i16(<2 x i16> inreg %src) {
; GFX7-NEXT: s_lshl_b32 s2, s0, 8
; GFX7-NEXT: s_bfe_u32 s3, s0, 0x80008
; GFX7-NEXT: v_alignbit_b32 v0, s1, v0, 24
-; GFX7-NEXT: s_or_b32 s2, s3, s2
+; GFX7-NEXT: s_or_b32 s3, s3, s2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX7-NEXT: s_and_b32 s0, 0xffff, s2
+; GFX7-NEXT: s_and_b32 s0, 0xffff, s3
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-NEXT: v_or_b32_e32 v0, s0, v0
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index 5c57d355959ef..5e0b6f35358cc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -526,65 +526,65 @@ define amdgpu_ps i32 @irreducible_cfg(i32 %x, i32 %y, i32 %a0, i32 %a1, i32 %a2,
; GFX10-NEXT: s_mov_b32 s0, exec_lo
; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_and_b32 s2, s0, 1
-; GFX10-NEXT: s_xor_b32 s0, vcc_lo, s0
+; GFX10-NEXT: s_xor_b32 s5, vcc_lo, s0
; GFX10-NEXT: s_cmp_lg_u32 s2, 0
; GFX10-NEXT: ; implicit-def: $sgpr2
; GFX10-NEXT: s_cselect_b32 s3, exec_lo, 0
; GFX10-NEXT: s_branch .LBB6_2
; GFX10-NEXT: .LBB6_1: ; %Flow2
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6
-; GFX10-NEXT: s_and_b32 s4, exec_lo, s5
-; GFX10-NEXT: s_mov_b32 s0, exec_lo
-; GFX10-NEXT: s_or_b32 s1, s4, s1
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_and_b32 s0, exec_lo, s4
+; GFX10-NEXT: s_mov_b32 s5, exec_lo
+; GFX10-NEXT: s_or_b32 s1, s0, s1
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
; GFX10-NEXT: s_cbranch_execz .LBB6_8
; GFX10-NEXT: .LBB6_2: ; %irr.guard
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB6_6 Depth 2
-; GFX10-NEXT: s_mov_b32 s4, exec_lo
-; GFX10-NEXT: s_and_saveexec_b32 s5, s0
-; GFX10-NEXT: s_xor_b32 s5, exec_lo, s5
+; GFX10-NEXT: s_mov_b32 s0, exec_lo
+; GFX10-NEXT: s_and_saveexec_b32 s4, s5
+; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4
; GFX10-NEXT: ; %bb.3: ; %.loopexit
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: v_cmp_gt_i32_e64 s0, v5, v0
+; GFX10-NEXT: v_cmp_gt_i32_e64 s7, v5, v0
+; GFX10-NEXT: s_mov_b32 s5, exec_lo
; GFX10-NEXT: s_mov_b32 s6, exec_lo
-; GFX10-NEXT: s_mov_b32 s7, exec_lo
-; GFX10-NEXT: s_xor_b32 s6, vcc_lo, s6
+; GFX10-NEXT: s_xor_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 s3, s3, exec_lo
-; GFX10-NEXT: s_or_b32 s6, s0, s6
-; GFX10-NEXT: s_and_b32 s0, exec_lo, s0
-; GFX10-NEXT: s_xor_b32 s6, s6, s7
-; GFX10-NEXT: s_andn2_b32 s4, s4, exec_lo
-; GFX10-NEXT: s_and_b32 s6, exec_lo, s6
-; GFX10-NEXT: s_or_b32 s3, s3, s0
-; GFX10-NEXT: s_or_b32 s4, s4, s6
+; GFX10-NEXT: s_or_b32 s5, s7, s5
+; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
+; GFX10-NEXT: s_xor_b32 s5, s5, s6
+; GFX10-NEXT: s_and_b32 s6, exec_lo, s7
+; GFX10-NEXT: s_and_b32 s5, exec_lo, s5
+; GFX10-NEXT: s_or_b32 s3, s3, s6
+; GFX10-NEXT: s_or_b32 s0, s0, s5
; GFX10-NEXT: ; %bb.4: ; %Flow1
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: s_andn2_b32 s0, s2, exec_lo
-; GFX10-NEXT: s_and_b32 s2, exec_lo, s3
-; GFX10-NEXT: s_mov_b32 s5, exec_lo
-; GFX10-NEXT: s_or_b32 s2, s0, s2
-; GFX10-NEXT: s_and_saveexec_b32 s6, s4
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_andn2_b32 s2, s2, exec_lo
+; GFX10-NEXT: s_and_b32 s5, exec_lo, s3
+; GFX10-NEXT: s_mov_b32 s4, exec_lo
+; GFX10-NEXT: s_or_b32 s2, s2, s5
+; GFX10-NEXT: s_and_saveexec_b32 s5, s0
; GFX10-NEXT: s_cbranch_execz .LBB6_1
; GFX10-NEXT: ; %bb.5: ; %.preheader
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
; GFX10-NEXT: v_cmp_le_i32_e64 s0, v4, v0
-; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: .LBB6_6: ; %.inner_loop
; GFX10-NEXT: ; Parent Loop BB6_2 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: s_and_b32 s7, exec_lo, s0
-; GFX10-NEXT: s_or_b32 s4, s7, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_or_b32 s6, s7, s6
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s6
; GFX10-NEXT: s_cbranch_execnz .LBB6_6
; GFX10-NEXT: ; %bb.7: ; %Flow
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: s_andn2_b32 s0, s5, exec_lo
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX10-NEXT: s_andn2_b32 s0, s4, exec_lo
; GFX10-NEXT: s_and_b32 s4, exec_lo, 0
-; GFX10-NEXT: s_or_b32 s5, s0, s4
+; GFX10-NEXT: s_or_b32 s4, s0, s4
; GFX10-NEXT: s_branch .LBB6_1
; GFX10-NEXT: .LBB6_8: ; %.exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll
index fa72eb72fd723..1f5cd4ab1c6d0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll
@@ -122,7 +122,7 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_and_b32 s4, s1, 0x1ff
; GFX11-NEXT: s_addk_i32 s2, 0xfc10
; GFX11-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX11-NEXT: s_or_b32 s0, s4, s0
+; GFX11-NEXT: s_or_b32 s4, s4, s0
; GFX11-NEXT: s_cselect_b32 s0, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s3, s0
@@ -140,9 +140,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_cmp_lg_u32 s4, s5
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s4, s6, s4
+; GFX11-NEXT: s_or_b32 s6, s6, s4
; GFX11-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-NEXT: s_cselect_b32 s0, s4, s0
+; GFX11-NEXT: s_cselect_b32 s0, s6, s0
; GFX11-NEXT: s_and_b32 s4, s0, 7
; GFX11-NEXT: s_lshr_b32 s0, s0, 2
; GFX11-NEXT: s_cmp_eq_u32 s4, 3
@@ -150,8 +150,8 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_cmp_gt_i32 s4, 5
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-NEXT: s_or_b32 s5, s5, s4
+; GFX11-NEXT: s_cmp_lg_u32 s5, 0
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_add_i32 s0, s0, s4
@@ -162,9 +162,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_lshr_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b32 s1, s1, 0x8000
-; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_or_b32 s1, s1, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: v_mov_b32_e32 v0, s1
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: fptrunc_f64_to_f16_uniform:
@@ -174,7 +174,7 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_and_b32 s4, s1, 0x1ff
; GFX12-NEXT: s_addk_co_i32 s2, 0xfc10
; GFX12-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX12-NEXT: s_or_b32 s0, s4, s0
+; GFX12-NEXT: s_or_b32 s4, s4, s0
; GFX12-NEXT: s_cselect_b32 s0, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_or_b32 s0, s3, s0
@@ -192,9 +192,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_cmp_lg_u32 s4, s5
; GFX12-NEXT: s_cselect_b32 s4, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: s_or_b32 s4, s6, s4
+; GFX12-NEXT: s_or_b32 s6, s6, s4
; GFX12-NEXT: s_cmp_lt_i32 s2, 1
-; GFX12-NEXT: s_cselect_b32 s0, s4, s0
+; GFX12-NEXT: s_cselect_b32 s0, s6, s0
; GFX12-NEXT: s_and_b32 s4, s0, 7
; GFX12-NEXT: s_lshr_b32 s0, s0, 2
; GFX12-NEXT: s_cmp_eq_u32 s4, 3
@@ -202,8 +202,8 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_cmp_gt_i32 s4, 5
; GFX12-NEXT: s_cselect_b32 s4, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: s_or_b32 s4, s5, s4
-; GFX12-NEXT: s_cmp_lg_u32 s4, 0
+; GFX12-NEXT: s_or_b32 s5, s5, s4
+; GFX12-NEXT: s_cmp_lg_u32 s5, 0
; GFX12-NEXT: s_cselect_b32 s4, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_add_co_i32 s0, s0, s4
@@ -214,9 +214,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_lshr_b32 s1, s1, 16
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX12-NEXT: s_and_b32 s1, s1, 0x8000
-; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_or_b32 s1, s1, s0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: v_mov_b32_e32 v0, s1
; GFX12-NEXT: ; return to shader part epilog
;
; GFX1250-LABEL: fptrunc_f64_to_f16_uniform:
@@ -227,7 +227,7 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_and_b32 s4, s1, 0x1ff
; GFX1250-NEXT: s_addk_co_i32 s2, 0xfc10
; GFX1250-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX1250-NEXT: s_or_b32 s0, s4, s0
+; GFX1250-NEXT: s_or_b32 s4, s4, s0
; GFX1250-NEXT: s_cselect_b32 s0, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_or_b32 s0, s3, s0
@@ -245,9 +245,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_cmp_lg_u32 s4, s5
; GFX1250-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_or_b32 s4, s6, s4
+; GFX1250-NEXT: s_or_b32 s6, s6, s4
; GFX1250-NEXT: s_cmp_lt_i32 s2, 1
-; GFX1250-NEXT: s_cselect_b32 s0, s4, s0
+; GFX1250-NEXT: s_cselect_b32 s0, s6, s0
; GFX1250-NEXT: s_and_b32 s4, s0, 7
; GFX1250-NEXT: s_lshr_b32 s0, s0, 2
; GFX1250-NEXT: s_cmp_eq_u32 s4, 3
@@ -255,8 +255,8 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_cmp_gt_i32 s4, 5
; GFX1250-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_or_b32 s4, s5, s4
-; GFX1250-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-NEXT: s_or_b32 s5, s5, s4
+; GFX1250-NEXT: s_cmp_lg_u32 s5, 0
; GFX1250-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_add_co_i32 s0, s0, s4
@@ -267,9 +267,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_lshr_b32 s1, s1, 16
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_and_b32 s1, s1, 0x8000
-; GFX1250-NEXT: s_or_b32 s0, s1, s0
+; GFX1250-NEXT: s_or_b32 s1, s1, s0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v0, s1
; GFX1250-NEXT: ; return to shader part epilog
%result = fptrunc double %a to half
ret half %result
@@ -306,9 +306,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_div(double %a) {
; GFX11-NEXT: v_and_b32_e32 v4, 7, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 2, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v4
-; GFX11-NEXT: v_cmp_lt_i32_e64 s0, 5, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v4
+; GFX11-NEXT: v_cmp_lt_i32_e32 vcc_lo, 5, v4
+; GFX11-NEXT: s_or_b32 s0, s0, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -356,9 +356,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_div(double %a) {
; GFX12-NEXT: v_and_b32_e32 v4, 7, v3
; GFX12-NEXT: v_lshrrev_b32_e32 v3, 2, v3
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v4
-; GFX12-NEXT: v_cmp_lt_i32_e64 s0, 5, v4
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-NEXT: v_cmp_eq_u32_e64 s0, 3, v4
+; GFX12-NEXT: v_cmp_lt_i32_e32 vcc_lo, 5, v4
+; GFX12-NEXT: s_or_b32 s0, s0, vcc_lo
; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX12-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX12-NEXT: s_wait_alu depctr_va_vcc(0)
@@ -406,9 +406,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_div(double %a) {
; GFX1250-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc_lo
; GFX1250-NEXT: v_dual_lshrrev_b32 v3, 2, v3 :: v_dual_bitop2_b32 v4, 7, v3 bitop3:0x40
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v4
-; GFX1250-NEXT: v_cmp_lt_i32_e64 s0, 5, v4
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s0, 3, v4
+; GFX1250-NEXT: v_cmp_lt_i32_e32 vcc_lo, 5, v4
+; GFX1250-NEXT: s_or_b32 s0, s0, vcc_lo
; GFX1250-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX1250-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
index a75ac906cb1c0..20b7e5f093918 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
@@ -95,7 +95,7 @@ define amdgpu_kernel void @bfe_i32_arg_imm_arg(ptr addrspace(1) %out, i32 %src0,
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, 59, s3
+; GFX6-NEXT: s_or_b32 s3, s3, 59
; GFX6-NEXT: s_bfe_i32 s3, s2, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
@@ -114,8 +114,8 @@ define amdgpu_kernel void @bfe_i32_imm_arg_arg(ptr addrspace(1) %out, i32 %src1,
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_and_b32 s4, s2, 63
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, s4, s3
-; GFX6-NEXT: s_bfe_i32 s3, 0x7b, s3
+; GFX6-NEXT: s_or_b32 s4, s4, s3
+; GFX6-NEXT: s_bfe_i32 s3, 0x7b, s4
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll
index 91402df2100bc..d7c4c1dfb4180 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll
@@ -78,8 +78,8 @@ define amdgpu_kernel void @bfe_u32_arg_arg_arg(ptr addrspace(1) %out, i32 %src0,
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_and_b32 s4, s3, 63
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, s4, s3
-; GFX6-NEXT: s_bfe_u32 s3, s2, s3
+; GFX6-NEXT: s_or_b32 s4, s4, s3
+; GFX6-NEXT: s_bfe_u32 s3, s2, s4
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -114,7 +114,7 @@ define amdgpu_kernel void @bfe_u32_arg_imm_arg(ptr addrspace(1) %out, i32 %src0,
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, 59, s3
+; GFX6-NEXT: s_or_b32 s3, s3, 59
; GFX6-NEXT: s_bfe_u32 s3, s2, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
@@ -133,8 +133,8 @@ define amdgpu_kernel void @bfe_u32_imm_arg_arg(ptr addrspace(1) %out, i32 %src1,
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_and_b32 s4, s2, 63
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, s4, s3
-; GFX6-NEXT: s_bfe_u32 s3, 0x7b, s3
+; GFX6-NEXT: s_or_b32 s4, s4, s3
+; GFX6-NEXT: s_bfe_u32 s3, 0x7b, s4
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index ccc46cc5df39e..c4a3fb1f052cb 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -1306,8 +1306,8 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_or_b32 s6, s6, s5
+; SI-NEXT: s_or_b32 s4, s4, s6
; SI-NEXT: s_and_b32 s5, s20, 0xff
; SI-NEXT: s_lshl_b32 s6, s21, 8
; SI-NEXT: s_or_b32 s5, s5, s6
@@ -1315,8 +1315,8 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_or_b32 s5, s5, s6
+; SI-NEXT: s_or_b32 s7, s7, s6
+; SI-NEXT: s_or_b32 s5, s5, s7
; SI-NEXT: s_and_b32 s6, s24, 0xff
; SI-NEXT: s_lshl_b32 s7, s25, 8
; SI-NEXT: s_or_b32 s6, s6, s7
@@ -1324,8 +1324,8 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_lshl_b32 s8, s27, 24
; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s7, s8, s7
-; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_or_b32 s8, s8, s7
+; SI-NEXT: s_or_b32 s6, s6, s8
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -1419,9 +1419,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: s_or_b32 s6, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_lshl_b32 s5, s6, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -1431,9 +1431,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: s_or_b32 s7, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s6, 16
+; VI-NEXT: s_lshl_b32 s6, s7, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -1443,9 +1443,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s7, s8, s7
+; VI-NEXT: s_or_b32 s8, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
+; VI-NEXT: s_lshl_b32 s7, s8, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -1554,26 +1554,26 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s8
+; GFX11-NEXT: s_or_b32 s6, s6, s8
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s6, 16
; GFX11-NEXT: s_and_b32 s8, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s8, s9
+; GFX11-NEXT: s_lshl_b32 s6, s8, 16
; GFX11-NEXT: s_and_b32 s8, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s10, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s9, s10, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s8, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
@@ -3275,10 +3275,10 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s18, 0xffff
; SI-NEXT: s_lshl_b32 s4, s4, 16
-; SI-NEXT: s_or_b32 s4, s7, s4
+; SI-NEXT: s_or_b32 s7, s7, s4
; SI-NEXT: v_mov_b32_e32 v0, s5
; SI-NEXT: v_mov_b32_e32 v1, s6
-; SI-NEXT: v_mov_b32_e32 v2, s4
+; SI-NEXT: v_mov_b32_e32 v2, s7
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB17_4:
; SI-NEXT: ; implicit-def: $sgpr6
@@ -4589,8 +4589,8 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_or_b32 s6, s6, s5
+; SI-NEXT: s_or_b32 s4, s4, s6
; SI-NEXT: s_and_b32 s5, s20, 0xff
; SI-NEXT: s_lshl_b32 s6, s21, 8
; SI-NEXT: s_or_b32 s5, s5, s6
@@ -4598,8 +4598,8 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_or_b32 s5, s5, s6
+; SI-NEXT: s_or_b32 s7, s7, s6
+; SI-NEXT: s_or_b32 s5, s5, s7
; SI-NEXT: s_and_b32 s6, s24, 0xff
; SI-NEXT: s_lshl_b32 s7, s25, 8
; SI-NEXT: s_or_b32 s6, s6, s7
@@ -4607,8 +4607,8 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_lshl_b32 s8, s27, 24
; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s7, s8, s7
-; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_or_b32 s8, s8, s7
+; SI-NEXT: s_or_b32 s6, s6, s8
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -4702,9 +4702,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: s_or_b32 s6, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_lshl_b32 s5, s6, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -4714,9 +4714,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: s_or_b32 s7, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s6, 16
+; VI-NEXT: s_lshl_b32 s6, s7, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -4726,9 +4726,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s7, s8, s7
+; VI-NEXT: s_or_b32 s8, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
+; VI-NEXT: s_lshl_b32 s7, s8, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -4837,26 +4837,26 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s8
+; GFX11-NEXT: s_or_b32 s6, s6, s8
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s6, 16
; GFX11-NEXT: s_and_b32 s8, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s8, s9
+; GFX11-NEXT: s_lshl_b32 s6, s8, 16
; GFX11-NEXT: s_and_b32 s8, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s10, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s9, s10, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s8, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
@@ -7520,9 +7520,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: s_or_b32 s6, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_lshl_b32 s5, s6, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -7532,9 +7532,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: s_or_b32 s7, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s6, 16
+; VI-NEXT: s_lshl_b32 s6, s7, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -7544,9 +7544,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s7, s8, s7
+; VI-NEXT: s_or_b32 s8, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
+; VI-NEXT: s_lshl_b32 s7, s8, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -7655,26 +7655,26 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s6, 16
; GFX11-NEXT: s_and_b32 s7, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s9
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
+; GFX11-NEXT: s_lshl_b32 s6, s7, 16
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s10, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s9, s10, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
@@ -9399,9 +9399,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: s_or_b32 s6, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_lshl_b32 s5, s6, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -9411,9 +9411,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: s_or_b32 s7, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s6, 16
+; VI-NEXT: s_lshl_b32 s6, s7, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -9423,9 +9423,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s7, s8, s7
+; VI-NEXT: s_or_b32 s8, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
+; VI-NEXT: s_lshl_b32 s7, s8, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -9534,26 +9534,26 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s6, 16
; GFX11-NEXT: s_and_b32 s7, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s9
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
+; GFX11-NEXT: s_lshl_b32 s6, s7, 16
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s10, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s9, s10, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
@@ -10833,9 +10833,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: s_or_b32 s6, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_lshl_b32 s5, s6, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -10845,9 +10845,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: s_or_b32 s7, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s6, 16
+; VI-NEXT: s_lshl_b32 s6, s7, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -10857,9 +10857,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s7, s8, s7
+; VI-NEXT: s_or_b32 s8, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
+; VI-NEXT: s_lshl_b32 s7, s8, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -10968,26 +10968,26 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s6, 16
; GFX11-NEXT: s_and_b32 s7, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s9
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
+; GFX11-NEXT: s_lshl_b32 s6, s7, 16
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s10, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s9, s10, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
diff --git a/llvm/test/CodeGen/AMDGPU/combine-reg-or-const.ll b/llvm/test/CodeGen/AMDGPU/combine-reg-or-const.ll
index c167834470e3b..83e62cbb9b6fb 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-reg-or-const.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-reg-or-const.ll
@@ -27,7 +27,7 @@ define protected amdgpu_kernel void @_Z11test_kernelPii(ptr addrspace(1) nocaptu
; CHECK-NEXT: s_addc_u32 s1, s3, s5
; CHECK-NEXT: s_bfe_u32 s2, s6, 0xd0003
; CHECK-NEXT: s_add_i32 s2, s2, s7
-; CHECK-NEXT: s_or_b32 s2, s2, 0xc0
+; CHECK-NEXT: s_addk_i32 s2, 0xc0
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: v_mov_b32_e32 v2, s2
diff --git a/llvm/test/CodeGen/AMDGPU/s-barrier.ll b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
index 35b86998c9cac..82885b07e569c 100644
--- a/llvm/test/CodeGen/AMDGPU/s-barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
@@ -88,7 +88,7 @@ define amdgpu_kernel void @kernel1(ptr addrspace(1) %out, ptr addrspace(3) %in)
; GFX12-SDAG-NEXT: s_lshr_b32 s2, s2, 4
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: s_and_b32 s2, s2, 63
-; GFX12-SDAG-NEXT: s_or_b32 s3, 0x90000, s2
+; GFX12-SDAG-NEXT: s_or_b32 s3, s2, 0x90000
; GFX12-SDAG-NEXT: s_cmp_eq_u32 0, 0
; GFX12-SDAG-NEXT: s_mov_b32 m0, s3
; GFX12-SDAG-NEXT: s_barrier_init m0
>From 389afe33386fef67125535f738b460e1f4d92fc1 Mon Sep 17 00:00:00 2001
From: Iasonaskrpr <iaskarapro at gmail.com>
Date: Mon, 26 Jan 2026 14:48:16 +0200
Subject: [PATCH 5/6] Correctly formatted file
---
llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index b1b2de5cdd178..32fc9c4d8c73d 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -948,9 +948,8 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
}
}
} else {
- Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32)
- ? AMDGPU::S_ADDK_I32
- : AMDGPU::S_MULK_I32;
+ Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ? AMDGPU::S_ADDK_I32
+ : AMDGPU::S_MULK_I32;
}
if (Opc != 0) {
>From d5778a3f69a13c99e63acbc39023585e135a5c32 Mon Sep 17 00:00:00 2001
From: Iasonaskrpr <iaskarapro at gmail.com>
Date: Wed, 4 Feb 2026 20:54:03 +0200
Subject: [PATCH 6/6] Update tests (all passing); perform the S_OR-to-bitset
conversion before checking for the S_ADDK conversion
---
.../Target/AMDGPU/SIShrinkInstructions.cpp | 54 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll | 8 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll | 4 +-
.../GlobalISel/divergence-structurizer.ll | 60 +-
.../test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll | 60 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll | 66 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll | 50 +-
.../AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll | 6 +-
.../AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll | 10 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll | 584 +++++++++---------
.../CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll | 178 +++---
.../local-stack-alloc-block-sp-reference.ll | 2 +-
llvm/test/CodeGen/AMDGPU/min.ll | 34 +-
llvm/test/CodeGen/AMDGPU/s-barrier.ll | 2 +-
14 files changed, 548 insertions(+), 570 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 32fc9c4d8c73d..14ed778f44f3a 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -909,10 +909,21 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
}
}
+ // Shrink scalar logic operations.
+ if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
+ MI.getOpcode() == AMDGPU::S_OR_B32 ||
+ MI.getOpcode() == AMDGPU::S_XOR_B32) {
+ ChangeKind CK = shrinkScalarLogicOp(MI);
+ if (CK == ChangeKind::UpdateHint)
+ continue;
+ Changed |= (CK == ChangeKind::UpdateInst);
+ }
+
// Try to use S_ADDK_I32 and S_MULK_I32.
if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
MI.getOpcode() == AMDGPU::S_MUL_I32 ||
- MI.getOpcode() == AMDGPU::S_OR_B32) {
+ (MI.getOpcode() == AMDGPU::S_OR_B32 &&
+ MI.getFlag(MachineInstr::MIFlag::Disjoint))) {
const MachineOperand *Dest = &MI.getOperand(0);
MachineOperand *Src0 = &MI.getOperand(1);
MachineOperand *Src1 = &MI.getOperand(2);
@@ -934,30 +945,13 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
}
if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
if (Src1->isImm() && isKImmOperand(*Src1)) {
- unsigned Opc = 0;
-
- if (MI.getOpcode() == AMDGPU::S_OR_B32) {
- uint32_t Imm = static_cast<uint32_t>(Src1->getImm());
- if (MI.getFlag(MachineInstr::MIFlag::Disjoint)) {
- bool IsBitSetCandidate =
- isPowerOf2_32(Imm) &&
- MI.registerDefIsDead(AMDGPU::SCC, /*TRI=*/nullptr) &&
- (llvm::countr_zero(Imm) != 0);
- if (!IsBitSetCandidate) {
- Opc = AMDGPU::S_ADDK_I32;
- }
- }
- } else {
- Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ? AMDGPU::S_ADDK_I32
- : AMDGPU::S_MULK_I32;
- }
-
- if (Opc != 0) {
- Src1->setImm(SignExtend64(Src1->getImm(), 32));
- MI.setDesc(TII->get(Opc));
- MI.tieOperands(0, 1);
- Changed = true;
- }
+ unsigned Opc = (MI.getOpcode() == AMDGPU::S_MUL_I32)
+ ? AMDGPU::S_MULK_I32
+ : AMDGPU::S_ADDK_I32;
+ Src1->setImm(SignExtend64(Src1->getImm(), 32));
+ MI.setDesc(TII->get(Opc));
+ MI.tieOperands(0, 1);
+ Changed = true;
}
}
}
@@ -991,16 +985,6 @@ bool SIShrinkInstructions::run(MachineFunction &MF) {
continue;
}
- // Shrink scalar logic operations.
- if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
- MI.getOpcode() == AMDGPU::S_OR_B32 ||
- MI.getOpcode() == AMDGPU::S_XOR_B32) {
- ChangeKind CK = shrinkScalarLogicOp(MI);
- if (CK == ChangeKind::UpdateHint)
- continue;
- Changed |= (CK == ChangeKind::UpdateInst);
- }
-
if (IsPostRA && TII->isMIMG(MI.getOpcode()) &&
ST->getGeneration() >= AMDGPUSubtarget::GFX10) {
Changed |= shrinkMIMG(MI);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
index f7f86f01d2665..5c60eb696f6b2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -207,8 +207,8 @@ define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX7-NEXT: s_and_b32 s4, s4, 0xffff
; GFX7-NEXT: s_and_b32 s5, s16, 0xffff
; GFX7-NEXT: s_lshl_b32 s4, s4, 16
-; GFX7-NEXT: s_or_b32 s5, s5, s4
-; GFX7-NEXT: v_mov_b32_e32 v0, s5
+; GFX7-NEXT: s_or_b32 s4, s5, s4
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_add_v2i16:
@@ -232,8 +232,8 @@ define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_and_b32 s5, 0xffff, s16
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
-; GFX8-NEXT: s_or_b32 s5, s5, s4
-; GFX8-NEXT: v_mov_b32_e32 v0, s5
+; GFX8-NEXT: s_or_b32 s4, s5, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: s_add_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll
index 856cb29e1ce4a..71d8d5f584d62 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/bswap.ll
@@ -452,9 +452,9 @@ define amdgpu_ps i32 @s_bswap_v2i16(<2 x i16> inreg %src) {
; GFX7-NEXT: s_lshl_b32 s2, s0, 8
; GFX7-NEXT: s_bfe_u32 s3, s0, 0x80008
; GFX7-NEXT: v_alignbit_b32 v0, s1, v0, 24
-; GFX7-NEXT: s_or_b32 s3, s3, s2
+; GFX7-NEXT: s_or_b32 s2, s3, s2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX7-NEXT: s_and_b32 s0, 0xffff, s3
+; GFX7-NEXT: s_and_b32 s0, 0xffff, s2
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-NEXT: v_or_b32_e32 v0, s0, v0
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index 5e0b6f35358cc..5c57d355959ef 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -526,65 +526,65 @@ define amdgpu_ps i32 @irreducible_cfg(i32 %x, i32 %y, i32 %a0, i32 %a1, i32 %a2,
; GFX10-NEXT: s_mov_b32 s0, exec_lo
; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_and_b32 s2, s0, 1
-; GFX10-NEXT: s_xor_b32 s5, vcc_lo, s0
+; GFX10-NEXT: s_xor_b32 s0, vcc_lo, s0
; GFX10-NEXT: s_cmp_lg_u32 s2, 0
; GFX10-NEXT: ; implicit-def: $sgpr2
; GFX10-NEXT: s_cselect_b32 s3, exec_lo, 0
; GFX10-NEXT: s_branch .LBB6_2
; GFX10-NEXT: .LBB6_1: ; %Flow2
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: s_and_b32 s0, exec_lo, s4
-; GFX10-NEXT: s_mov_b32 s5, exec_lo
-; GFX10-NEXT: s_or_b32 s1, s0, s1
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX10-NEXT: s_and_b32 s4, exec_lo, s5
+; GFX10-NEXT: s_mov_b32 s0, exec_lo
+; GFX10-NEXT: s_or_b32 s1, s4, s1
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
; GFX10-NEXT: s_cbranch_execz .LBB6_8
; GFX10-NEXT: .LBB6_2: ; %irr.guard
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB6_6 Depth 2
-; GFX10-NEXT: s_mov_b32 s0, exec_lo
-; GFX10-NEXT: s_and_saveexec_b32 s4, s5
-; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4
+; GFX10-NEXT: s_mov_b32 s4, exec_lo
+; GFX10-NEXT: s_and_saveexec_b32 s5, s0
+; GFX10-NEXT: s_xor_b32 s5, exec_lo, s5
; GFX10-NEXT: ; %bb.3: ; %.loopexit
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: v_cmp_gt_i32_e64 s7, v5, v0
-; GFX10-NEXT: s_mov_b32 s5, exec_lo
+; GFX10-NEXT: v_cmp_gt_i32_e64 s0, v5, v0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
-; GFX10-NEXT: s_xor_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_mov_b32 s7, exec_lo
+; GFX10-NEXT: s_xor_b32 s6, vcc_lo, s6
; GFX10-NEXT: s_andn2_b32 s3, s3, exec_lo
-; GFX10-NEXT: s_or_b32 s5, s7, s5
-; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
-; GFX10-NEXT: s_xor_b32 s5, s5, s6
-; GFX10-NEXT: s_and_b32 s6, exec_lo, s7
-; GFX10-NEXT: s_and_b32 s5, exec_lo, s5
-; GFX10-NEXT: s_or_b32 s3, s3, s6
-; GFX10-NEXT: s_or_b32 s0, s0, s5
+; GFX10-NEXT: s_or_b32 s6, s0, s6
+; GFX10-NEXT: s_and_b32 s0, exec_lo, s0
+; GFX10-NEXT: s_xor_b32 s6, s6, s7
+; GFX10-NEXT: s_andn2_b32 s4, s4, exec_lo
+; GFX10-NEXT: s_and_b32 s6, exec_lo, s6
+; GFX10-NEXT: s_or_b32 s3, s3, s0
+; GFX10-NEXT: s_or_b32 s4, s4, s6
; GFX10-NEXT: ; %bb.4: ; %Flow1
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: s_andn2_b32 s2, s2, exec_lo
-; GFX10-NEXT: s_and_b32 s5, exec_lo, s3
-; GFX10-NEXT: s_mov_b32 s4, exec_lo
-; GFX10-NEXT: s_or_b32 s2, s2, s5
-; GFX10-NEXT: s_and_saveexec_b32 s5, s0
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 s0, s2, exec_lo
+; GFX10-NEXT: s_and_b32 s2, exec_lo, s3
+; GFX10-NEXT: s_mov_b32 s5, exec_lo
+; GFX10-NEXT: s_or_b32 s2, s0, s2
+; GFX10-NEXT: s_and_saveexec_b32 s6, s4
; GFX10-NEXT: s_cbranch_execz .LBB6_1
; GFX10-NEXT: ; %bb.5: ; %.preheader
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
; GFX10-NEXT: v_cmp_le_i32_e64 s0, v4, v0
-; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB6_6: ; %.inner_loop
; GFX10-NEXT: ; Parent Loop BB6_2 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: s_and_b32 s7, exec_lo, s0
-; GFX10-NEXT: s_or_b32 s6, s7, s6
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s6
+; GFX10-NEXT: s_or_b32 s4, s7, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB6_6
; GFX10-NEXT: ; %bb.7: ; %Flow
; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6
-; GFX10-NEXT: s_andn2_b32 s0, s4, exec_lo
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: s_andn2_b32 s0, s5, exec_lo
; GFX10-NEXT: s_and_b32 s4, exec_lo, 0
-; GFX10-NEXT: s_or_b32 s4, s0, s4
+; GFX10-NEXT: s_or_b32 s5, s0, s4
; GFX10-NEXT: s_branch .LBB6_1
; GFX10-NEXT: .LBB6_8: ; %.exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll
index 1f5cd4ab1c6d0..fa72eb72fd723 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fptrunc.ll
@@ -122,7 +122,7 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_and_b32 s4, s1, 0x1ff
; GFX11-NEXT: s_addk_i32 s2, 0xfc10
; GFX11-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX11-NEXT: s_or_b32 s4, s4, s0
+; GFX11-NEXT: s_or_b32 s0, s4, s0
; GFX11-NEXT: s_cselect_b32 s0, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s3, s0
@@ -140,9 +140,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_cmp_lg_u32 s4, s5
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s6, s6, s4
+; GFX11-NEXT: s_or_b32 s4, s6, s4
; GFX11-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-NEXT: s_cselect_b32 s0, s6, s0
+; GFX11-NEXT: s_cselect_b32 s0, s4, s0
; GFX11-NEXT: s_and_b32 s4, s0, 7
; GFX11-NEXT: s_lshr_b32 s0, s0, 2
; GFX11-NEXT: s_cmp_eq_u32 s4, 3
@@ -150,8 +150,8 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_cmp_gt_i32 s4, 5
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s4
-; GFX11-NEXT: s_cmp_lg_u32 s5, 0
+; GFX11-NEXT: s_or_b32 s4, s5, s4
+; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_add_i32 s0, s0, s4
@@ -162,9 +162,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX11-NEXT: s_lshr_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b32 s1, s1, 0x8000
-; GFX11-NEXT: s_or_b32 s1, s1, s0
+; GFX11-NEXT: s_or_b32 s0, s1, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: fptrunc_f64_to_f16_uniform:
@@ -174,7 +174,7 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_and_b32 s4, s1, 0x1ff
; GFX12-NEXT: s_addk_co_i32 s2, 0xfc10
; GFX12-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX12-NEXT: s_or_b32 s4, s4, s0
+; GFX12-NEXT: s_or_b32 s0, s4, s0
; GFX12-NEXT: s_cselect_b32 s0, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_or_b32 s0, s3, s0
@@ -192,9 +192,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_cmp_lg_u32 s4, s5
; GFX12-NEXT: s_cselect_b32 s4, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: s_or_b32 s6, s6, s4
+; GFX12-NEXT: s_or_b32 s4, s6, s4
; GFX12-NEXT: s_cmp_lt_i32 s2, 1
-; GFX12-NEXT: s_cselect_b32 s0, s6, s0
+; GFX12-NEXT: s_cselect_b32 s0, s4, s0
; GFX12-NEXT: s_and_b32 s4, s0, 7
; GFX12-NEXT: s_lshr_b32 s0, s0, 2
; GFX12-NEXT: s_cmp_eq_u32 s4, 3
@@ -202,8 +202,8 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_cmp_gt_i32 s4, 5
; GFX12-NEXT: s_cselect_b32 s4, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: s_or_b32 s5, s5, s4
-; GFX12-NEXT: s_cmp_lg_u32 s5, 0
+; GFX12-NEXT: s_or_b32 s4, s5, s4
+; GFX12-NEXT: s_cmp_lg_u32 s4, 0
; GFX12-NEXT: s_cselect_b32 s4, 1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_add_co_i32 s0, s0, s4
@@ -214,9 +214,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX12-NEXT: s_lshr_b32 s1, s1, 16
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX12-NEXT: s_and_b32 s1, s1, 0x8000
-; GFX12-NEXT: s_or_b32 s1, s1, s0
+; GFX12-NEXT: s_or_b32 s0, s1, s0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_mov_b32_e32 v0, s1
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: ; return to shader part epilog
;
; GFX1250-LABEL: fptrunc_f64_to_f16_uniform:
@@ -227,7 +227,7 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_and_b32 s4, s1, 0x1ff
; GFX1250-NEXT: s_addk_co_i32 s2, 0xfc10
; GFX1250-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX1250-NEXT: s_or_b32 s4, s4, s0
+; GFX1250-NEXT: s_or_b32 s0, s4, s0
; GFX1250-NEXT: s_cselect_b32 s0, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_or_b32 s0, s3, s0
@@ -245,9 +245,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_cmp_lg_u32 s4, s5
; GFX1250-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_or_b32 s6, s6, s4
+; GFX1250-NEXT: s_or_b32 s4, s6, s4
; GFX1250-NEXT: s_cmp_lt_i32 s2, 1
-; GFX1250-NEXT: s_cselect_b32 s0, s6, s0
+; GFX1250-NEXT: s_cselect_b32 s0, s4, s0
; GFX1250-NEXT: s_and_b32 s4, s0, 7
; GFX1250-NEXT: s_lshr_b32 s0, s0, 2
; GFX1250-NEXT: s_cmp_eq_u32 s4, 3
@@ -255,8 +255,8 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_cmp_gt_i32 s4, 5
; GFX1250-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_or_b32 s5, s5, s4
-; GFX1250-NEXT: s_cmp_lg_u32 s5, 0
+; GFX1250-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-NEXT: s_cmp_lg_u32 s4, 0
; GFX1250-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_add_co_i32 s0, s0, s4
@@ -267,9 +267,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_uniform(double inreg %a) {
; GFX1250-NEXT: s_lshr_b32 s1, s1, 16
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_and_b32 s1, s1, 0x8000
-; GFX1250-NEXT: s_or_b32 s1, s1, s0
+; GFX1250-NEXT: s_or_b32 s0, s1, s0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_mov_b32_e32 v0, s1
+; GFX1250-NEXT: v_mov_b32_e32 v0, s0
; GFX1250-NEXT: ; return to shader part epilog
%result = fptrunc double %a to half
ret half %result
@@ -306,9 +306,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_div(double %a) {
; GFX11-NEXT: v_and_b32_e32 v4, 7, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 2, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v4
-; GFX11-NEXT: v_cmp_lt_i32_e32 vcc_lo, 5, v4
-; GFX11-NEXT: s_or_b32 s0, s0, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v4
+; GFX11-NEXT: v_cmp_lt_i32_e64 s0, 5, v4
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -356,9 +356,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_div(double %a) {
; GFX12-NEXT: v_and_b32_e32 v4, 7, v3
; GFX12-NEXT: v_lshrrev_b32_e32 v3, 2, v3
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u32_e64 s0, 3, v4
-; GFX12-NEXT: v_cmp_lt_i32_e32 vcc_lo, 5, v4
-; GFX12-NEXT: s_or_b32 s0, s0, vcc_lo
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v4
+; GFX12-NEXT: v_cmp_lt_i32_e64 s0, 5, v4
+; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX12-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX12-NEXT: s_wait_alu depctr_va_vcc(0)
@@ -406,9 +406,9 @@ define amdgpu_ps half @fptrunc_f64_to_f16_div(double %a) {
; GFX1250-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc_lo
; GFX1250-NEXT: v_dual_lshrrev_b32 v3, 2, v3 :: v_dual_bitop2_b32 v4, 7, v3 bitop3:0x40
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_cmp_eq_u32_e64 s0, 3, v4
-; GFX1250-NEXT: v_cmp_lt_i32_e32 vcc_lo, 5, v4
-; GFX1250-NEXT: s_or_b32 s0, s0, vcc_lo
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v4
+; GFX1250-NEXT: v_cmp_lt_i32_e64 s0, 5, v4
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX1250-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX1250-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index 6ad73601859d1..4fefef5e66155 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -664,8 +664,8 @@ define amdgpu_ps i16 @s_fshl_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg, i16 in
; GFX6-NEXT: s_lshr_b32 s1, s1, 1
; GFX6-NEXT: s_lshl_b32 s2, s3, s2
; GFX6-NEXT: s_lshr_b32 s1, s1, s4
-; GFX6-NEXT: s_or_b32 s1, s2, s1
-; GFX6-NEXT: s_and_b32 s1, s1, 0xff
+; GFX6-NEXT: s_or_b32 s2, s2, s1
+; GFX6-NEXT: s_and_b32 s1, s2, 0xff
; GFX6-NEXT: s_and_b32 s0, s0, 0xff
; GFX6-NEXT: s_lshl_b32 s1, s1, 8
; GFX6-NEXT: s_or_b32 s0, s0, s1
@@ -954,17 +954,17 @@ define amdgpu_ps i32 @s_fshl_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg, i32 in
; GFX6-NEXT: s_and_b32 s4, s8, 7
; GFX6-NEXT: s_andn2_b32 s6, 7, s8
; GFX6-NEXT: s_lshr_b32 s1, s1, 25
-; GFX6-NEXT: s_and_b32 s2, s2, 0xff
; GFX6-NEXT: s_lshl_b32 s4, s5, s4
; GFX6-NEXT: s_lshr_b32 s1, s1, s6
+; GFX6-NEXT: s_or_b32 s4, s4, s1
+; GFX6-NEXT: s_and_b32 s1, s2, 0xff
; GFX6-NEXT: s_and_b32 s0, s0, 0xff
-; GFX6-NEXT: s_lshl_b32 s2, s2, 8
-; GFX6-NEXT: s_or_b32 s1, s4, s1
-; GFX6-NEXT: s_or_b32 s0, s0, s2
-; GFX6-NEXT: s_and_b32 s2, s3, 0xff
-; GFX6-NEXT: s_lshl_b32 s2, s2, 16
-; GFX6-NEXT: s_and_b32 s1, s1, 0xff
-; GFX6-NEXT: s_or_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s3, 0xff
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s4, 0xff
; GFX6-NEXT: s_lshl_b32 s1, s1, 24
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
@@ -3689,8 +3689,8 @@ define amdgpu_ps i32 @s_fshl_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX8-NEXT: s_lshr_b32 s1, s1, 17
; GFX8-NEXT: s_lshl_b32 s2, s3, s2
; GFX8-NEXT: s_lshr_b32 s1, s1, s4
-; GFX8-NEXT: s_or_b32 s1, s2, s1
-; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
+; GFX8-NEXT: s_or_b32 s2, s2, s1
+; GFX8-NEXT: s_and_b32 s1, 0xffff, s2
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
; GFX8-NEXT: s_or_b32 s0, s0, s1
@@ -4268,15 +4268,15 @@ define amdgpu_ps i48 @s_fshl_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs, <
; GFX8-NEXT: s_lshr_b32 s2, s2, 17
; GFX8-NEXT: s_lshl_b32 s4, s6, s4
; GFX8-NEXT: s_lshr_b32 s2, s2, s7
-; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
-; GFX8-NEXT: s_or_b32 s2, s4, s2
-; GFX8-NEXT: s_and_b32 s4, s5, 15
+; GFX8-NEXT: s_or_b32 s4, s4, s2
+; GFX8-NEXT: s_and_b32 s2, s5, 15
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s2, 0xffff, s3
; GFX8-NEXT: s_andn2_b32 s5, 15, s5
-; GFX8-NEXT: s_lshr_b32 s3, s3, 1
-; GFX8-NEXT: s_lshl_b32 s1, s1, s4
-; GFX8-NEXT: s_lshr_b32 s3, s3, s5
-; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT: s_or_b32 s1, s1, s3
+; GFX8-NEXT: s_lshr_b32 s2, s2, 1
+; GFX8-NEXT: s_lshr_b32 s2, s2, s5
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s2, 0xffff, s4
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
@@ -4614,29 +4614,29 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX8-NEXT: s_lshr_b32 s2, s2, 17
; GFX8-NEXT: s_lshl_b32 s4, s6, s4
; GFX8-NEXT: s_lshr_b32 s2, s2, s8
-; GFX8-NEXT: s_or_b32 s2, s4, s2
-; GFX8-NEXT: s_and_b32 s4, s5, 15
+; GFX8-NEXT: s_or_b32 s4, s4, s2
+; GFX8-NEXT: s_and_b32 s2, s5, 15
; GFX8-NEXT: s_lshr_b32 s7, s1, 16
-; GFX8-NEXT: s_lshl_b32 s1, s1, s4
-; GFX8-NEXT: s_and_b32 s4, 0xffff, s3
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s2, 0xffff, s3
; GFX8-NEXT: s_lshr_b32 s9, s5, 16
; GFX8-NEXT: s_andn2_b32 s5, 15, s5
-; GFX8-NEXT: s_lshr_b32 s4, s4, 1
-; GFX8-NEXT: s_lshr_b32 s4, s4, s5
-; GFX8-NEXT: s_or_b32 s1, s1, s4
-; GFX8-NEXT: s_and_b32 s4, s9, 15
+; GFX8-NEXT: s_lshr_b32 s2, s2, 1
+; GFX8-NEXT: s_lshr_b32 s2, s2, s5
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s2, s9, 15
; GFX8-NEXT: s_andn2_b32 s5, 15, s9
; GFX8-NEXT: s_lshr_b32 s3, s3, 17
-; GFX8-NEXT: s_lshl_b32 s4, s7, s4
+; GFX8-NEXT: s_lshl_b32 s2, s7, s2
; GFX8-NEXT: s_lshr_b32 s3, s3, s5
+; GFX8-NEXT: s_or_b32 s2, s2, s3
+; GFX8-NEXT: s_and_b32 s3, 0xffff, s4
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT: s_or_b32 s3, s4, s3
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
-; GFX8-NEXT: s_lshl_b32 s2, s2, 16
-; GFX8-NEXT: s_or_b32 s0, s0, s2
-; GFX8-NEXT: s_and_b32 s2, 0xffff, s3
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s3
; GFX8-NEXT: s_or_b32 s1, s1, s2
; GFX8-NEXT: ; return to shader part epilog
;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index 5afab53628c34..bc6a2e7c43256 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -665,8 +665,8 @@ define amdgpu_ps i16 @s_fshr_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg, i16 in
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80008
; GFX6-NEXT: s_lshl_b32 s3, s3, s4
; GFX6-NEXT: s_lshr_b32 s1, s1, s2
-; GFX6-NEXT: s_or_b32 s1, s3, s1
-; GFX6-NEXT: s_and_b32 s1, s1, 0xff
+; GFX6-NEXT: s_or_b32 s3, s3, s1
+; GFX6-NEXT: s_and_b32 s1, s3, 0xff
; GFX6-NEXT: s_and_b32 s0, s0, 0xff
; GFX6-NEXT: s_lshl_b32 s1, s1, 8
; GFX6-NEXT: s_or_b32 s0, s0, s1
@@ -946,28 +946,28 @@ define amdgpu_ps i32 @s_fshr_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg, i32 in
; GFX6-NEXT: s_bfe_u32 s7, s1, 0x80008
; GFX6-NEXT: s_lshr_b32 s2, s7, s2
; GFX6-NEXT: s_lshr_b32 s6, s1, 24
-; GFX6-NEXT: s_or_b32 s2, s3, s2
-; GFX6-NEXT: s_and_b32 s3, s8, 7
+; GFX6-NEXT: s_or_b32 s3, s3, s2
+; GFX6-NEXT: s_and_b32 s2, s8, 7
; GFX6-NEXT: s_andn2_b32 s7, 7, s8
; GFX6-NEXT: s_lshl_b32 s4, s4, 1
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80010
; GFX6-NEXT: s_lshl_b32 s4, s4, s7
-; GFX6-NEXT: s_lshr_b32 s1, s1, s3
-; GFX6-NEXT: s_or_b32 s1, s4, s1
-; GFX6-NEXT: s_and_b32 s3, s9, 7
-; GFX6-NEXT: s_andn2_b32 s4, 7, s9
+; GFX6-NEXT: s_lshr_b32 s1, s1, s2
+; GFX6-NEXT: s_or_b32 s4, s4, s1
+; GFX6-NEXT: s_and_b32 s1, s9, 7
+; GFX6-NEXT: s_andn2_b32 s2, 7, s9
; GFX6-NEXT: s_lshl_b32 s5, s5, 1
-; GFX6-NEXT: s_and_b32 s2, s2, 0xff
-; GFX6-NEXT: s_lshl_b32 s4, s5, s4
-; GFX6-NEXT: s_lshr_b32 s3, s6, s3
+; GFX6-NEXT: s_lshl_b32 s2, s5, s2
+; GFX6-NEXT: s_lshr_b32 s1, s6, s1
+; GFX6-NEXT: s_or_b32 s2, s2, s1
+; GFX6-NEXT: s_and_b32 s1, s3, 0xff
; GFX6-NEXT: s_and_b32 s0, s0, 0xff
-; GFX6-NEXT: s_lshl_b32 s2, s2, 8
-; GFX6-NEXT: s_and_b32 s1, s1, 0xff
-; GFX6-NEXT: s_or_b32 s3, s4, s3
-; GFX6-NEXT: s_or_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s4, 0xff
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_and_b32 s1, s3, 0xff
+; GFX6-NEXT: s_and_b32 s1, s2, 0xff
; GFX6-NEXT: s_lshl_b32 s1, s1, 24
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
@@ -3443,8 +3443,8 @@ define amdgpu_ps i32 @s_fshr_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX8-NEXT: s_lshl_b32 s3, s3, 1
; GFX8-NEXT: s_lshl_b32 s2, s3, s2
; GFX8-NEXT: s_lshr_b32 s1, s4, s1
-; GFX8-NEXT: s_or_b32 s1, s2, s1
-; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
+; GFX8-NEXT: s_or_b32 s2, s2, s1
+; GFX8-NEXT: s_and_b32 s1, 0xffff, s2
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
; GFX8-NEXT: s_or_b32 s0, s0, s1
@@ -4026,15 +4026,15 @@ define amdgpu_ps i48 @s_fshr_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs, <
; GFX8-NEXT: s_lshl_b32 s6, s6, 1
; GFX8-NEXT: s_lshl_b32 s4, s6, s4
; GFX8-NEXT: s_lshr_b32 s2, s7, s2
-; GFX8-NEXT: s_or_b32 s2, s4, s2
-; GFX8-NEXT: s_and_b32 s4, s5, 15
+; GFX8-NEXT: s_or_b32 s4, s4, s2
+; GFX8-NEXT: s_and_b32 s2, s5, 15
; GFX8-NEXT: s_andn2_b32 s5, 15, s5
; GFX8-NEXT: s_lshl_b32 s1, s1, 1
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
; GFX8-NEXT: s_lshl_b32 s1, s1, s5
-; GFX8-NEXT: s_lshr_b32 s3, s3, s4
-; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT: s_or_b32 s1, s1, s3
+; GFX8-NEXT: s_lshr_b32 s2, s3, s2
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s2, 0xffff, s4
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
@@ -4376,8 +4376,8 @@ define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX8-NEXT: s_lshl_b32 s6, s6, 1
; GFX8-NEXT: s_lshl_b32 s4, s6, s4
; GFX8-NEXT: s_lshr_b32 s2, s7, s2
-; GFX8-NEXT: s_or_b32 s2, s4, s2
-; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX8-NEXT: s_or_b32 s4, s4, s2
+; GFX8-NEXT: s_and_b32 s2, 0xffff, s4
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
index 20b7e5f093918..a75ac906cb1c0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
@@ -95,7 +95,7 @@ define amdgpu_kernel void @bfe_i32_arg_imm_arg(ptr addrspace(1) %out, i32 %src0,
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, s3, 59
+; GFX6-NEXT: s_or_b32 s3, 59, s3
; GFX6-NEXT: s_bfe_i32 s3, s2, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
@@ -114,8 +114,8 @@ define amdgpu_kernel void @bfe_i32_imm_arg_arg(ptr addrspace(1) %out, i32 %src1,
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_and_b32 s4, s2, 63
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s4, s4, s3
-; GFX6-NEXT: s_bfe_i32 s3, 0x7b, s4
+; GFX6-NEXT: s_or_b32 s3, s4, s3
+; GFX6-NEXT: s_bfe_i32 s3, 0x7b, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll
index d7c4c1dfb4180..91402df2100bc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll
@@ -78,8 +78,8 @@ define amdgpu_kernel void @bfe_u32_arg_arg_arg(ptr addrspace(1) %out, i32 %src0,
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_and_b32 s4, s3, 63
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s4, s4, s3
-; GFX6-NEXT: s_bfe_u32 s3, s2, s4
+; GFX6-NEXT: s_or_b32 s3, s4, s3
+; GFX6-NEXT: s_bfe_u32 s3, s2, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -114,7 +114,7 @@ define amdgpu_kernel void @bfe_u32_arg_imm_arg(ptr addrspace(1) %out, i32 %src0,
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s3, s3, 59
+; GFX6-NEXT: s_or_b32 s3, 59, s3
; GFX6-NEXT: s_bfe_u32 s3, s2, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
@@ -133,8 +133,8 @@ define amdgpu_kernel void @bfe_u32_imm_arg_arg(ptr addrspace(1) %out, i32 %src1,
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_and_b32 s4, s2, 63
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_or_b32 s4, s4, s3
-; GFX6-NEXT: s_bfe_u32 s3, 0x7b, s4
+; GFX6-NEXT: s_or_b32 s3, s4, s3
+; GFX6-NEXT: s_bfe_u32 s3, 0x7b, s3
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index 0625121f9ea7a..241a2f3457e4f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -73278,7 +73278,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_writelane_b32 v40, s53, 13
; SI-NEXT: v_writelane_b32 v40, s54, 14
; SI-NEXT: v_writelane_b32 v40, s55, 15
-; SI-NEXT: s_mov_b32 s92, s16
; SI-NEXT: v_writelane_b32 v40, s64, 16
; SI-NEXT: v_writelane_b32 v40, s65, 17
; SI-NEXT: v_writelane_b32 v40, s66, 18
@@ -73295,52 +73294,54 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_writelane_b32 v40, s85, 29
; SI-NEXT: v_writelane_b32 v40, s86, 30
; SI-NEXT: v_writelane_b32 v40, s87, 31
-; SI-NEXT: ; implicit-def: $vgpr41 : SGPR spill to VGPR lane
; SI-NEXT: v_writelane_b32 v40, s96, 32
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v41, s23, 0
+; SI-NEXT: ; implicit-def: $vgpr41 : SGPR spill to VGPR lane
; SI-NEXT: v_writelane_b32 v40, s97, 33
-; SI-NEXT: v_writelane_b32 v41, s21, 1
-; SI-NEXT: v_readfirstlane_b32 s47, v29
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_writelane_b32 v41, s28, 0
+; SI-NEXT: v_writelane_b32 v41, s26, 1
+; SI-NEXT: v_writelane_b32 v41, s23, 2
+; SI-NEXT: v_writelane_b32 v41, s22, 3
; SI-NEXT: v_writelane_b32 v40, s98, 34
-; SI-NEXT: v_writelane_b32 v41, s47, 2
+; SI-NEXT: v_writelane_b32 v41, s21, 4
; SI-NEXT: v_writelane_b32 v40, s99, 35
+; SI-NEXT: s_mov_b32 s88, s29
+; SI-NEXT: s_mov_b32 s30, s25
+; SI-NEXT: s_mov_b32 s29, s24
+; SI-NEXT: v_writelane_b32 v41, s20, 5
; SI-NEXT: v_readfirstlane_b32 s82, v30
-; SI-NEXT: v_readfirstlane_b32 s83, v28
-; SI-NEXT: v_readfirstlane_b32 s44, v27
-; SI-NEXT: v_readfirstlane_b32 s96, v26
-; SI-NEXT: v_readfirstlane_b32 s70, v25
-; SI-NEXT: v_readfirstlane_b32 s68, v24
-; SI-NEXT: v_readfirstlane_b32 s84, v23
+; SI-NEXT: v_readfirstlane_b32 s57, v29
+; SI-NEXT: v_readfirstlane_b32 s34, v28
+; SI-NEXT: v_readfirstlane_b32 s83, v27
+; SI-NEXT: v_readfirstlane_b32 s46, v26
+; SI-NEXT: v_readfirstlane_b32 s68, v25
+; SI-NEXT: v_readfirstlane_b32 s52, v24
+; SI-NEXT: v_readfirstlane_b32 s81, v23
; SI-NEXT: v_readfirstlane_b32 s65, v22
; SI-NEXT: v_readfirstlane_b32 s86, v21
-; SI-NEXT: v_readfirstlane_b32 s66, v20
+; SI-NEXT: v_readfirstlane_b32 s84, v20
; SI-NEXT: v_readfirstlane_b32 s87, v19
; SI-NEXT: v_readfirstlane_b32 s80, v18
; SI-NEXT: v_readfirstlane_b32 s36, v17
-; SI-NEXT: v_readfirstlane_b32 s31, v16
+; SI-NEXT: v_readfirstlane_b32 s97, v16
; SI-NEXT: v_readfirstlane_b32 s64, v15
; SI-NEXT: v_readfirstlane_b32 s38, v14
; SI-NEXT: v_readfirstlane_b32 s67, v13
-; SI-NEXT: v_readfirstlane_b32 s34, v12
-; SI-NEXT: v_readfirstlane_b32 s71, v11
-; SI-NEXT: v_readfirstlane_b32 s81, v10
+; SI-NEXT: v_readfirstlane_b32 s48, v12
+; SI-NEXT: v_readfirstlane_b32 s70, v11
+; SI-NEXT: v_readfirstlane_b32 s71, v10
; SI-NEXT: v_readfirstlane_b32 s37, v9
-; SI-NEXT: v_readfirstlane_b32 s35, v8
-; SI-NEXT: v_readfirstlane_b32 s49, v7
+; SI-NEXT: v_readfirstlane_b32 s28, v8
+; SI-NEXT: v_readfirstlane_b32 s93, v7
; SI-NEXT: v_readfirstlane_b32 s94, v6
-; SI-NEXT: v_readfirstlane_b32 s51, v5
-; SI-NEXT: v_readfirstlane_b32 s88, v4
-; SI-NEXT: v_readfirstlane_b32 s53, v3
-; SI-NEXT: v_readfirstlane_b32 s54, v2
-; SI-NEXT: v_readfirstlane_b32 s89, v1
-; SI-NEXT: v_readfirstlane_b32 s90, v0
+; SI-NEXT: v_readfirstlane_b32 s49, v5
+; SI-NEXT: v_readfirstlane_b32 s95, v4
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_readfirstlane_b32 s91, v31
; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_readfirstlane_b32 s16, v32
+; SI-NEXT: v_readfirstlane_b32 s89, v32
; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_readfirstlane_b32 s93, v33
+; SI-NEXT: v_readfirstlane_b32 s79, v33
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
@@ -73350,245 +73351,248 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_readfirstlane_b32 s52, v34
+; SI-NEXT: v_readfirstlane_b32 s39, v34
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_readfirstlane_b32 s55, v35
+; SI-NEXT: v_readfirstlane_b32 s66, v35
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_readfirstlane_b32 s79, v37
+; SI-NEXT: v_readfirstlane_b32 s96, v37
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:76
-; SI-NEXT: v_readfirstlane_b32 s50, v36
+; SI-NEXT: v_readfirstlane_b32 s55, v36
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_readfirstlane_b32 s21, v38
+; SI-NEXT: v_readfirstlane_b32 s47, v38
+; SI-NEXT: v_readfirstlane_b32 s53, v3
+; SI-NEXT: v_readfirstlane_b32 s92, v2
+; SI-NEXT: v_readfirstlane_b32 s20, v1
+; SI-NEXT: v_readfirstlane_b32 s54, v0
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_readfirstlane_b32 s56, v31
+; SI-NEXT: v_readfirstlane_b32 s59, v31
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_readfirstlane_b32 s85, v32
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_readfirstlane_b32 s58, v33
+; SI-NEXT: v_readfirstlane_b32 s21, v33
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_readfirstlane_b32 s98, v39
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_readfirstlane_b32 s46, v48
+; SI-NEXT: v_readfirstlane_b32 s26, v48
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_readfirstlane_b32 s99, v49
; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_readfirstlane_b32 s97, v50
+; SI-NEXT: v_readfirstlane_b32 s24, v50
; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_readfirstlane_b32 s9, v51
-; SI-NEXT: v_writelane_b32 v41, s58, 3
-; SI-NEXT: v_writelane_b32 v41, s9, 4
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v37
+; SI-NEXT: v_readfirstlane_b32 s23, v51
+; SI-NEXT: v_writelane_b32 v41, s21, 6
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_readfirstlane_b32 s78, v34
+; SI-NEXT: v_readfirstlane_b32 s22, v34
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v37
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_readfirstlane_b32 s69, v35
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s30, v36
+; SI-NEXT: v_readfirstlane_b32 s25, v36
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: s_cbranch_scc0 .LBB99_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_and_b32 s4, s92, 0xff
+; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
; SI-NEXT: s_or_b32 s12, s4, s5
; SI-NEXT: s_and_b32 s4, s18, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
; SI-NEXT: s_lshl_b32 s5, s19, 24
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s5, s24, 0xff
-; SI-NEXT: s_lshl_b32 s6, s25, 8
+; SI-NEXT: s_and_b32 s5, s29, 0xff
+; SI-NEXT: s_lshl_b32 s6, s30, 8
; SI-NEXT: s_or_b32 s13, s5, s6
-; SI-NEXT: s_and_b32 s5, s26, 0xff
+; SI-NEXT: v_readlane_b32 s5, v41, 1
+; SI-NEXT: s_and_b32 s5, s5, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s6, s27, 24
; SI-NEXT: s_or_b32 s6, s6, s5
-; SI-NEXT: s_and_b32 s5, s54, 0xff
+; SI-NEXT: s_and_b32 s5, s92, 0xff
; SI-NEXT: s_lshl_b32 s7, s53, 8
; SI-NEXT: s_or_b32 s14, s5, s7
-; SI-NEXT: s_and_b32 s5, s88, 0xff
+; SI-NEXT: s_and_b32 s5, s95, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s7, s51, 24
+; SI-NEXT: s_lshl_b32 s7, s49, 24
; SI-NEXT: s_or_b32 s8, s7, s5
-; SI-NEXT: s_and_b32 s5, s81, 0xff
-; SI-NEXT: s_lshl_b32 s7, s71, 8
+; SI-NEXT: s_and_b32 s5, s71, 0xff
+; SI-NEXT: s_lshl_b32 s7, s70, 8
; SI-NEXT: s_or_b32 s15, s5, s7
-; SI-NEXT: s_and_b32 s5, s34, 0xff
+; SI-NEXT: s_and_b32 s5, s48, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s7, s67, 24
; SI-NEXT: s_or_b32 s10, s7, s5
; SI-NEXT: s_and_b32 s5, s80, 0xff
; SI-NEXT: s_lshl_b32 s7, s87, 8
; SI-NEXT: s_or_b32 s40, s5, s7
-; SI-NEXT: s_and_b32 s5, s66, 0xff
+; SI-NEXT: s_and_b32 s5, s84, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s7, s86, 24
; SI-NEXT: s_or_b32 s60, s7, s5
-; SI-NEXT: s_and_b32 s5, s96, 0xff
-; SI-NEXT: s_lshl_b32 s7, s44, 8
+; SI-NEXT: s_and_b32 s5, s46, 0xff
+; SI-NEXT: s_lshl_b32 s7, s83, 8
; SI-NEXT: s_or_b32 s41, s5, s7
-; SI-NEXT: s_and_b32 s5, s9, 0xff
-; SI-NEXT: s_lshl_b32 s7, s97, 8
+; SI-NEXT: s_and_b32 s5, s23, 0xff
+; SI-NEXT: s_lshl_b32 s7, s24, 8
; SI-NEXT: s_or_b32 s42, s5, s7
-; SI-NEXT: s_and_b32 s5, s21, 0xff
-; SI-NEXT: s_lshl_b32 s7, s79, 8
+; SI-NEXT: s_and_b32 s5, s47, 0xff
+; SI-NEXT: s_lshl_b32 s7, s96, 8
; SI-NEXT: s_or_b32 s43, s5, s7
-; SI-NEXT: v_readlane_b32 s7, v41, 1
-; SI-NEXT: s_and_b32 s5, s20, 0xff
+; SI-NEXT: v_readlane_b32 s5, v41, 5
+; SI-NEXT: v_readlane_b32 s7, v41, 4
+; SI-NEXT: s_and_b32 s5, s5, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_or_b32 s5, s5, s7
-; SI-NEXT: s_and_b32 s7, s22, 0xff
-; SI-NEXT: v_readlane_b32 s9, v41, 0
+; SI-NEXT: v_readlane_b32 s7, v41, 3
+; SI-NEXT: s_and_b32 s7, s7, 0xff
+; SI-NEXT: v_readlane_b32 s9, v41, 2
; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_lshl_b32 s9, s9, 24
-; SI-NEXT: s_or_b32 s57, s9, s7
-; SI-NEXT: s_and_b32 s7, s28, 0xff
-; SI-NEXT: s_lshl_b32 s9, s29, 8
+; SI-NEXT: v_writelane_b32 v41, s46, 9
+; SI-NEXT: s_mov_b32 s56, s30
+; SI-NEXT: s_or_b32 s30, s9, s7
+; SI-NEXT: v_readlane_b32 s7, v41, 0
+; SI-NEXT: s_and_b32 s7, s7, 0xff
+; SI-NEXT: s_lshl_b32 s9, s88, 8
; SI-NEXT: s_or_b32 s7, s7, s9
-; SI-NEXT: s_and_b32 s9, s90, 0xff
+; SI-NEXT: s_and_b32 s9, s54, 0xff
; SI-NEXT: s_lshl_b32 s9, s9, 16
-; SI-NEXT: s_lshl_b32 s11, s89, 24
-; SI-NEXT: s_or_b32 s77, s11, s9
+; SI-NEXT: s_lshl_b32 s11, s20, 24
+; SI-NEXT: s_or_b32 s90, s11, s9
; SI-NEXT: s_and_b32 s9, s94, 0xff
-; SI-NEXT: s_lshl_b32 s11, s49, 8
+; SI-NEXT: s_lshl_b32 s11, s93, 8
; SI-NEXT: s_or_b32 s9, s9, s11
-; SI-NEXT: s_and_b32 s11, s35, 0xff
+; SI-NEXT: s_and_b32 s11, s28, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: v_writelane_b32 v41, s44, 11
; SI-NEXT: s_lshl_b32 s44, s37, 24
; SI-NEXT: s_or_b32 vcc_lo, s44, s11
; SI-NEXT: s_and_b32 s11, s38, 0xff
; SI-NEXT: s_lshl_b32 s44, s64, 8
; SI-NEXT: s_or_b32 s11, s11, s44
-; SI-NEXT: s_and_b32 s44, s31, 0xff
+; SI-NEXT: s_and_b32 s44, s97, 0xff
; SI-NEXT: s_lshl_b32 s44, s44, 16
; SI-NEXT: s_lshl_b32 s45, s36, 24
+; SI-NEXT: v_writelane_b32 v41, s23, 10
; SI-NEXT: s_or_b32 vcc_hi, s45, s44
; SI-NEXT: s_and_b32 s44, s65, 0xff
-; SI-NEXT: s_lshl_b32 s45, s84, 8
+; SI-NEXT: s_lshl_b32 s45, s81, 8
; SI-NEXT: s_or_b32 s44, s44, s45
-; SI-NEXT: s_and_b32 s45, s68, 0xff
+; SI-NEXT: s_and_b32 s45, s52, 0xff
+; SI-NEXT: v_writelane_b32 v41, s92, 11
; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_mov_b32 s23, s21
-; SI-NEXT: s_mov_b32 s21, s46
-; SI-NEXT: s_lshl_b32 s46, s70, 24
+; SI-NEXT: s_lshl_b32 s46, s68, 24
+; SI-NEXT: v_writelane_b32 v41, s93, 12
; SI-NEXT: s_and_b32 s44, s44, 0xffff
-; SI-NEXT: v_writelane_b32 v41, s97, 12
-; SI-NEXT: s_mov_b32 s97, s86
-; SI-NEXT: s_mov_b32 s86, s84
-; SI-NEXT: s_mov_b32 s84, s70
-; SI-NEXT: s_mov_b32 s70, s34
-; SI-NEXT: s_mov_b32 s34, s88
-; SI-NEXT: s_mov_b32 s88, s24
-; SI-NEXT: s_or_b32 s24, s46, s45
-; SI-NEXT: s_or_b32 s61, s44, s24
+; SI-NEXT: v_writelane_b32 v41, s20, 13
+; SI-NEXT: s_or_b32 s20, s46, s45
+; SI-NEXT: s_or_b32 s61, s44, s20
; SI-NEXT: s_and_b32 s44, s82, 0xff
-; SI-NEXT: s_lshl_b32 s45, s30, 8
+; SI-NEXT: s_lshl_b32 s45, s25, 8
; SI-NEXT: s_or_b32 s44, s44, s45
; SI-NEXT: s_and_b32 s45, s69, 0xff
; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_lshl_b32 s46, s78, 24
-; SI-NEXT: s_mov_b32 s95, s90
-; SI-NEXT: s_mov_b32 s90, s18
+; SI-NEXT: s_lshl_b32 s46, s22, 24
+; SI-NEXT: v_writelane_b32 v41, s96, 14
+; SI-NEXT: s_mov_b32 s96, s84
+; SI-NEXT: s_mov_b32 s84, s67
+; SI-NEXT: s_mov_b32 s67, s49
+; SI-NEXT: s_mov_b32 s49, s18
; SI-NEXT: s_or_b32 s18, s46, s45
-; SI-NEXT: s_and_b32 s45, s83, 0xff
+; SI-NEXT: s_and_b32 s45, s34, 0xff
; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_lshl_b32 s46, s47, 24
+; SI-NEXT: s_lshl_b32 s46, s57, 24
; SI-NEXT: s_and_b32 s44, s44, 0xffff
; SI-NEXT: s_or_b32 s62, s46, s45
; SI-NEXT: s_or_b32 s63, s44, s18
; SI-NEXT: s_and_b32 s44, s98, 0xff
-; SI-NEXT: s_lshl_b32 s45, s58, 8
+; SI-NEXT: s_lshl_b32 s45, s21, 8
; SI-NEXT: s_or_b32 s44, s44, s45
; SI-NEXT: s_and_b32 s45, s85, 0xff
; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_lshl_b32 s46, s56, 24
-; SI-NEXT: s_mov_b32 s76, s56
-; SI-NEXT: s_mov_b32 s56, s85
-; SI-NEXT: s_mov_b32 s85, s79
-; SI-NEXT: s_mov_b32 s79, s19
+; SI-NEXT: s_lshl_b32 s46, s59, 24
+; SI-NEXT: s_mov_b32 s23, s88
+; SI-NEXT: s_mov_b32 s88, s19
; SI-NEXT: s_or_b32 s19, s46, s45
; SI-NEXT: s_and_b32 s45, s99, 0xff
; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_lshl_b32 s46, s21, 24
+; SI-NEXT: s_lshl_b32 s46, s26, 24
; SI-NEXT: s_and_b32 s44, s44, 0xffff
; SI-NEXT: s_or_b32 s72, s46, s45
; SI-NEXT: s_or_b32 s73, s44, s19
-; SI-NEXT: s_and_b32 s44, s52, 0xff
-; SI-NEXT: s_lshl_b32 s45, s93, 8
+; SI-NEXT: s_and_b32 s44, s39, 0xff
+; SI-NEXT: s_lshl_b32 s45, s79, 8
; SI-NEXT: s_or_b32 s44, s44, s45
-; SI-NEXT: s_and_b32 s45, s16, 0xff
-; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_lshl_b32 s46, s91, 24
+; SI-NEXT: s_and_b32 s45, s89, 0xff
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_mov_b32 s47, s96
-; SI-NEXT: s_mov_b32 s96, s78
-; SI-NEXT: s_mov_b32 s78, s69
-; SI-NEXT: s_mov_b32 s69, s68
-; SI-NEXT: s_mov_b32 s68, s38
-; SI-NEXT: s_mov_b32 s38, s35
-; SI-NEXT: s_mov_b32 s35, s89
-; SI-NEXT: s_or_b32 s89, s46, s45
-; SI-NEXT: s_and_b32 s45, s50, 0xff
-; SI-NEXT: s_or_b32 s5, s5, s57
; SI-NEXT: s_lshl_b32 s45, s45, 16
-; SI-NEXT: s_lshl_b32 s46, s55, 24
+; SI-NEXT: s_lshl_b32 s46, s91, 24
+; SI-NEXT: s_or_b32 s5, s5, s30
+; SI-NEXT: s_mov_b32 s77, s47
+; SI-NEXT: s_mov_b32 s47, s24
+; SI-NEXT: s_mov_b32 s24, s83
+; SI-NEXT: s_mov_b32 s83, s70
+; SI-NEXT: s_mov_b32 s70, s38
+; SI-NEXT: s_mov_b32 s38, s91
+; SI-NEXT: s_mov_b32 s91, s23
+; SI-NEXT: s_mov_b32 s23, s17
+; SI-NEXT: s_or_b32 s17, s46, s45
+; SI-NEXT: s_and_b32 s45, s55, 0xff
; SI-NEXT: s_and_b32 s12, s12, 0xffff
+; SI-NEXT: s_and_b32 s7, s7, 0xffff
; SI-NEXT: s_and_b32 s9, s9, 0xffff
-; SI-NEXT: s_or_b32 s74, s46, s45
-; SI-NEXT: s_mov_b32 s45, s83
-; SI-NEXT: s_mov_b32 s83, s91
-; SI-NEXT: s_mov_b32 s91, s28
+; SI-NEXT: s_and_b32 s11, s11, 0xffff
+; SI-NEXT: s_mov_b32 s76, s59
+; SI-NEXT: s_mov_b32 s59, s57
+; SI-NEXT: s_mov_b32 s57, s34
+; SI-NEXT: s_mov_b32 s34, s82
+; SI-NEXT: s_mov_b32 s82, s52
+; SI-NEXT: s_mov_b32 s52, s95
+; SI-NEXT: s_mov_b32 s95, s27
+; SI-NEXT: s_mov_b32 s21, s22
+; SI-NEXT: s_mov_b32 s22, s69
+; SI-NEXT: s_mov_b32 s69, s48
+; SI-NEXT: s_mov_b32 s48, s37
+; SI-NEXT: s_mov_b32 s37, s56
+; SI-NEXT: s_lshl_b32 s45, s45, 16
+; SI-NEXT: s_lshl_b32 s46, s66, 24
+; SI-NEXT: s_and_b32 s44, s44, 0xffff
+; SI-NEXT: s_mov_b32 s56, s98
+; SI-NEXT: s_mov_b32 s98, s28
; SI-NEXT: s_and_b32 s28, s42, 0xffff
-; SI-NEXT: s_mov_b32 s59, s94
-; SI-NEXT: s_mov_b32 s94, s27
; SI-NEXT: s_and_b32 s27, s43, 0xffff
; SI-NEXT: s_or_b32 s42, s12, s4
; SI-NEXT: s_mov_b32 s43, s5
; SI-NEXT: s_lshr_b64 s[4:5], s[4:5], 16
+; SI-NEXT: s_or_b32 s7, s7, s90
; SI-NEXT: s_or_b32 s9, s9, vcc_lo
-; SI-NEXT: v_writelane_b32 v41, s4, 5
-; SI-NEXT: s_and_b32 s11, s11, 0xffff
-; SI-NEXT: v_writelane_b32 v41, s5, 6
-; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 16
; SI-NEXT: s_or_b32 s11, s11, vcc_hi
-; SI-NEXT: v_writelane_b32 v41, s4, 7
-; SI-NEXT: s_and_b32 s7, s7, 0xffff
-; SI-NEXT: s_and_b32 s44, s44, 0xffff
-; SI-NEXT: v_writelane_b32 v41, s5, 8
-; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 16
-; SI-NEXT: s_or_b32 s7, s7, s77
-; SI-NEXT: s_or_b32 s75, s44, s89
+; SI-NEXT: s_or_b32 s74, s46, s45
+; SI-NEXT: s_or_b32 s75, s44, s17
; SI-NEXT: s_and_b32 s13, s13, 0xffff
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: s_and_b32 s58, s15, 0xffff
-; SI-NEXT: s_mov_b32 s44, s82
-; SI-NEXT: s_mov_b32 s82, s81
-; SI-NEXT: s_mov_b32 s81, s55
-; SI-NEXT: s_mov_b32 s55, s54
-; SI-NEXT: s_mov_b32 s54, s51
-; SI-NEXT: s_mov_b32 s51, s37
-; SI-NEXT: s_mov_b32 s37, s16
+; SI-NEXT: s_mov_b32 s92, s16
; SI-NEXT: s_and_b32 s16, s40, 0xffff
-; SI-NEXT: s_mov_b32 s46, s98
-; SI-NEXT: s_mov_b32 s98, s93
-; SI-NEXT: s_and_b32 s93, s41, 0xffff
-; SI-NEXT: v_writelane_b32 v41, s4, 9
-; SI-NEXT: s_mov_b32 s39, s49
+; SI-NEXT: s_mov_b32 s31, s29
+; SI-NEXT: s_and_b32 s29, s41, 0xffff
+; SI-NEXT: v_writelane_b32 v41, s4, 7
+; SI-NEXT: s_mov_b32 s93, s39
+; SI-NEXT: s_mov_b32 s39, s79
+; SI-NEXT: v_writelane_b32 v41, s5, 8
; SI-NEXT: s_or_b32 s40, s13, s6
; SI-NEXT: s_mov_b32 s41, s7
-; SI-NEXT: s_lshr_b64 s[48:49], s[6:7], 16
+; SI-NEXT: s_lshr_b64 s[78:79], s[6:7], 16
; SI-NEXT: s_or_b32 s14, s14, s8
; SI-NEXT: s_mov_b32 s15, s9
+; SI-NEXT: s_lshr_b64 s[50:51], s[8:9], 16
; SI-NEXT: s_or_b32 s12, s58, s10
; SI-NEXT: s_mov_b32 s13, s11
-; SI-NEXT: v_writelane_b32 v41, s5, 10
+; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16
; SI-NEXT: s_or_b32 s10, s16, s60
; SI-NEXT: s_mov_b32 s11, s61
; SI-NEXT: s_lshr_b64 s[60:61], s[60:61], 16
-; SI-NEXT: s_or_b32 s8, s93, s62
+; SI-NEXT: s_or_b32 s8, s29, s62
; SI-NEXT: s_mov_b32 s9, s63
; SI-NEXT: s_lshr_b64 s[62:63], s[62:63], 16
; SI-NEXT: s_or_b32 s6, s28, s72
@@ -73597,68 +73601,68 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_or_b32 s4, s27, s74
; SI-NEXT: s_mov_b32 s5, s75
; SI-NEXT: s_lshr_b64 s[74:75], s[74:75], 16
-; SI-NEXT: s_mov_b32 s16, s37
-; SI-NEXT: s_mov_b32 s37, s51
-; SI-NEXT: s_mov_b32 s51, s54
-; SI-NEXT: s_mov_b32 s54, s55
-; SI-NEXT: s_mov_b32 s55, s81
-; SI-NEXT: s_mov_b32 s81, s82
-; SI-NEXT: s_mov_b32 s82, s44
-; SI-NEXT: v_readlane_b32 s44, v41, 11
-; SI-NEXT: s_mov_b32 s93, s98
-; SI-NEXT: s_mov_b32 s98, s46
-; SI-NEXT: s_mov_b32 s46, s21
-; SI-NEXT: s_mov_b32 s21, s23
-; SI-NEXT: s_mov_b32 s28, s91
-; SI-NEXT: s_mov_b32 s91, s83
-; SI-NEXT: s_mov_b32 s83, s45
-; SI-NEXT: s_mov_b32 s27, s94
-; SI-NEXT: s_mov_b32 s94, s59
-; SI-NEXT: s_lshr_b32 s23, s57, 16
-; SI-NEXT: s_lshr_b32 s57, s77, 16
-; SI-NEXT: s_lshr_b32 s59, vcc_lo, 16
+; SI-NEXT: s_mov_b32 s16, s92
+; SI-NEXT: s_mov_b32 s79, s39
+; SI-NEXT: s_mov_b32 s39, s93
+; SI-NEXT: s_mov_b32 s29, s31
+; SI-NEXT: s_mov_b32 s28, s98
+; SI-NEXT: s_mov_b32 s98, s56
+; SI-NEXT: s_lshr_b32 s35, s30, 16
+; SI-NEXT: v_readlane_b32 s46, v41, 9
+; SI-NEXT: s_lshr_b32 s56, s90, 16
+; SI-NEXT: s_lshr_b32 s58, vcc_lo, 16
; SI-NEXT: s_lshr_b32 s61, vcc_hi, 16
-; SI-NEXT: s_lshr_b32 s63, s24, 16
-; SI-NEXT: s_mov_b32 s24, s88
-; SI-NEXT: s_mov_b32 s88, s34
-; SI-NEXT: s_mov_b32 s34, s70
-; SI-NEXT: s_mov_b32 s70, s84
-; SI-NEXT: s_mov_b32 s84, s86
-; SI-NEXT: s_mov_b32 s86, s97
-; SI-NEXT: v_readlane_b32 s97, v41, 12
+; SI-NEXT: s_lshr_b32 s63, s20, 16
+; SI-NEXT: v_readlane_b32 s20, v41, 13
+; SI-NEXT: v_readlane_b32 s93, v41, 12
+; SI-NEXT: v_readlane_b32 s92, v41, 11
; SI-NEXT: s_lshr_b32 s73, s18, 16
-; SI-NEXT: s_mov_b32 s18, s90
-; SI-NEXT: s_mov_b32 s90, s95
-; SI-NEXT: s_mov_b32 s49, s39
+; SI-NEXT: s_mov_b32 s18, s49
+; SI-NEXT: s_mov_b32 s49, s67
+; SI-NEXT: s_mov_b32 s67, s84
+; SI-NEXT: s_mov_b32 s84, s96
+; SI-NEXT: v_readlane_b32 s96, v41, 14
; SI-NEXT: s_lshr_b32 s75, s19, 16
-; SI-NEXT: s_mov_b32 s19, s79
-; SI-NEXT: s_mov_b32 s79, s85
-; SI-NEXT: s_mov_b32 s85, s56
-; SI-NEXT: s_mov_b32 s56, s76
-; SI-NEXT: s_lshr_b32 s45, s89, 16
-; SI-NEXT: s_mov_b32 s89, s35
-; SI-NEXT: s_mov_b32 s35, s38
-; SI-NEXT: s_mov_b32 s38, s68
-; SI-NEXT: s_mov_b32 s68, s69
-; SI-NEXT: s_mov_b32 s69, s78
-; SI-NEXT: s_mov_b32 s78, s96
-; SI-NEXT: s_mov_b32 s96, s47
+; SI-NEXT: s_mov_b32 s19, s88
+; SI-NEXT: s_lshr_b32 s45, s17, 16
+; SI-NEXT: s_mov_b32 s17, s23
+; SI-NEXT: v_readlane_b32 s23, v41, 10
+; SI-NEXT: s_mov_b32 s30, s37
+; SI-NEXT: s_mov_b32 s37, s48
+; SI-NEXT: s_mov_b32 s48, s69
+; SI-NEXT: s_mov_b32 s69, s22
+; SI-NEXT: s_mov_b32 s22, s21
+; SI-NEXT: s_mov_b32 s27, s95
+; SI-NEXT: s_mov_b32 s95, s52
+; SI-NEXT: s_mov_b32 s52, s82
+; SI-NEXT: s_mov_b32 s82, s34
+; SI-NEXT: s_mov_b32 s88, s91
+; SI-NEXT: s_mov_b32 s91, s38
+; SI-NEXT: s_mov_b32 s38, s70
+; SI-NEXT: s_mov_b32 s70, s83
+; SI-NEXT: s_mov_b32 s83, s24
+; SI-NEXT: s_mov_b32 s24, s47
+; SI-NEXT: s_mov_b32 s47, s77
+; SI-NEXT: s_mov_b32 s34, s57
+; SI-NEXT: s_mov_b32 s57, s59
+; SI-NEXT: s_mov_b32 s59, s76
; SI-NEXT: s_mov_b64 s[76:77], 0
; SI-NEXT: s_branch .LBB99_3
; SI-NEXT: .LBB99_2:
; SI-NEXT: ; implicit-def: $sgpr4
; SI-NEXT: s_mov_b64 s[76:77], -1
-; SI-NEXT: v_writelane_b32 v41, s4, 5
-; SI-NEXT: v_writelane_b32 v41, s5, 6
-; SI-NEXT: ; implicit-def: $sgpr4
+; SI-NEXT: v_writelane_b32 v41, s4, 7
; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: ; implicit-def: $sgpr23
+; SI-NEXT: v_writelane_b32 v41, s5, 8
+; SI-NEXT: ; implicit-def: $sgpr35
; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: ; implicit-def: $sgpr48
-; SI-NEXT: ; implicit-def: $sgpr57
+; SI-NEXT: ; implicit-def: $sgpr78
+; SI-NEXT: ; implicit-def: $sgpr56
; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: ; implicit-def: $sgpr59
+; SI-NEXT: ; implicit-def: $sgpr50
+; SI-NEXT: ; implicit-def: $sgpr58
; SI-NEXT: ; implicit-def: $sgpr12
+; SI-NEXT: ; implicit-def: $sgpr44
; SI-NEXT: ; implicit-def: $sgpr61
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr60
@@ -73669,39 +73673,31 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr72
; SI-NEXT: ; implicit-def: $sgpr75
+; SI-NEXT: ; implicit-def: $sgpr4
; SI-NEXT: ; implicit-def: $sgpr74
; SI-NEXT: ; implicit-def: $sgpr45
-; SI-NEXT: v_writelane_b32 v41, s4, 7
-; SI-NEXT: v_writelane_b32 v41, s5, 8
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: v_writelane_b32 v41, s4, 9
-; SI-NEXT: v_writelane_b32 v41, s5, 10
-; SI-NEXT: ; implicit-def: $sgpr4
; SI-NEXT: .LBB99_3: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[76:77]
-; SI-NEXT: v_readlane_b32 s76, v41, 5
-; SI-NEXT: v_readlane_b32 s77, v41, 6
-; SI-NEXT: s_mov_b32 s58, s76
; SI-NEXT: v_readlane_b32 s76, v41, 7
; SI-NEXT: v_readlane_b32 s77, v41, 8
; SI-NEXT: s_cbranch_vccnz .LBB99_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_add_i32 s21, s21, 3
+; SI-NEXT: s_add_i32 s21, s47, 3
; SI-NEXT: s_and_b32 s4, s21, 0xff
-; SI-NEXT: s_lshl_b32 s5, s79, 8
-; SI-NEXT: s_add_i32 s50, s50, 3
+; SI-NEXT: s_lshl_b32 s5, s96, 8
+; SI-NEXT: s_add_i32 s50, s55, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s6, s50, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s55, 24
+; SI-NEXT: s_lshl_b32 s5, s66, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_add_i32 s39, s52, 3
+; SI-NEXT: s_add_i32 s39, s39, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s5, s39, 0xff
-; SI-NEXT: s_lshl_b32 s6, s93, 8
-; SI-NEXT: s_add_i32 s79, s16, 3
+; SI-NEXT: s_lshl_b32 s6, s79, 8
+; SI-NEXT: s_add_i32 s79, s89, 3
; SI-NEXT: s_or_b32 s5, s6, s5
; SI-NEXT: s_and_b32 s7, s79, 0xff
; SI-NEXT: s_addk_i32 s5, 0x300
@@ -73709,21 +73705,20 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_and_b32 s5, s5, 0xffff
; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_add_i32 s23, s23, 3
; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: v_readlane_b32 s6, v41, 4
-; SI-NEXT: s_add_i32 s23, s6, 3
; SI-NEXT: s_and_b32 s6, s23, 0xff
-; SI-NEXT: s_lshl_b32 s7, s97, 8
+; SI-NEXT: s_lshl_b32 s7, s24, 8
; SI-NEXT: s_add_i32 s99, s99, 3
; SI-NEXT: s_or_b32 s6, s7, s6
; SI-NEXT: s_and_b32 s8, s99, 0xff
; SI-NEXT: s_addk_i32 s6, 0x300
-; SI-NEXT: s_lshl_b32 s7, s46, 24
+; SI-NEXT: s_lshl_b32 s7, s26, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s6, s6, 0xffff
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_add_i32 s98, s98, 3
-; SI-NEXT: v_readlane_b32 s8, v41, 3
+; SI-NEXT: v_readlane_b32 s8, v41, 6
; SI-NEXT: s_or_b32 s6, s7, s6
; SI-NEXT: s_and_b32 s7, s98, 0xff
; SI-NEXT: s_lshl_b32 s8, s8, 8
@@ -73731,32 +73726,31 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_and_b32 s9, s85, 0xff
; SI-NEXT: s_addk_i32 s7, 0x300
-; SI-NEXT: s_lshl_b32 s8, s56, 24
+; SI-NEXT: s_lshl_b32 s8, s59, 24
; SI-NEXT: s_lshl_b32 s9, s9, 16
; SI-NEXT: s_and_b32 s7, s7, 0xffff
; SI-NEXT: s_or_b32 s8, s8, s9
-; SI-NEXT: s_add_i32 s96, s96, 3
+; SI-NEXT: s_add_i32 s96, s46, 3
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_and_b32 s8, s96, 0xff
-; SI-NEXT: s_lshl_b32 s9, s44, 8
-; SI-NEXT: s_add_i32 s83, s83, 3
+; SI-NEXT: s_lshl_b32 s9, s83, 8
+; SI-NEXT: s_add_i32 s83, s34, 3
; SI-NEXT: s_or_b32 s8, s9, s8
-; SI-NEXT: v_readlane_b32 s9, v41, 2
; SI-NEXT: s_and_b32 s10, s83, 0xff
; SI-NEXT: s_addk_i32 s8, 0x300
-; SI-NEXT: s_lshl_b32 s9, s9, 24
+; SI-NEXT: s_lshl_b32 s9, s57, 24
; SI-NEXT: s_lshl_b32 s10, s10, 16
; SI-NEXT: s_and_b32 s8, s8, 0xffff
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_add_i32 s82, s82, 3
; SI-NEXT: s_or_b32 s8, s9, s8
; SI-NEXT: s_and_b32 s9, s82, 0xff
-; SI-NEXT: s_lshl_b32 s10, s30, 8
+; SI-NEXT: s_lshl_b32 s10, s25, 8
; SI-NEXT: s_add_i32 s69, s69, 3
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s11, s69, 0xff
; SI-NEXT: s_addk_i32 s9, 0x300
-; SI-NEXT: s_lshl_b32 s10, s78, 24
+; SI-NEXT: s_lshl_b32 s10, s22, 24
; SI-NEXT: s_lshl_b32 s11, s11, 16
; SI-NEXT: s_and_b32 s9, s9, 0xffff
; SI-NEXT: s_or_b32 s10, s10, s11
@@ -73764,7 +73758,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s80, 0xff
; SI-NEXT: s_lshl_b32 s11, s87, 8
-; SI-NEXT: s_add_i32 s66, s66, 3
+; SI-NEXT: s_add_i32 s66, s84, 3
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: s_and_b32 s12, s66, 0xff
; SI-NEXT: s_addk_i32 s10, 0x300
@@ -73775,20 +73769,20 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_add_i32 s65, s65, 3
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: s_and_b32 s11, s65, 0xff
-; SI-NEXT: s_lshl_b32 s12, s84, 8
-; SI-NEXT: s_add_i32 s52, s68, 3
+; SI-NEXT: s_lshl_b32 s12, s81, 8
+; SI-NEXT: s_add_i32 s52, s52, 3
; SI-NEXT: s_or_b32 s11, s12, s11
; SI-NEXT: s_and_b32 s13, s52, 0xff
; SI-NEXT: s_addk_i32 s11, 0x300
-; SI-NEXT: s_lshl_b32 s12, s70, 24
+; SI-NEXT: s_lshl_b32 s12, s68, 24
; SI-NEXT: s_lshl_b32 s13, s13, 16
; SI-NEXT: s_and_b32 s11, s11, 0xffff
; SI-NEXT: s_or_b32 s12, s12, s13
-; SI-NEXT: s_add_i32 s55, s81, 3
+; SI-NEXT: s_add_i32 s55, s71, 3
; SI-NEXT: s_or_b32 s11, s12, s11
; SI-NEXT: s_and_b32 s12, s55, 0xff
-; SI-NEXT: s_lshl_b32 s13, s71, 8
-; SI-NEXT: s_add_i32 s48, s34, 3
+; SI-NEXT: s_lshl_b32 s13, s70, 8
+; SI-NEXT: s_add_i32 s48, s48, 3
; SI-NEXT: s_or_b32 s12, s13, s12
; SI-NEXT: s_and_b32 s14, s48, 0xff
; SI-NEXT: s_addk_i32 s12, 0x300
@@ -73800,7 +73794,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_or_b32 s12, s13, s12
; SI-NEXT: s_and_b32 s13, s38, 0xff
; SI-NEXT: s_lshl_b32 s14, s64, 8
-; SI-NEXT: s_add_i32 s31, s31, 3
+; SI-NEXT: s_add_i32 s31, s97, 3
; SI-NEXT: s_or_b32 s13, s14, s13
; SI-NEXT: s_and_b32 s15, s31, 0xff
; SI-NEXT: s_addk_i32 s13, 0x300
@@ -73808,84 +73802,85 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s15, s15, 16
; SI-NEXT: s_and_b32 s13, s13, 0xffff
; SI-NEXT: s_or_b32 s14, s14, s15
-; SI-NEXT: s_add_i32 s36, s54, 3
+; SI-NEXT: s_add_i32 s36, s92, 3
; SI-NEXT: s_or_b32 s13, s14, s13
; SI-NEXT: s_and_b32 s14, s36, 0xff
; SI-NEXT: s_lshl_b32 s15, s53, 8
-; SI-NEXT: s_add_i32 s95, s88, 3
+; SI-NEXT: s_add_i32 s95, s95, 3
; SI-NEXT: s_or_b32 s14, s15, s14
; SI-NEXT: s_and_b32 s21, s95, 0xff
; SI-NEXT: s_addk_i32 s14, 0x300
-; SI-NEXT: s_lshl_b32 s15, s51, 24
+; SI-NEXT: s_lshl_b32 s15, s49, 24
; SI-NEXT: s_lshl_b32 s21, s21, 16
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: s_or_b32 s15, s15, s21
; SI-NEXT: s_add_i32 s94, s94, 3
; SI-NEXT: s_or_b32 s14, s15, s14
; SI-NEXT: s_and_b32 s15, s94, 0xff
-; SI-NEXT: s_lshl_b32 s21, s49, 8
-; SI-NEXT: s_add_i32 s91, s35, 3
+; SI-NEXT: s_lshl_b32 s21, s93, 8
+; SI-NEXT: s_add_i32 s91, s28, 3
; SI-NEXT: s_or_b32 s15, s21, s15
-; SI-NEXT: s_and_b32 s16, s91, 0xff
+; SI-NEXT: s_and_b32 s22, s91, 0xff
; SI-NEXT: s_addk_i32 s15, 0x300
; SI-NEXT: s_lshl_b32 s21, s37, 24
-; SI-NEXT: s_lshl_b32 s16, s16, 16
+; SI-NEXT: s_lshl_b32 s22, s22, 16
; SI-NEXT: s_and_b32 s15, s15, 0xffff
-; SI-NEXT: s_or_b32 s21, s21, s16
-; SI-NEXT: s_add_i32 s24, s24, 3
+; SI-NEXT: s_or_b32 s21, s21, s22
+; SI-NEXT: s_add_i32 s24, s29, 3
+; SI-NEXT: v_readlane_b32 s23, v41, 1
+; SI-NEXT: s_add_i32 s16, s16, 3
; SI-NEXT: s_or_b32 s15, s21, s15
; SI-NEXT: s_and_b32 s21, s24, 0xff
-; SI-NEXT: s_lshl_b32 s16, s25, 8
-; SI-NEXT: s_add_i32 s26, s26, 3
-; SI-NEXT: s_or_b32 s21, s16, s21
-; SI-NEXT: s_and_b32 s23, s26, 0xff
-; SI-NEXT: s_addk_i32 s21, 0x300
-; SI-NEXT: s_lshl_b32 s16, s27, 24
-; SI-NEXT: s_lshl_b32 s23, s23, 16
-; SI-NEXT: s_and_b32 s21, s21, 0xffff
-; SI-NEXT: s_or_b32 s16, s16, s23
-; SI-NEXT: s_or_b32 s21, s16, s21
-; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_add_i32 s40, s21, 0x3000000
-; SI-NEXT: s_and_b32 s21, s28, 0xff
-; SI-NEXT: s_lshl_b32 s16, s29, 8
-; SI-NEXT: s_lshl_b32 s23, s89, 24
-; SI-NEXT: s_add_i32 s89, s90, 3
-; SI-NEXT: s_or_b32 s21, s16, s21
-; SI-NEXT: s_and_b32 s16, s89, 0xff
-; SI-NEXT: s_addk_i32 s21, 0x300
-; SI-NEXT: s_lshl_b32 s16, s16, 16
-; SI-NEXT: s_and_b32 s21, s21, 0xffff
-; SI-NEXT: s_or_b32 s16, s23, s16
-; SI-NEXT: s_or_b32 s16, s16, s21
-; SI-NEXT: s_add_i32 s41, s16, 0x3000000
-; SI-NEXT: s_add_i32 s16, s92, 3
+; SI-NEXT: s_lshl_b32 s22, s30, 8
+; SI-NEXT: s_add_i32 s26, s23, 3
; SI-NEXT: s_and_b32 s16, s16, 0xff
; SI-NEXT: s_lshl_b32 s17, s17, 8
; SI-NEXT: s_add_i32 s18, s18, 3
+; SI-NEXT: s_or_b32 s21, s22, s21
+; SI-NEXT: s_and_b32 s23, s26, 0xff
; SI-NEXT: s_or_b32 s16, s17, s16
; SI-NEXT: s_and_b32 s18, s18, 0xff
+; SI-NEXT: s_addk_i32 s21, 0x300
+; SI-NEXT: s_lshl_b32 s22, s27, 24
+; SI-NEXT: s_lshl_b32 s23, s23, 16
; SI-NEXT: s_addk_i32 s16, 0x300
; SI-NEXT: s_lshl_b32 s17, s19, 24
; SI-NEXT: s_lshl_b32 s18, s18, 16
+; SI-NEXT: s_and_b32 s21, s21, 0xffff
+; SI-NEXT: s_or_b32 s22, s22, s23
; SI-NEXT: s_and_b32 s16, s16, 0xffff
; SI-NEXT: s_or_b32 s17, s17, s18
+; SI-NEXT: s_or_b32 s21, s22, s21
; SI-NEXT: s_or_b32 s16, s17, s16
+; SI-NEXT: s_add_i32 s40, s21, 0x3000000
+; SI-NEXT: v_readlane_b32 s21, v41, 0
; SI-NEXT: s_add_i32 s42, s16, 0x3000000
-; SI-NEXT: s_add_i32 s16, s20, 3
-; SI-NEXT: v_readlane_b32 s17, v41, 1
+; SI-NEXT: v_readlane_b32 s16, v41, 5
+; SI-NEXT: s_add_i32 s28, s21, 3
+; SI-NEXT: s_add_i32 s16, s16, 3
+; SI-NEXT: v_readlane_b32 s17, v41, 4
+; SI-NEXT: v_readlane_b32 s18, v41, 3
+; SI-NEXT: s_and_b32 s21, s28, 0xff
+; SI-NEXT: s_lshl_b32 s22, s88, 8
+; SI-NEXT: s_add_i32 s89, s54, 3
; SI-NEXT: s_and_b32 s16, s16, 0xff
; SI-NEXT: s_lshl_b32 s17, s17, 8
-; SI-NEXT: s_add_i32 s18, s22, 3
+; SI-NEXT: s_add_i32 s18, s18, 3
+; SI-NEXT: s_or_b32 s21, s22, s21
+; SI-NEXT: s_and_b32 s22, s89, 0xff
; SI-NEXT: s_or_b32 s16, s17, s16
-; SI-NEXT: v_readlane_b32 s17, v41, 0
+; SI-NEXT: v_readlane_b32 s17, v41, 2
; SI-NEXT: s_and_b32 s18, s18, 0xff
+; SI-NEXT: s_addk_i32 s21, 0x300
+; SI-NEXT: s_lshl_b32 s20, s20, 24
+; SI-NEXT: s_lshl_b32 s22, s22, 16
; SI-NEXT: s_addk_i32 s16, 0x300
; SI-NEXT: s_lshl_b32 s17, s17, 24
; SI-NEXT: s_lshl_b32 s18, s18, 16
+; SI-NEXT: s_and_b32 s21, s21, 0xffff
+; SI-NEXT: s_or_b32 s20, s20, s22
; SI-NEXT: s_and_b32 s16, s16, 0xffff
; SI-NEXT: s_or_b32 s17, s17, s18
-; SI-NEXT: s_or_b32 s16, s17, s16
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
; SI-NEXT: s_add_i32 s5, s5, 0x3000000
; SI-NEXT: s_add_i32 s6, s6, 0x3000000
@@ -73896,49 +73891,49 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_add_i32 s11, s11, 0x3000000
; SI-NEXT: s_add_i32 s12, s12, 0x3000000
; SI-NEXT: s_add_i32 s13, s13, 0x3000000
-; SI-NEXT: s_add_i32 s43, s16, 0x3000000
+; SI-NEXT: s_or_b32 s20, s20, s21
+; SI-NEXT: s_or_b32 s16, s17, s16
; SI-NEXT: s_add_i32 s14, s14, 0x3000000
; SI-NEXT: s_add_i32 s15, s15, 0x3000000
-; SI-NEXT: s_lshr_b64 s[58:59], s[42:43], 16
-; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16
+; SI-NEXT: s_add_i32 s41, s20, 0x3000000
+; SI-NEXT: s_add_i32 s43, s16, 0x3000000
+; SI-NEXT: s_lshr_b64 s[44:45], s[12:13], 16
; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16
; SI-NEXT: s_lshr_b64 s[62:63], s[8:9], 16
; SI-NEXT: s_lshr_b64 s[72:73], s[6:7], 16
; SI-NEXT: s_lshr_b64 s[74:75], s[4:5], 16
-; SI-NEXT: s_lshr_b64 s[48:49], s[40:41], 16
-; SI-NEXT: s_lshr_b64 s[76:77], s[14:15], 16
-; SI-NEXT: v_writelane_b32 v41, s16, 9
-; SI-NEXT: s_lshr_b32 s23, s43, 16
-; SI-NEXT: s_lshr_b32 s57, s41, 16
-; SI-NEXT: s_lshr_b32 s59, s15, 16
+; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16
+; SI-NEXT: s_lshr_b64 s[78:79], s[40:41], 16
+; SI-NEXT: s_lshr_b64 s[50:51], s[14:15], 16
+; SI-NEXT: s_lshr_b32 s35, s43, 16
+; SI-NEXT: s_lshr_b32 s56, s41, 16
+; SI-NEXT: s_lshr_b32 s58, s15, 16
; SI-NEXT: s_lshr_b32 s61, s13, 16
; SI-NEXT: s_lshr_b32 s63, s11, 16
; SI-NEXT: s_lshr_b32 s73, s9, 16
; SI-NEXT: s_lshr_b32 s75, s7, 16
; SI-NEXT: s_lshr_b32 s45, s5, 16
-; SI-NEXT: v_writelane_b32 v41, s17, 10
; SI-NEXT: .LBB99_5: ; %end
; SI-NEXT: s_and_b32 s16, s42, 0xffff
-; SI-NEXT: s_lshl_b32 s17, s58, 16
+; SI-NEXT: s_lshl_b32 s17, s76, 16
; SI-NEXT: s_or_b32 s16, s16, s17
; SI-NEXT: s_and_b32 s17, s43, 0xffff
-; SI-NEXT: s_lshl_b32 s18, s23, 16
+; SI-NEXT: s_lshl_b32 s18, s35, 16
; SI-NEXT: s_or_b32 s17, s17, s18
; SI-NEXT: s_and_b32 s18, s40, 0xffff
-; SI-NEXT: s_lshl_b32 s19, s48, 16
+; SI-NEXT: s_lshl_b32 s19, s78, 16
; SI-NEXT: s_or_b32 s18, s18, s19
; SI-NEXT: s_and_b32 s19, s41, 0xffff
-; SI-NEXT: s_lshl_b32 s20, s57, 16
+; SI-NEXT: s_lshl_b32 s20, s56, 16
; SI-NEXT: s_or_b32 s19, s19, s20
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: s_lshl_b32 s20, s76, 16
+; SI-NEXT: s_lshl_b32 s20, s50, 16
; SI-NEXT: s_or_b32 s14, s14, s20
; SI-NEXT: s_and_b32 s15, s15, 0xffff
-; SI-NEXT: s_lshl_b32 s20, s59, 16
+; SI-NEXT: s_lshl_b32 s20, s58, 16
; SI-NEXT: s_or_b32 s15, s15, s20
-; SI-NEXT: v_readlane_b32 s20, v41, 9
; SI-NEXT: s_and_b32 s12, s12, 0xffff
-; SI-NEXT: s_lshl_b32 s20, s20, 16
+; SI-NEXT: s_lshl_b32 s20, s44, 16
; SI-NEXT: s_or_b32 s12, s12, s20
; SI-NEXT: s_and_b32 s13, s13, 0xffff
; SI-NEXT: s_lshl_b32 s20, s61, 16
@@ -73967,7 +73962,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_and_b32 s5, s5, 0xffff
; SI-NEXT: s_lshl_b32 s20, s45, 16
; SI-NEXT: s_or_b32 s5, s5, s20
-; SI-NEXT: v_readlane_b32 s21, v41, 10
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index c4a3fb1f052cb..ccc46cc5df39e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -1306,8 +1306,8 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s6, s6, s5
-; SI-NEXT: s_or_b32 s4, s4, s6
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s20, 0xff
; SI-NEXT: s_lshl_b32 s6, s21, 8
; SI-NEXT: s_or_b32 s5, s5, s6
@@ -1315,8 +1315,8 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s7, s7, s6
-; SI-NEXT: s_or_b32 s5, s5, s7
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s24, 0xff
; SI-NEXT: s_lshl_b32 s7, s25, 8
; SI-NEXT: s_or_b32 s6, s6, s7
@@ -1324,8 +1324,8 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_lshl_b32 s8, s27, 24
; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s8, s8, s7
-; SI-NEXT: s_or_b32 s6, s6, s8
+; SI-NEXT: s_or_b32 s7, s8, s7
+; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_cbranch_execnz .LBB7_3
; SI-NEXT: .LBB7_2: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -1419,9 +1419,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s6, s6, s5
+; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s6, 16
+; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -1431,9 +1431,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s7, s7, s6
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s7, 16
+; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -1443,9 +1443,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s8, s8, s7
+; VI-NEXT: s_or_b32 s7, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s8, 16
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -1554,26 +1554,26 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s6, s6, s8
+; GFX11-NEXT: s_or_b32 s5, s6, s8
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s6, 16
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s6, s8, 16
+; GFX11-NEXT: s_or_b32 s6, s8, s9
; GFX11-NEXT: s_and_b32 s8, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s10, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s8, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
@@ -3275,10 +3275,10 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s18, 0xffff
; SI-NEXT: s_lshl_b32 s4, s4, 16
-; SI-NEXT: s_or_b32 s7, s7, s4
+; SI-NEXT: s_or_b32 s4, s7, s4
; SI-NEXT: v_mov_b32_e32 v0, s5
; SI-NEXT: v_mov_b32_e32 v1, s6
-; SI-NEXT: v_mov_b32_e32 v2, s7
+; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB17_4:
; SI-NEXT: ; implicit-def: $sgpr6
@@ -4589,8 +4589,8 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s6, s6, s5
-; SI-NEXT: s_or_b32 s4, s4, s6
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s20, 0xff
; SI-NEXT: s_lshl_b32 s6, s21, 8
; SI-NEXT: s_or_b32 s5, s5, s6
@@ -4598,8 +4598,8 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s7, s7, s6
-; SI-NEXT: s_or_b32 s5, s5, s7
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s24, 0xff
; SI-NEXT: s_lshl_b32 s7, s25, 8
; SI-NEXT: s_or_b32 s6, s6, s7
@@ -4607,8 +4607,8 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_lshl_b32 s8, s27, 24
; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s8, s8, s7
-; SI-NEXT: s_or_b32 s6, s6, s8
+; SI-NEXT: s_or_b32 s7, s8, s7
+; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_cbranch_execnz .LBB23_3
; SI-NEXT: .LBB23_2: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -4702,9 +4702,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s6, s6, s5
+; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s6, 16
+; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -4714,9 +4714,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s7, s7, s6
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s7, 16
+; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -4726,9 +4726,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s8, s8, s7
+; VI-NEXT: s_or_b32 s7, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s8, 16
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -4837,26 +4837,26 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s6, s6, s8
+; GFX11-NEXT: s_or_b32 s5, s6, s8
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s6, 16
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s6, s8, 16
+; GFX11-NEXT: s_or_b32 s6, s8, s9
; GFX11-NEXT: s_and_b32 s8, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s10, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s8, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
@@ -7520,9 +7520,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s6, s6, s5
+; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s6, 16
+; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -7532,9 +7532,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s7, s7, s6
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s7, 16
+; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -7544,9 +7544,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s8, s8, s7
+; VI-NEXT: s_or_b32 s7, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s8, 16
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -7655,26 +7655,26 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s9
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s6, s7, 16
+; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s10, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
@@ -9399,9 +9399,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s6, s6, s5
+; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s6, 16
+; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -9411,9 +9411,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s7, s7, s6
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s7, 16
+; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -9423,9 +9423,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s8, s8, s7
+; VI-NEXT: s_or_b32 s7, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s8, 16
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -9534,26 +9534,26 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s9
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s6, s7, 16
+; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s10, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
@@ -10833,9 +10833,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: s_or_b32 s6, s6, s5
+; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s6, 16
+; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
@@ -10845,9 +10845,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_addk_i32 s5, 0x300
-; VI-NEXT: s_or_b32 s7, s7, s6
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_lshl_b32 s6, s7, 16
+; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
@@ -10857,9 +10857,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_addk_i32 s6, 0x300
-; VI-NEXT: s_or_b32 s8, s8, s7
+; VI-NEXT: s_or_b32 s7, s8, s7
; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s8, 16
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
@@ -10968,26 +10968,26 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s17, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s9
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s6, s7, 16
+; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s9, s21, 8
; GFX11-NEXT: s_and_b32 s10, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s11, s23, 8
; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s10, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s9
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
index 3d02d70d2fdbb..3cdcac3faa999 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -237,7 +237,7 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
; MUBUF-NEXT: s_movk_i32 s5, 0x12c4
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
-; MUBUF-NEXT: s_or_b32 s4, s4, 0x12c0
+; MUBUF-NEXT: s_addk_i32 s4, 0x12c0
; MUBUF-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll
index eff0680fe9a31..5283233a0b461 100644
--- a/llvm/test/CodeGen/AMDGPU/min.ll
+++ b/llvm/test/CodeGen/AMDGPU/min.ll
@@ -3798,36 +3798,36 @@ define amdgpu_kernel void @s_test_umin_ult_v8i16(ptr addrspace(1) %out, <8 x i16
; VI-NEXT: s_min_u32 s3, s3, s7
; VI-NEXT: s_min_u32 s10, s11, s10
; VI-NEXT: s_lshl_b32 s3, s3, 16
-; VI-NEXT: s_or_b32 s3, s10, s3
-; VI-NEXT: s_and_b32 s7, s6, 0xffff
-; VI-NEXT: s_and_b32 s10, s2, 0xffff
+; VI-NEXT: s_or_b32 s10, s10, s3
+; VI-NEXT: s_and_b32 s3, s6, 0xffff
+; VI-NEXT: s_and_b32 s7, s2, 0xffff
; VI-NEXT: s_lshr_b32 s6, s6, 16
; VI-NEXT: s_lshr_b32 s2, s2, 16
; VI-NEXT: s_min_u32 s2, s2, s6
-; VI-NEXT: s_min_u32 s7, s10, s7
+; VI-NEXT: s_min_u32 s3, s7, s3
; VI-NEXT: s_lshl_b32 s2, s2, 16
-; VI-NEXT: s_or_b32 s2, s7, s2
-; VI-NEXT: s_and_b32 s6, s5, 0xffff
-; VI-NEXT: s_and_b32 s7, s1, 0xffff
+; VI-NEXT: s_or_b32 s3, s3, s2
+; VI-NEXT: s_and_b32 s2, s5, 0xffff
+; VI-NEXT: s_and_b32 s6, s1, 0xffff
; VI-NEXT: s_lshr_b32 s5, s5, 16
; VI-NEXT: s_lshr_b32 s1, s1, 16
; VI-NEXT: s_min_u32 s1, s1, s5
-; VI-NEXT: s_min_u32 s6, s7, s6
+; VI-NEXT: s_min_u32 s2, s6, s2
; VI-NEXT: s_lshl_b32 s1, s1, 16
-; VI-NEXT: s_or_b32 s1, s6, s1
-; VI-NEXT: s_and_b32 s5, s4, 0xffff
-; VI-NEXT: s_and_b32 s6, s0, 0xffff
+; VI-NEXT: s_or_b32 s2, s2, s1
+; VI-NEXT: s_and_b32 s1, s4, 0xffff
+; VI-NEXT: s_and_b32 s5, s0, 0xffff
; VI-NEXT: s_lshr_b32 s4, s4, 16
; VI-NEXT: s_lshr_b32 s0, s0, 16
; VI-NEXT: s_min_u32 s0, s0, s4
-; VI-NEXT: s_min_u32 s5, s6, s5
+; VI-NEXT: s_min_u32 s1, s5, s1
; VI-NEXT: s_lshl_b32 s0, s0, 16
-; VI-NEXT: s_or_b32 s0, s5, s0
+; VI-NEXT: s_or_b32 s1, s1, s0
; VI-NEXT: v_mov_b32_e32 v4, s8
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: v_mov_b32_e32 v3, s3
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_mov_b32_e32 v3, s10
; VI-NEXT: v_mov_b32_e32 v5, s9
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/s-barrier.ll b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
index 82885b07e569c..35b86998c9cac 100644
--- a/llvm/test/CodeGen/AMDGPU/s-barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
@@ -88,7 +88,7 @@ define amdgpu_kernel void @kernel1(ptr addrspace(1) %out, ptr addrspace(3) %in)
; GFX12-SDAG-NEXT: s_lshr_b32 s2, s2, 4
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: s_and_b32 s2, s2, 63
-; GFX12-SDAG-NEXT: s_or_b32 s3, s2, 0x90000
+; GFX12-SDAG-NEXT: s_or_b32 s3, 0x90000, s2
; GFX12-SDAG-NEXT: s_cmp_eq_u32 0, 0
; GFX12-SDAG-NEXT: s_mov_b32 m0, s3
; GFX12-SDAG-NEXT: s_barrier_init m0
More information about the llvm-commits
mailing list