[llvm] add ext/trunc pseudo for 16/32 conversion (PR #189189)
Guo Chen via llvm-commits
llvm-commits at lists.llvm.org
Sat Mar 28 12:29:13 PDT 2026
https://github.com/broxigarchen created https://github.com/llvm/llvm-project/pull/189189
None
>From 6caa9edd34d2703b7aae7519e53531ec07af76b7 Mon Sep 17 00:00:00 2001
From: guochen2 <guochen2 at amd.com>
Date: Sat, 28 Mar 2026 15:26:36 -0400
Subject: [PATCH] add ext/trunc pseudo for 16/32 conversion
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 76 ++++++++
llvm/lib/Target/AMDGPU/SIISelLowering.h | 2 +
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 7 +
llvm/lib/Target/AMDGPU/SIInstructions.td | 19 ++
llvm/lib/Target/AMDGPU/VOP1Instructions.td | 17 +-
.../inst-select-build-vector-trunc.v2s16.mir | 18 +-
.../GlobalISel/inst-select-fceil.s16.mir | 55 ++++--
.../GlobalISel/inst-select-ffloor.s16.mir | 44 +++--
...st-select-pseudo-scalar-transcendental.mir | 24 +--
.../inst-select-scalar-float-sop1.mir | 53 +++---
.../inst-select-scalar-float-sop2.mir | 53 +++---
.../AMDGPU/GlobalISel/inst-select-sitofp.mir | 10 +-
.../AMDGPU/GlobalISel/inst-select-uitofp.mir | 10 +-
.../CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll | 168 ++++++++---------
.../CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll | 164 ++++++++--------
llvm/test/CodeGen/AMDGPU/function-args.ll | 112 ++++-------
.../llvm.amdgcn.image.gather4.a16.dim.ll | 69 +++----
.../AMDGPU/llvm.amdgcn.image.msaa.load.ll | 10 +-
.../llvm.amdgcn.image.sample.a16.dim.ll | 178 +++++++++---------
.../llvm.amdgcn.image.sample.g16.encode.ll | 6 +-
.../AMDGPU/llvm.amdgcn.image.sample.g16.ll | 6 +-
.../CodeGen/AMDGPU/llvm.amdgcn.mov.dpp8.ll | 3 +
.../AMDGPU/llvm.amdgcn.waitcnt.out.order.ll | 34 ++--
llvm/test/CodeGen/AMDGPU/load-constant-i1.ll | 129 ++++++-------
24 files changed, 687 insertions(+), 580 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 39d1e762ac08a..81c4dd4ae5041 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7020,6 +7020,13 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MRI.setSimpleHint(MI.getOperand(0).getReg(), MI.getOperand(6).getReg());
return BB;
}
+ case AMDGPU::EXT_SRC16_V32_PSEUDO:
+ case AMDGPU::EXT_SRC16_S32_PSEUDO:
+ case AMDGPU::TRUNC_SRC32_V16_PSEUDO:
+ case AMDGPU::TRUNC_SRC32_S16_PSEUDO: {
+ LowerTrue16ExtTruncPseudo(MI);
+ return BB;
+ }
default:
if (TII->isImage(MI) || TII->isMUBUF(MI)) {
if (!MI.mayStore())
@@ -18356,6 +18363,72 @@ void SITargetLowering::AddMemOpInit(MachineInstr &MI) const {
MI.tieOperands(DstIdx, MI.getNumOperands() - 1);
}
+void SITargetLowering::LowerTrue16ExtTruncPseudo(MachineInstr &MI) const {
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
+ const DebugLoc &DL = MI.getDebugLoc();
+ MachineFunction *MF = MI.getMF();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ auto BB = MI.getParent();
+ switch (MI.getOpcode()) {
+ case AMDGPU::EXT_SRC16_V32_PSEUDO:
+ case AMDGPU::EXT_SRC16_S32_PSEUDO: {
+ // v32 = ext vgpr16 => reg_seq
+ // v32 = ext sreg32 => copy
+ // s32 = ext vgpr16 => reg_seq + copy
+ // s32 = ext sreg32 => copy
+ if (MRI.constrainRegClass(MI.getOperand(1).getReg(),
+ &AMDGPU::VGPR_16RegClass)) {
+ Register NewDstReg;
+ Register Hi16Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), Hi16Reg);
+ if (MI.getOpcode() == AMDGPU::EXT_SRC16_S32_PSEUDO)
+ NewDstReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ else
+ NewDstReg = MI.getOperand(0).getReg();
+
+ BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), NewDstReg)
+ .add(MI.getOperand(1))
+ .addImm(AMDGPU::lo16)
+ .addReg(Hi16Reg)
+ .addImm(AMDGPU::hi16);
+ if (MI.getOpcode() == AMDGPU::EXT_SRC16_S32_PSEUDO)
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), MI.getOperand(0).getReg())
+ .addReg(NewDstReg);
+ } else {
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), MI.getOperand(0).getReg())
+ .add(MI.getOperand(1));
+ }
+ break;
+ }
+ case AMDGPU::TRUNC_SRC32_V16_PSEUDO: {
+ // v16 = trunc vgpr32 => copy
+ // v16 = trunc sreg32 => copy + copy
+ Register NewSrcReg;
+ if (MRI.constrainRegClass(MI.getOperand(1).getReg(),
+ &AMDGPU::SReg_32RegClass)) {
+ NewSrcReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), NewSrcReg)
+ .add(MI.getOperand(1));
+ } else {
+ NewSrcReg = MI.getOperand(1).getReg();
+ }
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), MI.getOperand(0).getReg())
+ .addReg(NewSrcReg, {}, AMDGPU::lo16);
+ break;
+ }
+ case AMDGPU::TRUNC_SRC32_S16_PSEUDO: {
+ // s16 = trunc vgpr32 => copy
+ // s16 = trunc sreg32 => copy
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), MI.getOperand(0).getReg())
+ .add(MI.getOperand(1));
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid True16 Ext/Trunc Pseudo");
+ }
+ MI.eraseFromParent();
+}
+
/// Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
@@ -18390,6 +18463,9 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
if (TII->isImage(MI))
TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::vaddr);
+
+ if (TII->isTrue16ExtTruncPseudo(MI.getOpcode()))
+ LowerTrue16ExtTruncPseudo(MI);
}
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index e37bd938dc35d..161fd697b732d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -320,6 +320,8 @@ class SITargetLowering final : public AMDGPUTargetLowering {
SDLoc DL, SDValue Ops[],
MemSDNode *M) const;
+ void LowerTrue16ExtTruncPseudo(MachineInstr &MI) const;
+
public:
SITargetLowering(const TargetMachine &tm, const GCNSubtarget &STI);
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index cc0b0408bc09c..97f50192e214f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -711,6 +711,13 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
return get(Opcode).TSFlags & SIInstrFlags::FLAT;
}
+ bool isTrue16ExtTruncPseudo(uint32_t Opcode) const {
+ return (Opcode == AMDGPU::EXT_SRC16_V32_PSEUDO ||
+ Opcode == AMDGPU::EXT_SRC16_S32_PSEUDO ||
+ Opcode == AMDGPU::TRUNC_SRC32_V16_PSEUDO ||
+ Opcode == AMDGPU::TRUNC_SRC32_S16_PSEUDO);
+ }
+
/// \returns true for SCRATCH_ instructions, or FLAT/BUF instructions unless
/// the MMOs do not include scratch.
/// Conservatively correct; will return true if \p MI cannot be proven
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index b3b8d9863ca02..dfb1d2b53248d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -22,6 +22,25 @@ class DivergentSextInreg<ValueType VT> : PatFrag<
(sext_inreg $src, VT),
[{ return N->isDivergent(); }]>;
+// True16 pseudo instructions for 16-bit/32-bit conversion.
+// These are needed since a 16-bit value could live in either an
+// sreg32 or a vgpr16, which have different register sizes.
+class TRUE16_EXT_TRUNC_PSEUDO<RegisterClassLike DstRC,
+ RegisterOperand SrcOp>: PseudoInstSI<
+ (outs DstRC:$dst), (ins SrcOp:$src0)> {
+ let hasPostISelHook = 1;
+ let usesCustomInserter = 1;
+ let isPseudo = 1;
+ let hasSideEffects = 0;
+ let mayLoad = 0;
+ let mayStore = 0;
+ let Uses = [EXEC];
+}
+def EXT_SRC16_V32_PSEUDO: TRUE16_EXT_TRUNC_PSEUDO<VGPR_32, VCSrcT_b16>;
+def EXT_SRC16_S32_PSEUDO: TRUE16_EXT_TRUNC_PSEUDO<SReg_32, VCSrcT_b16>;
+def TRUNC_SRC32_V16_PSEUDO: TRUE16_EXT_TRUNC_PSEUDO<VGPR_16, VCSrc_b32>;
+def TRUNC_SRC32_S16_PSEUDO: TRUE16_EXT_TRUNC_PSEUDO<SReg_32, VCSrc_b32>;
+
include "SOPInstructions.td"
include "VOPInstructions.td"
include "SMInstructions.td"
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 86e7675626ba6..905f9c306f85a 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -1730,34 +1730,29 @@ def : GCNPat <
let True16Predicate = UseRealTrue16Insts in {
def : GCNPat<
(i32 (UniformUnaryFrag<anyext> i16:$src)),
- (COPY $src)
+ (EXT_SRC16_S32_PSEUDO $src)
>;
def : GCNPat<
(i32 (DivergentUnaryFrag<anyext> i16:$src)),
- (REG_SEQUENCE VGPR_32, $src, lo16, (i16 (IMPLICIT_DEF)), hi16)
+ (EXT_SRC16_V32_PSEUDO $src)
>;
def : GCNPat<
- (i64 (UniformUnaryFrag<anyext> i16:$src)),
+ (i64 (anyext i16:$src)),
(REG_SEQUENCE VReg_64,
- (i32 (COPY $src)), sub0,
+ (i32 (EXT_SRC16_V32_PSEUDO $src)), sub0,
(V_MOV_B32_e32 (i32 0)), sub1)
>;
-def : GCNPat<
- (i64 (DivergentUnaryFrag<anyext> i16:$src)),
- (REG_SEQUENCE VReg_64, $src, lo16, (i16 (IMPLICIT_DEF)), hi16, (i32 (IMPLICIT_DEF)), sub1)
->;
-
def : GCNPat<
(i16 (UniformUnaryFrag<trunc> i32:$src)),
- (COPY $src)
+ (TRUNC_SRC32_S16_PSEUDO $src)
>;
def : GCNPat<
(i16 (DivergentUnaryFrag<trunc> i32:$src)),
- (EXTRACT_SUBREG $src, lo16)
+ (TRUNC_SRC32_V16_PSEUDO $src)
>;
def : GCNPat <
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector-trunc.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector-trunc.v2s16.mir
index 3b4f66b82193f..c9bdc8d961fb0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector-trunc.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector-trunc.v2s16.mir
@@ -637,11 +637,19 @@ tracksRegLiveness: true
body: |
bb.0:
- ; GFX9PLUS-LABEL: name: test_build_vector_trunc_s_v2s16_anyext_impdef_anyext_constant
- ; GFX9PLUS: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
- ; GFX9PLUS-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 123
- ; GFX9PLUS-NEXT: [[S_PACK_LL_B32_B16_:%[0-9]+]]:sreg_32 = S_PACK_LL_B32_B16 [[DEF]], [[S_MOV_B32_]]
- ; GFX9PLUS-NEXT: S_ENDPGM 0, implicit [[S_PACK_LL_B32_B16_]]
+ ; GFX9-LABEL: name: test_build_vector_trunc_s_v2s16_anyext_impdef_anyext_constant
+ ; GFX9: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GFX9-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 123
+ ; GFX9-NEXT: [[S_PACK_LL_B32_B16_:%[0-9]+]]:sreg_32 = S_PACK_LL_B32_B16 [[DEF]], [[S_MOV_B32_]]
+ ; GFX9-NEXT: S_ENDPGM 0, implicit [[S_PACK_LL_B32_B16_]]
+ ;
+ ; GFX11-LABEL: name: test_build_vector_trunc_s_v2s16_anyext_impdef_anyext_constant
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 123
+ ; GFX11-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[DEF]], implicit $exec
+ ; GFX11-NEXT: [[EXT_SRC1:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_MOV_B32_]], implicit $exec
+ ; GFX11-NEXT: [[S_PACK_LL_B32_B16_:%[0-9]+]]:sreg_32 = S_PACK_LL_B32_B16 [[EXT_SRC]], [[EXT_SRC1]]
+ ; GFX11-NEXT: S_ENDPGM 0, implicit [[S_PACK_LL_B32_B16_]]
%0:sgpr(s16) = G_IMPLICIT_DEF
%1:sgpr(s16) = G_CONSTANT i16 123
%2:sgpr(s32) = G_ANYEXT %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
index df2f390124ebd..177697c163839 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GCN,GFX8 %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GCN,GFX11-TRUE16 %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GCN,GFX11-FAKE16 %s
+# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GFX8 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
---
name: fceil_s16_ss
@@ -13,14 +13,32 @@ body: |
bb.0:
liveins: $sgpr0
- ; GCN-LABEL: name: fceil_s16_ss
- ; GCN: liveins: $sgpr0
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
- ; GCN-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
- ; GCN-NEXT: [[FCEIL:%[0-9]+]]:sreg_32(s16) = G_FCEIL [[TRUNC]]
- ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FCEIL]](s16)
- ; GCN-NEXT: $sgpr0 = COPY [[COPY1]](s32)
+ ; GFX8-LABEL: name: fceil_s16_ss
+ ; GFX8: liveins: $sgpr0
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8-NEXT: [[FCEIL:%[0-9]+]]:sreg_32(s16) = G_FCEIL [[TRUNC]]
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FCEIL]](s16)
+ ; GFX8-NEXT: $sgpr0 = COPY [[COPY1]](s32)
+ ;
+ ; GFX11-TRUE16-LABEL: name: fceil_s16_ss
+ ; GFX11-TRUE16: liveins: $sgpr0
+ ; GFX11-TRUE16-NEXT: {{ $}}
+ ; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; GFX11-TRUE16-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-TRUE16-NEXT: [[FCEIL:%[0-9]+]]:sreg_32(s16) = G_FCEIL [[TRUNC]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32(s32) = EXT_SRC16_S32_PSEUDO [[FCEIL]](s16), implicit $exec
+ ; GFX11-TRUE16-NEXT: $sgpr0 = COPY [[EXT_SRC]](s32)
+ ;
+ ; GFX11-FAKE16-LABEL: name: fceil_s16_ss
+ ; GFX11-FAKE16: liveins: $sgpr0
+ ; GFX11-FAKE16-NEXT: {{ $}}
+ ; GFX11-FAKE16-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; GFX11-FAKE16-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-FAKE16-NEXT: [[FCEIL:%[0-9]+]]:sreg_32(s16) = G_FCEIL [[TRUNC]]
+ ; GFX11-FAKE16-NEXT: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FCEIL]](s16)
+ ; GFX11-FAKE16-NEXT: $sgpr0 = COPY [[COPY1]](s32)
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
%2:sgpr(s16) = G_FCEIL %1
@@ -51,9 +69,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-TRUE16-NEXT: [[COPY1:%[0-9]+]]:vgpr_16 = COPY [[COPY]].lo16
; GFX11-TRUE16-NEXT: [[V_CEIL_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CEIL_F16_t16_e64 0, [[COPY1]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CEIL_F16_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CEIL_F16_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: fceil_s16_vv
; GFX11-FAKE16: liveins: $vgpr0
@@ -90,9 +107,8 @@ body: |
; GFX11-TRUE16-NEXT: {{ $}}
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX11-TRUE16-NEXT: [[V_CEIL_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CEIL_F16_t16_e64 0, [[COPY]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CEIL_F16_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CEIL_F16_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: fceil_s16_vs
; GFX11-FAKE16: liveins: $sgpr0
@@ -130,9 +146,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-TRUE16-NEXT: [[COPY1:%[0-9]+]]:vgpr_16 = COPY [[COPY]].lo16
; GFX11-TRUE16-NEXT: [[V_CEIL_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CEIL_F16_t16_e64 1, [[COPY1]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CEIL_F16_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CEIL_F16_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: fceil_fneg_s16_vv
; GFX11-FAKE16: liveins: $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
index df62806b61918..d1d50210ff94b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefix=VI %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GCN,GFX11-TRUE16 %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GCN,GFX11-FAKE16 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -run-pass=instruction-select -global-isel-abort=0 -verify-machineinstrs -o - %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
---
name: ffloor_s16_ss
@@ -22,14 +22,23 @@ body: |
; VI-NEXT: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FFLOOR]](s16)
; VI-NEXT: $sgpr0 = COPY [[COPY1]](s32)
;
- ; GCN-LABEL: name: ffloor_s16_ss
- ; GCN: liveins: $sgpr0
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
- ; GCN-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
- ; GCN-NEXT: [[FFLOOR:%[0-9]+]]:sreg_32(s16) = G_FFLOOR [[TRUNC]]
- ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FFLOOR]](s16)
- ; GCN-NEXT: $sgpr0 = COPY [[COPY1]](s32)
+ ; GFX11-TRUE16-LABEL: name: ffloor_s16_ss
+ ; GFX11-TRUE16: liveins: $sgpr0
+ ; GFX11-TRUE16-NEXT: {{ $}}
+ ; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; GFX11-TRUE16-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-TRUE16-NEXT: [[FFLOOR:%[0-9]+]]:sreg_32(s16) = G_FFLOOR [[TRUNC]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32(s32) = EXT_SRC16_S32_PSEUDO [[FFLOOR]](s16), implicit $exec
+ ; GFX11-TRUE16-NEXT: $sgpr0 = COPY [[EXT_SRC]](s32)
+ ;
+ ; GFX11-FAKE16-LABEL: name: ffloor_s16_ss
+ ; GFX11-FAKE16: liveins: $sgpr0
+ ; GFX11-FAKE16-NEXT: {{ $}}
+ ; GFX11-FAKE16-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; GFX11-FAKE16-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-FAKE16-NEXT: [[FFLOOR:%[0-9]+]]:sreg_32(s16) = G_FFLOOR [[TRUNC]]
+ ; GFX11-FAKE16-NEXT: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FFLOOR]](s16)
+ ; GFX11-FAKE16-NEXT: $sgpr0 = COPY [[COPY1]](s32)
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
%2:sgpr(s16) = G_FFLOOR %1
@@ -60,9 +69,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-TRUE16-NEXT: [[COPY1:%[0-9]+]]:vgpr_16 = COPY [[COPY]].lo16
; GFX11-TRUE16-NEXT: [[V_FLOOR_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_FLOOR_F16_t16_e64 0, [[COPY1]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_FLOOR_F16_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_FLOOR_F16_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: ffloor_s16_vv
; GFX11-FAKE16: liveins: $vgpr0
@@ -99,9 +107,8 @@ body: |
; GFX11-TRUE16-NEXT: {{ $}}
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX11-TRUE16-NEXT: [[V_FLOOR_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_FLOOR_F16_t16_e64 0, [[COPY]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_FLOOR_F16_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_FLOOR_F16_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: ffloor_s16_vs
; GFX11-FAKE16: liveins: $sgpr0
@@ -139,9 +146,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-TRUE16-NEXT: [[COPY1:%[0-9]+]]:vgpr_16 = COPY [[COPY]].lo16
; GFX11-TRUE16-NEXT: [[V_FLOOR_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_FLOOR_F16_t16_e64 1, [[COPY1]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_FLOOR_F16_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_FLOOR_F16_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: ffloor_fneg_s16_vv
; GFX11-FAKE16: liveins: $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pseudo-scalar-transcendental.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pseudo-scalar-transcendental.mir
index 916ead0ec4b6a..9359d61cc9fd5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pseudo-scalar-transcendental.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pseudo-scalar-transcendental.mir
@@ -35,8 +35,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK-NEXT: [[V_S_EXP_F16_e64_:%[0-9]+]]:sreg_32_xexec = nofpexcept V_S_EXP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_S_EXP_F16_e64_]]
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[V_S_EXP_F16_e64_]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.exp2), %1
@@ -78,8 +78,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK-NEXT: [[V_S_LOG_F16_e64_:%[0-9]+]]:sreg_32_xexec = nofpexcept V_S_LOG_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_S_LOG_F16_e64_]]
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[V_S_LOG_F16_e64_]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.log), %1
@@ -121,8 +121,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK-NEXT: [[V_S_RCP_F16_e64_:%[0-9]+]]:sreg_32_xexec = nnan ninf nsz arcp contract afn reassoc nofpexcept V_S_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_S_RCP_F16_e64_]]
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[V_S_RCP_F16_e64_]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = nnan ninf nsz arcp contract afn reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %1(s16)
@@ -164,8 +164,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK-NEXT: [[V_S_RSQ_F16_e64_:%[0-9]+]]:sreg_32_xexec = nofpexcept V_S_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_S_RSQ_F16_e64_]]
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[V_S_RSQ_F16_e64_]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %1(s16)
@@ -207,8 +207,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK-NEXT: [[V_S_SQRT_F16_e64_:%[0-9]+]]:sreg_32_xexec = nofpexcept V_S_SQRT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_S_SQRT_F16_e64_]]
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[V_S_SQRT_F16_e64_]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_FSQRT %1
@@ -250,8 +250,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK-NEXT: [[V_S_SQRT_F16_e64_:%[0-9]+]]:sreg_32_xexec = nofpexcept V_S_SQRT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_S_SQRT_F16_e64_]]
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[V_S_SQRT_F16_e64_]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.sqrt), %1(s16)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop1.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop1.mir
index 130f87e44eac7..33dcbaf494277 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop1.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop1.mir
@@ -52,8 +52,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_CVT_I32_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_CVT_I32_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_CVT_I32_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_CVT_I32_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_FPTOSI %0(s32)
$sgpr0 = COPY %1(s32)
@@ -71,8 +71,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_CVT_U32_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_CVT_U32_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_CVT_U32_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_CVT_U32_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_FPTOUI %0(s32)
$sgpr0 = COPY %1(s32)
@@ -90,8 +90,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_CVT_F32_F16 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_CVT_F32_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_CVT_F32_F16 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_CVT_F32_F16_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s32) = G_FPEXT %1(s16)
@@ -133,8 +133,9 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_CVT_F16_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_CVT_F16_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_CVT_F16_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_CVT_F16_F32_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_FPTRUNC %0(s32)
%2:sgpr(s32) = G_ANYEXT %1(s16)
@@ -153,8 +154,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_CEIL_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_CEIL_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_CEIL_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_CEIL_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_FCEIL %0
$sgpr0 = COPY %1(s32)
@@ -172,8 +173,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_FLOOR_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_FLOOR_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_FLOOR_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_FLOOR_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_FFLOOR %0
$sgpr0 = COPY %1(s32)
@@ -191,8 +192,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_TRUNC_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_TRUNC_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_TRUNC_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_TRUNC_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_INTRINSIC_TRUNC %0
$sgpr0 = COPY %1(s32)
@@ -210,8 +211,8 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %1:sreg_32 = nofpexcept S_RNDNE_F32 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %1
+ ; GFX1150-NEXT: [[S_RNDNE_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_RNDNE_F32 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_RNDNE_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_INTRINSIC_ROUNDEVEN %0
$sgpr0 = COPY %1(s32)
@@ -229,8 +230,9 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_CEIL_F16 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_CEIL_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_CEIL_F16 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_CEIL_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_FCEIL %1
@@ -250,8 +252,9 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_FLOOR_F16 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_FLOOR_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_FLOOR_F16 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_FLOOR_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_FFLOOR %1
@@ -271,8 +274,9 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_TRUNC_F16 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_TRUNC_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_TRUNC_F16 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_TRUNC_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_INTRINSIC_TRUNC %1
@@ -292,8 +296,9 @@ body: |
; GFX1150: liveins: $sgpr0
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_RNDNE_F16 [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_RNDNE_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_RNDNE_F16 [[COPY]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_RNDNE_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s16) = G_INTRINSIC_ROUNDEVEN %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop2.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop2.mir
index d80a13c4d7c79..470af43ac184a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop2.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-scalar-float-sop2.mir
@@ -15,8 +15,8 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_ADD_F32 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_ADD_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_ADD_F32 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_ADD_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s32) = G_FADD %0, %1
@@ -36,8 +36,8 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_SUB_F32 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_SUB_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_SUB_F32 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_SUB_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s32) = G_FSUB %0, %1
@@ -57,8 +57,8 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_MUL_F32 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_MUL_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_MUL_F32 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_MUL_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s32) = G_FMUL %0, %1
@@ -78,8 +78,8 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_MIN_F32 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_MIN_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_MIN_F32 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_MIN_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s32) = G_FMINNUM %0, %1
@@ -99,8 +99,8 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_MAX_F32 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_MAX_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_MAX_F32 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_MAX_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s32) = G_FMAXNUM %0, %1
@@ -120,8 +120,9 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %4:sreg_32 = nofpexcept S_ADD_F16 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %4
+ ; GFX1150-NEXT: [[S_ADD_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_ADD_F16 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_ADD_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s32) = COPY $sgpr1
@@ -144,8 +145,9 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %4:sreg_32 = nofpexcept S_SUB_F16 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %4
+ ; GFX1150-NEXT: [[S_SUB_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_SUB_F16 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_SUB_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s32) = COPY $sgpr1
@@ -168,8 +170,9 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %4:sreg_32 = nofpexcept S_MUL_F16 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %4
+ ; GFX1150-NEXT: [[S_MUL_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_MUL_F16 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_MUL_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s32) = COPY $sgpr1
@@ -192,8 +195,9 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %4:sreg_32 = nofpexcept S_MIN_F16 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %4
+ ; GFX1150-NEXT: [[S_MIN_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_MIN_F16 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_MIN_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s32) = COPY $sgpr1
@@ -234,8 +238,8 @@ body: |
; GFX1150-NEXT: {{ $}}
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX1150-NEXT: %2:sreg_32 = nofpexcept S_CVT_PK_RTZ_F16_F32 [[COPY]], [[COPY1]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %2
+ ; GFX1150-NEXT: [[S_CVT_PK_RTZ_F16_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_CVT_PK_RTZ_F16_F32 [[COPY]], [[COPY1]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_CVT_PK_RTZ_F16_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0(s32), %1(s32)
@@ -256,8 +260,8 @@ body: |
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; GFX1150-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX1150-NEXT: %3:sreg_32 = nofpexcept S_FMAC_F32 [[COPY1]], [[COPY2]], [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %3
+ ; GFX1150-NEXT: [[S_FMAC_F32_:%[0-9]+]]:sreg_32 = nofpexcept S_FMAC_F32 [[COPY1]], [[COPY2]], [[COPY]], implicit $mode
+ ; GFX1150-NEXT: $sgpr0 = COPY [[S_FMAC_F32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s32) = COPY $sgpr2
@@ -279,8 +283,9 @@ body: |
; GFX1150-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX1150-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; GFX1150-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX1150-NEXT: %6:sreg_32 = nofpexcept S_FMAC_F16 [[COPY1]], [[COPY2]], [[COPY]], implicit $mode
- ; GFX1150-NEXT: $sgpr0 = COPY %6
+ ; GFX1150-NEXT: [[S_FMAC_F16_:%[0-9]+]]:sreg_32 = nofpexcept S_FMAC_F16 [[COPY1]], [[COPY2]], [[COPY]], implicit $mode
+ ; GFX1150-NEXT: [[EXT_SRC:%[0-9]+]]:sreg_32 = EXT_SRC16_S32_PSEUDO [[S_FMAC_F16_]], implicit $exec
+ ; GFX1150-NEXT: $sgpr0 = COPY [[EXT_SRC]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0(s32)
%2:sgpr(s32) = COPY $sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
index 3888ce87b46fd..aacc17d925a0d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
@@ -92,9 +92,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-TRUE16-NEXT: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
; GFX11-TRUE16-NEXT: [[V_CVT_F16_F32_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[V_CVT_F32_I32_e32_]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CVT_F16_F32_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CVT_F16_F32_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: sitofp_s32_to_s16_vv
; GFX11-FAKE16: liveins: $vgpr0
@@ -141,9 +140,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX11-TRUE16-NEXT: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
; GFX11-TRUE16-NEXT: [[V_CVT_F16_F32_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[V_CVT_F32_I32_e32_]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CVT_F16_F32_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CVT_F16_F32_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: sitofp_s32_to_s16_vs
; GFX11-FAKE16: liveins: $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
index 35d622dc57d18..5dc798452a471 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
@@ -106,9 +106,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-TRUE16-NEXT: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
; GFX11-TRUE16-NEXT: [[V_CVT_F16_F32_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[V_CVT_F32_U32_e32_]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CVT_F16_F32_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CVT_F16_F32_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: uitofp_s32_to_s16_vv
; GFX11-FAKE16: liveins: $vgpr0
@@ -155,9 +154,8 @@ body: |
; GFX11-TRUE16-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX11-TRUE16-NEXT: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
; GFX11-TRUE16-NEXT: [[V_CVT_F16_F32_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[V_CVT_F32_U32_e32_]], 0, 0, 0, implicit $mode, implicit $exec
- ; GFX11-TRUE16-NEXT: [[DEF:%[0-9]+]]:vgpr_16 = IMPLICIT_DEF
- ; GFX11-TRUE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_CVT_F16_F32_t16_e64_]], %subreg.lo16, [[DEF]], %subreg.hi16
- ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[REG_SEQUENCE]]
+ ; GFX11-TRUE16-NEXT: [[EXT_SRC:%[0-9]+]]:vgpr_32 = EXT_SRC16_V32_PSEUDO [[V_CVT_F16_F32_t16_e64_]], implicit $exec
+ ; GFX11-TRUE16-NEXT: $vgpr0 = COPY [[EXT_SRC]]
;
; GFX11-FAKE16-LABEL: name: uitofp_s32_to_s16_vs
; GFX11-FAKE16: liveins: $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index d917cb0d623bc..9b4a38c1431ac 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -33926,19 +33926,19 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v14
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v13
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v12
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v11
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v10
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v9
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v8
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v6
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v5
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v4
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v3
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v2
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v1
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_4
@@ -33946,30 +33946,30 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s43, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s44, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s12, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s9, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s14, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s13, 8
; GFX11-TRUE16-NEXT: s_or_b32 s43, s43, s44
; GFX11-TRUE16-NEXT: s_or_b32 s44, s45, s46
; GFX11-TRUE16-NEXT: v_perm_b32 v6, s18, s19, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s43, s43, s44
; GFX11-TRUE16-NEXT: s_and_b32 s44, s11, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s45, s7, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s45, s8, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s2, s3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s44, s44, s45
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s40, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s15, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s10, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s16, s17, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, s44
; GFX11-TRUE16-NEXT: s_or_b32 s44, s45, s46
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s8, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s7, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s4, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v0, s0, s1, v4
; GFX11-TRUE16-NEXT: s_or_b32 s45, s45, s46
; GFX11-TRUE16-NEXT: v_perm_b32 v7, s22, s23, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s26, s27, v4
-; GFX11-TRUE16-NEXT: s_and_b32 s47, s14, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s47, s12, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s5, 8
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, s45
; GFX11-TRUE16-NEXT: s_or_b32 s45, s47, s46
@@ -33978,8 +33978,8 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s44, s44, s45
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v5.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v5, s41, s15, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s13, s6, v4
+; GFX11-TRUE16-NEXT: v_perm_b32 v5, s41, s40, v4
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s9, s6, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s43
@@ -33988,24 +33988,24 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB99_3
; GFX11-TRUE16-NEXT: .LBB99_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_add_i32 s13, s13, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s9, s9, 3
; GFX11-TRUE16-NEXT: s_add_i32 s41, s41, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s8, s8, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s13, s6, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s8, s4, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v4, s14, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, 3
; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s9, s6, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s7, s4, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v4, s12, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s41, s15, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s41, s40, v0
; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s12, s9, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s14, s13, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s28, s29, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, 3
; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, 3
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
@@ -34013,8 +34013,8 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s20, s21, v0
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s40, s10, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v8, s11, s7, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s15, s10, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v8, s11, s8, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v11, s26, s27, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v1
@@ -39385,19 +39385,19 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v14
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v13
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v12
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v11
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v10
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v9
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v8
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v6
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v5
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v4
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v3
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v2
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v1
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_4
@@ -39405,30 +39405,30 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s43, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s44, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s12, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s9, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s14, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s13, 8
; GFX11-TRUE16-NEXT: s_or_b32 s43, s43, s44
; GFX11-TRUE16-NEXT: s_or_b32 s44, s45, s46
; GFX11-TRUE16-NEXT: v_perm_b32 v6, s18, s19, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s43, s43, s44
; GFX11-TRUE16-NEXT: s_and_b32 s44, s11, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s45, s7, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s45, s8, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s2, s3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s44, s44, s45
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s40, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s15, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s10, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s16, s17, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, s44
; GFX11-TRUE16-NEXT: s_or_b32 s44, s45, s46
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s8, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s7, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s4, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v0, s0, s1, v4
; GFX11-TRUE16-NEXT: s_or_b32 s45, s45, s46
; GFX11-TRUE16-NEXT: v_perm_b32 v7, s22, s23, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s26, s27, v4
-; GFX11-TRUE16-NEXT: s_and_b32 s47, s14, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s47, s12, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s5, 8
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, s45
; GFX11-TRUE16-NEXT: s_or_b32 s45, s47, s46
@@ -39437,8 +39437,8 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s44, s44, s45
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v5.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v5, s41, s15, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s13, s6, v4
+; GFX11-TRUE16-NEXT: v_perm_b32 v5, s41, s40, v4
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s9, s6, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s43
@@ -39447,24 +39447,24 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB107_3
; GFX11-TRUE16-NEXT: .LBB107_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_add_i32 s13, s13, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s9, s9, 3
; GFX11-TRUE16-NEXT: s_add_i32 s41, s41, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s8, s8, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s13, s6, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s8, s4, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v4, s14, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, 3
; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s9, s6, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s7, s4, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v4, s12, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s41, s15, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s41, s40, v0
; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s12, s9, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s14, s13, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s28, s29, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, 3
; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, 3
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
@@ -39472,8 +39472,8 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s20, s21, v0
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s40, s10, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v8, s11, s7, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s15, s10, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v8, s11, s8, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v11, s26, s27, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v1
@@ -43783,19 +43783,19 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v14
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v13
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v12
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v11
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v10
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v9
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v8
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v6
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v5
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v4
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v3
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v2
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v1
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s42, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_4
@@ -43803,30 +43803,30 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0xc0c0004
; GFX11-TRUE16-NEXT: s_and_b32 s43, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s44, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s12, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s9, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s14, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s13, 8
; GFX11-TRUE16-NEXT: s_or_b32 s43, s43, s44
; GFX11-TRUE16-NEXT: s_or_b32 s44, s45, s46
; GFX11-TRUE16-NEXT: v_perm_b32 v6, s18, s19, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s43, s43, s44
; GFX11-TRUE16-NEXT: s_and_b32 s44, s11, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s45, s7, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s45, s8, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s2, s3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s44, s44, s45
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s40, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s15, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s10, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s16, s17, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, s44
; GFX11-TRUE16-NEXT: s_or_b32 s44, s45, s46
-; GFX11-TRUE16-NEXT: s_and_b32 s45, s8, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s45, s7, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s4, 8
; GFX11-TRUE16-NEXT: v_perm_b32 v0, s0, s1, v4
; GFX11-TRUE16-NEXT: s_or_b32 s45, s45, s46
; GFX11-TRUE16-NEXT: v_perm_b32 v7, s22, s23, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s26, s27, v4
-; GFX11-TRUE16-NEXT: s_and_b32 s47, s14, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s47, s12, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s46, s5, 8
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, s45
; GFX11-TRUE16-NEXT: s_or_b32 s45, s47, s46
@@ -43835,8 +43835,8 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s44, s44, s45
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v5.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v5, s41, s15, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s13, s6, v4
+; GFX11-TRUE16-NEXT: v_perm_b32 v5, s41, s40, v4
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s9, s6, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s43
@@ -43845,24 +43845,24 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB111_3
; GFX11-TRUE16-NEXT: .LBB111_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_add_i32 s13, s13, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s9, s9, 3
; GFX11-TRUE16-NEXT: s_add_i32 s41, s41, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s8, s8, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s13, s6, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s8, s4, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v4, s14, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, 3
; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s9, s6, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s7, s4, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v4, s12, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s41, s15, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s41, s40, v0
; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s12, s9, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s14, s13, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s28, s29, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, 3
; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, 3
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
@@ -43870,8 +43870,8 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s20, s21, v0
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s40, s10, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v8, s11, s7, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s15, s10, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v8, s11, s8, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v11, s26, s27, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index 9ae6700ac1825..12a36cc68d06d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -19199,31 +19199,31 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v21
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v20
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v19
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v18
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v18
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v17
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v16
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v16
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v15
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s43, v14
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v14
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v13
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v12
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v11
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s46, v10
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v9
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v8
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s44, v7
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s47, v6
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v5
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v4
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v11
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v9
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s43, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s46, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v5
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s44, v4
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s56, v3
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s57, v2
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v1
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s47, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s58, 0
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_and_b32 s59, s43, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s59, s14, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s60, s8, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s61, s4, 8
; GFX11-TRUE16-NEXT: s_or_b32 s59, s59, s60
@@ -19236,15 +19236,15 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v5.l
; GFX11-TRUE16-NEXT: v_perm_b32 v3, s24, s25, v8
-; GFX11-TRUE16-NEXT: v_perm_b32 v5, s45, s14, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v5, s47, s45, v8
; GFX11-TRUE16-NEXT: s_and_b32 s60, s6, 0xff
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v6.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v6, s42, s13, v8
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s40, s10, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v6, s44, s40, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s41, s11, v8
; GFX11-TRUE16-NEXT: s_or_b32 s60, s60, s61
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s16, s17, v8
-; GFX11-TRUE16-NEXT: v_perm_b32 v9, s12, s9, v8
-; GFX11-TRUE16-NEXT: s_and_b32 s62, s11, 0xff
+; GFX11-TRUE16-NEXT: v_perm_b32 v9, s13, s9, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s62, s10, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s61, s7, 8
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
; GFX11-TRUE16-NEXT: v_perm_b32 v4, s28, s29, v8
@@ -19253,57 +19253,57 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s57, s56, v8
; GFX11-TRUE16-NEXT: s_or_b32 s60, s62, s61
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v6.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v6, s47, s44, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v6, s46, s43, v8
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v7.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s46, s41, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s42, s15, v8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s59, s59, s60
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v9.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v9, s15, s5, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v9, s12, s5, v8
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v10.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, s59
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s46, s46, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s15, s5, v0
; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s10, s10, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s12, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s13, s13, 3
; GFX11-TRUE16-NEXT: v_perm_b32 v2, s6, s4, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v4, s11, s7, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s47, s47, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v4, s10, s7, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s46, s46, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s46, s41, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s42, s15, v0
; GFX11-TRUE16-NEXT: s_add_i32 s57, s57, 3
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s12, s9, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s47, s44, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s13, s9, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s46, s43, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v4, s57, s56, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s45, s45, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s47, s47, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s28, s29, v0
; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s43, s8, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s14, s8, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s45, s14, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s47, s45, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s20, s21, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s41, s41, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s44, s44, 3
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s40, s10, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v12, s42, s13, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s41, s11, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v12, s44, s40, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v15, s26, s27, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v1
@@ -25541,31 +25541,31 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v21
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v20
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v19
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v18
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v18
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v17
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v16
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v16
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v15
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s43, v14
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v14
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v13
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s12, v12
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v11
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s46, v10
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v9
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v8
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s44, v7
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s47, v6
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v5
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v4
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s15, v11
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v9
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s41, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s43, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s46, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s40, v5
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s44, v4
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s56, v3
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s57, v2
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v1
-; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s47, v0
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s58, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s58, 0
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_and_b32 s59, s43, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s59, s14, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s60, s8, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s61, s4, 8
; GFX11-TRUE16-NEXT: s_or_b32 s59, s59, s60
@@ -25578,15 +25578,15 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v5.l
; GFX11-TRUE16-NEXT: v_perm_b32 v3, s24, s25, v8
-; GFX11-TRUE16-NEXT: v_perm_b32 v5, s45, s14, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v5, s47, s45, v8
; GFX11-TRUE16-NEXT: s_and_b32 s60, s6, 0xff
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v6.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v6, s42, s13, v8
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s40, s10, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v6, s44, s40, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s41, s11, v8
; GFX11-TRUE16-NEXT: s_or_b32 s60, s60, s61
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s16, s17, v8
-; GFX11-TRUE16-NEXT: v_perm_b32 v9, s12, s9, v8
-; GFX11-TRUE16-NEXT: s_and_b32 s62, s11, 0xff
+; GFX11-TRUE16-NEXT: v_perm_b32 v9, s13, s9, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s62, s10, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s61, s7, 8
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
; GFX11-TRUE16-NEXT: v_perm_b32 v4, s28, s29, v8
@@ -25595,57 +25595,57 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_perm_b32 v5, s57, s56, v8
; GFX11-TRUE16-NEXT: s_or_b32 s60, s62, s61
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v6.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v6, s47, s44, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v6, s46, s43, v8
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v7.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, s46, s41, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v7, s42, s15, v8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s59, s59, s60
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v9.l
-; GFX11-TRUE16-NEXT: v_perm_b32 v9, s15, s5, v8
+; GFX11-TRUE16-NEXT: v_perm_b32 v9, s12, s5, v8
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v10.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, s59
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s46, s46, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s15, s5, v0
; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s10, s10, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s12, s5, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s13, s13, 3
; GFX11-TRUE16-NEXT: v_perm_b32 v2, s6, s4, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v4, s11, s7, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s47, s47, 3
+; GFX11-TRUE16-NEXT: v_perm_b32 v4, s10, s7, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s46, s46, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s46, s41, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s42, s15, v0
; GFX11-TRUE16-NEXT: s_add_i32 s57, s57, 3
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v1
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, s12, s9, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s47, s44, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v1, s13, s9, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s46, s43, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v4, s57, s56, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s45, s45, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s47, s47, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s28, s29, v0
; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s43, s8, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s14, s8, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v4
-; GFX11-TRUE16-NEXT: v_perm_b32 v2, s45, s14, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v2, s47, s45, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v1
; GFX11-TRUE16-NEXT: v_perm_b32 v1, s20, s21, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, 3
-; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s41, s41, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s44, s44, 3
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
-; GFX11-TRUE16-NEXT: v_perm_b32 v3, s40, s10, v0
-; GFX11-TRUE16-NEXT: v_perm_b32 v12, s42, s13, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v3, s41, s11, v0
+; GFX11-TRUE16-NEXT: v_perm_b32 v12, s44, s40, v0
; GFX11-TRUE16-NEXT: v_perm_b32 v15, s26, s27, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v1
diff --git a/llvm/test/CodeGen/AMDGPU/function-args.ll b/llvm/test/CodeGen/AMDGPU/function-args.ll
index 3928a44595761..efc92a8691312 100644
--- a/llvm/test/CodeGen/AMDGPU/function-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/function-args.ll
@@ -1566,81 +1566,43 @@ define void @void_func_v32i8(<32 x i8> %arg0) #0 {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-TRUE16-LABEL: void_func_v32i8:
-; GFX11-TRUE16: ; %bb.0:
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: scratch_load_u8 v31, off, s32
-; GFX11-TRUE16-NEXT: v_perm_b32 v12, v12, v13, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v13, v14, v15, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v8, v8, v9, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v9, v10, v11, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v10, v4, v5, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v11, v6, v7, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v0, v0, v1, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v14, v2, v3, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v1, v24, v25, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v4, v26, v27, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v5, v20, v21, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v7, v22, v23, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v15, v16, v17, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v16, v18, v19, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_perm_b32 v17, v28, v29, 0xc0c0004
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v13, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v4, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v16, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v9, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v11, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v14, 16, v0
-; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_perm_b32 v12, v30, v31, 0xc0c0004
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v12, 16, v17
-; GFX11-TRUE16-NEXT: buffer_store_b128 v[4:7], off, s[0:3], 0
-; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 0
-; GFX11-TRUE16-NEXT: buffer_store_b128 v[0:3], off, s[0:3], 0
-; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-FAKE16-LABEL: void_func_v32i8:
-; GFX11-FAKE16: ; %bb.0:
-; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: scratch_load_u8 v31, off, s32
-; GFX11-FAKE16-NEXT: v_perm_b32 v12, v12, v13, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v13, v14, v15, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v8, v8, v9, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v9, v10, v11, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v10, v4, v5, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v11, v6, v7, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v14, v2, v3, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v1, v28, v29, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v24, v25, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v26, v27, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v7, v20, v21, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v15, v22, v23, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v16, v16, v17, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v19, 0xc0c0004
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v13, 16, v12
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v5, 16, v4
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v15, 16, v7
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v9, 16, v8
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v17, 16, v16
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v14, 16, v0
-; GFX11-FAKE16-NEXT: s_mov_b64 s[0:1], 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-FAKE16-NEXT: s_mov_b32 s2, -1
-; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_perm_b32 v12, v30, v31, 0xc0c0004
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v12, 16, v1
-; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v11, 16, v10
-; GFX11-FAKE16-NEXT: buffer_store_b128 v[4:7], off, s[0:3], 0
-; GFX11-FAKE16-NEXT: s_mov_b64 s[0:1], 0
-; GFX11-FAKE16-NEXT: buffer_store_b128 v[0:3], off, s[0:3], 0
-; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-LABEL: void_func_v32i8:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_u8 v31, off, s32
+; GFX11-NEXT: v_perm_b32 v12, v12, v13, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v13, v14, v15, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v8, v8, v9, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v9, v10, v11, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v10, v4, v5, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v11, v6, v7, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v0, v0, v1, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v14, v2, v3, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v1, v28, v29, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v4, v24, v25, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v5, v26, v27, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v7, v20, v21, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v15, v22, v23, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v16, v16, v17, 0xc0c0004
+; GFX11-NEXT: v_perm_b32 v17, v18, v19, 0xc0c0004
+; GFX11-NEXT: v_lshl_or_b32 v3, v13, 16, v12
+; GFX11-NEXT: v_lshl_or_b32 v6, v5, 16, v4
+; GFX11-NEXT: v_lshl_or_b32 v5, v15, 16, v7
+; GFX11-NEXT: v_lshl_or_b32 v2, v9, 16, v8
+; GFX11-NEXT: v_lshl_or_b32 v4, v17, 16, v16
+; GFX11-NEXT: v_lshl_or_b32 v0, v14, 16, v0
+; GFX11-NEXT: s_mov_b64 s[0:1], 16
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_perm_b32 v12, v30, v31, 0xc0c0004
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshl_or_b32 v7, v12, 16, v1
+; GFX11-NEXT: v_lshl_or_b32 v1, v11, 16, v10
+; GFX11-NEXT: buffer_store_b128 v[4:7], off, s[0:3], 0
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: buffer_store_b128 v[0:3], off, s[0:3], 0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
store <32 x i8> %arg0, ptr addrspace(1) null
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.a16.dim.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.a16.dim.ll
index d45705edce2c8..3f77aa7ddb97b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.a16.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.a16.dim.ll
@@ -98,11 +98,10 @@ define amdgpu_ps <4 x float> @gather4_cube(<8 x i32> inreg %rsrc, <4 x i32> inre
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_gather4 v[0:3], v[2:3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_CUBE a16
+; GFX11-TRUE16-NEXT: image_gather4 v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_CUBE a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -121,8 +120,9 @@ define amdgpu_ps <4 x float> @gather4_cube(<8 x i32> inreg %rsrc, <4 x i32> inre
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_gather4 v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_CUBE a16
+; GFX12-TRUE16-NEXT: image_gather4 v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_CUBE a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -166,11 +166,10 @@ define amdgpu_ps <4 x float> @gather4_2darray(<8 x i32> inreg %rsrc, <4 x i32> i
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_gather4 v[0:3], v[2:3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX11-TRUE16-NEXT: image_gather4 v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -189,8 +188,9 @@ define amdgpu_ps <4 x float> @gather4_2darray(<8 x i32> inreg %rsrc, <4 x i32> i
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_gather4 v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX12-TRUE16-NEXT: image_gather4 v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -300,11 +300,10 @@ define amdgpu_ps <4 x float> @gather4_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inr
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_gather4_cl v[0:3], v[2:3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_gather4_cl v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -323,8 +322,9 @@ define amdgpu_ps <4 x float> @gather4_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inr
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_gather4_cl v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_gather4_cl v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -371,8 +371,9 @@ define amdgpu_ps <4 x float> @gather4_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> i
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_gather4_c_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_gather4_c_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -391,8 +392,9 @@ define amdgpu_ps <4 x float> @gather4_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> i
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_gather4_c_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_gather4_c_cl v[0:3], [v0, v1, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -570,12 +572,10 @@ define amdgpu_ps <4 x float> @gather4_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> i
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_gather4_b_cl v[0:3], v[2:4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_gather4_b_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -594,8 +594,9 @@ define amdgpu_ps <4 x float> @gather4_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> i
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_gather4_b_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_gather4_b_cl v[0:3], [v0, v1, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -643,8 +644,9 @@ define amdgpu_ps <4 x float> @gather4_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32>
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_gather4_c_b_cl v[0:3], [v0, v1, v2, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_gather4_c_b_cl v[0:3], v[0:3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -663,8 +665,9 @@ define amdgpu_ps <4 x float> @gather4_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32>
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_gather4_c_b_cl v[0:3], [v0, v1, v2, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_gather4_c_b_cl v[0:3], [v0, v1, v2, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -700,10 +703,9 @@ define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inre
;
; GFX11-TRUE16-LABEL: gather4_l_2d:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
-; GFX11-TRUE16-NEXT: image_gather4_l v[0:3], v[2:3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX11-TRUE16-NEXT: image_gather4_l v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -717,7 +719,8 @@ define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inre
; GFX12-TRUE16-LABEL: gather4_l_2d:
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX12-TRUE16-NEXT: image_gather4_l v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX12-TRUE16-NEXT: image_gather4_l v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -753,7 +756,8 @@ define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX11-TRUE16-LABEL: gather4_c_l_2d:
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
-; GFX11-TRUE16-NEXT: image_gather4_c_l v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
+; GFX11-TRUE16-NEXT: image_gather4_c_l v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -767,7 +771,8 @@ define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX12-TRUE16-LABEL: gather4_c_l_2d:
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
-; GFX12-TRUE16-NEXT: image_gather4_c_l v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
+; GFX12-TRUE16-NEXT: image_gather4_c_l v[0:3], [v0, v1, v2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
index 3d64ef16a3c8c..0e138112f761d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
@@ -285,10 +285,9 @@ main_body:
define amdgpu_ps <4 x float> @load_2dmsaa_a16(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %fragid) {
; GFX11-TRUE16-LABEL: load_2dmsaa_a16:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l ; encoding: [0x02,0x39,0x06,0x7e]
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l ; encoding: [0x00,0x39,0x04,0x7e]
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l ; encoding: [0x01,0x39,0x04,0x7f]
-; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16 ; encoding: [0x98,0x01,0x61,0xf0,0x02,0x00,0x00,0x00]
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l ; encoding: [0x01,0x39,0x00,0x7f]
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l ; encoding: [0x02,0x39,0x02,0x7e]
+; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[0:1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16 ; encoding: [0x98,0x01,0x61,0xf0,0x00,0x00,0x00,0x00]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -302,7 +301,8 @@ define amdgpu_ps <4 x float> @load_2dmsaa_a16(<8 x i32> inreg %rsrc, i16 %s, i16
; GFX12-TRUE16-LABEL: load_2dmsaa_a16:
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l ; encoding: [0x01,0x39,0x00,0x7f]
-; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16 ; encoding: [0x46,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00]
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l ; encoding: [0x02,0x39,0x02,0x7e]
+; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v1], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16 ; encoding: [0x46,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00]
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0 ; encoding: [0x00,0x00,0xc2,0xbf]
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.a16.dim.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.a16.dim.ll
index 437f438efc554..035fe3964f35f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.a16.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.a16.dim.ll
@@ -139,11 +139,10 @@ define amdgpu_ps <4 x float> @sample_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample v[0:3], v[2:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
+; GFX11-TRUE16-NEXT: image_sample v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -162,8 +161,9 @@ define amdgpu_ps <4 x float> @sample_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
+; GFX12-TRUE16-NEXT: image_sample v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -207,11 +207,10 @@ define amdgpu_ps <4 x float> @sample_cube(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample v[0:3], v[2:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_CUBE a16
+; GFX11-TRUE16-NEXT: image_sample v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_CUBE a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -230,8 +229,9 @@ define amdgpu_ps <4 x float> @sample_cube(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_CUBE a16
+; GFX12-TRUE16-NEXT: image_sample v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_CUBE a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -341,11 +341,10 @@ define amdgpu_ps <4 x float> @sample_2darray(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample v[0:3], v[2:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX11-TRUE16-NEXT: image_sample v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -364,8 +363,9 @@ define amdgpu_ps <4 x float> @sample_2darray(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX12-TRUE16-NEXT: image_sample v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -582,11 +582,10 @@ define amdgpu_ps <4 x float> @sample_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inre
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample_cl v[0:3], v[2:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_sample_cl v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -605,8 +604,9 @@ define amdgpu_ps <4 x float> @sample_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inre
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample_cl v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_sample_cl v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -719,8 +719,9 @@ define amdgpu_ps <4 x float> @sample_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample_c_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_sample_c_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -739,8 +740,9 @@ define amdgpu_ps <4 x float> @sample_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample_c_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_sample_c_cl v[0:3], [v0, v1, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1066,12 +1068,10 @@ define amdgpu_ps <4 x float> @sample_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample_b_cl v[0:3], v[2:4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_sample_b_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1090,8 +1090,9 @@ define amdgpu_ps <4 x float> @sample_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample_b_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_sample_b_cl v[0:3], [v0, v1, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1205,8 +1206,9 @@ define amdgpu_ps <4 x float> @sample_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32>
; GFX11-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX11-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.l
; GFX11-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX11-TRUE16-NEXT: image_sample_c_b_cl v[0:3], [v0, v1, v2, v4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: image_sample_c_b_cl v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1225,8 +1227,9 @@ define amdgpu_ps <4 x float> @sample_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32>
; GFX12-TRUE16-NEXT: s_mov_b32 s12, exec_lo
; GFX12-TRUE16-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.l
; GFX12-TRUE16-NEXT: s_and_b32 exec_lo, exec_lo, s12
-; GFX12-TRUE16-NEXT: image_sample_c_b_cl v[0:3], [v0, v1, v2, v4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: image_sample_c_b_cl v[0:3], [v0, v1, v2, v3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1364,12 +1367,13 @@ define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg
;
; GFX11-TRUE16-LABEL: sample_d_3d:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX11-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v5, v[8:9]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v7.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v8.l
+; GFX11-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v4, v2, v3, v5, v[0:1]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1384,13 +1388,12 @@ define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg
;
; GFX12-TRUE16-LABEL: sample_d_3d:
; GFX12-TRUE16: ; %bb.0: ; %main_body
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.l, v6.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.h, v7.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v8.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX12-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v[7:9]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX12-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v1, v3, v[5:7]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1570,14 +1573,14 @@ define amdgpu_ps <4 x float> @sample_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> in
;
; GFX11-TRUE16-LABEL: sample_d_cl_2d:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v5.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v1.l
-; GFX11-TRUE16-NEXT: image_sample_d_cl_g16 v[0:3], v[4:7], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v5.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v6.l
+; GFX11-TRUE16-NEXT: image_sample_d_cl_g16 v[0:3], v[7:10], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1595,7 +1598,8 @@ define amdgpu_ps <4 x float> @sample_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> in
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, v5.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX12-TRUE16-NEXT: image_sample_d_cl_g16 v[0:3], [v0, v2, v4, v6], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.l
+; GFX12-TRUE16-NEXT: image_sample_d_cl_g16 v[0:3], [v0, v2, v4, v1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1687,7 +1691,8 @@ define amdgpu_ps <4 x float> @sample_c_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32>
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
-; GFX11-TRUE16-NEXT: image_sample_c_d_cl_g16 v[0:3], [v0, v1, v3, v5, v7], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.l
+; GFX11-TRUE16-NEXT: image_sample_c_d_cl_g16 v[0:3], [v0, v1, v3, v5, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1702,12 +1707,13 @@ define amdgpu_ps <4 x float> @sample_c_d_cl_2d(<8 x i32> inreg %rsrc, <4 x i32>
;
; GFX12-TRUE16-LABEL: sample_c_d_cl_2d:
; GFX12-TRUE16: ; %bb.0: ; %main_body
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.l, v7.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, v6.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
-; GFX12-TRUE16-NEXT: image_sample_c_d_cl_g16 v[0:3], [v0, v1, v3, v[7:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v6.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.l
+; GFX12-TRUE16-NEXT: image_sample_c_d_cl_g16 v[0:3], [v0, v4, v3, v[1:2]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1790,10 +1796,9 @@ define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg
;
; GFX11-TRUE16-LABEL: sample_l_2d:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
-; GFX11-TRUE16-NEXT: image_sample_l v[0:3], v[2:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX11-TRUE16-NEXT: image_sample_l v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1807,7 +1812,8 @@ define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX12-TRUE16-LABEL: sample_l_2d:
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX12-TRUE16-NEXT: image_sample_l v[0:3], [v0, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX12-TRUE16-NEXT: image_sample_l v[0:3], [v0, v1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1891,7 +1897,8 @@ define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inr
; GFX11-TRUE16-LABEL: sample_c_l_2d:
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
-; GFX11-TRUE16-NEXT: image_sample_c_l v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
+; GFX11-TRUE16-NEXT: image_sample_c_l v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -1905,7 +1912,8 @@ define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inr
; GFX12-TRUE16-LABEL: sample_c_l_2d:
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
-; GFX12-TRUE16-NEXT: image_sample_c_l v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
+; GFX12-TRUE16-NEXT: image_sample_c_l v[0:3], [v0, v1, v2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -2102,12 +2110,13 @@ define amdgpu_ps float @sample_c_d_o_2darray_V1(<8 x i32> inreg %rsrc, <4 x i32>
;
; GFX11-TRUE16-LABEL: sample_c_d_o_2darray_V1:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v5.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
-; GFX11-TRUE16-NEXT: image_sample_c_d_o_g16 v0, [v0, v1, v2, v4, v[8:9]], s[0:7], s[8:11] dmask:0x4 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v7.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.l
+; GFX11-TRUE16-NEXT: image_sample_c_d_o_g16 v0, [v0, v1, v5, v4, v[2:3]], s[0:7], s[8:11] dmask:0x4 dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -2122,13 +2131,13 @@ define amdgpu_ps float @sample_c_d_o_2darray_V1(<8 x i32> inreg %rsrc, <4 x i32>
;
; GFX12-TRUE16-LABEL: sample_c_d_o_2darray_V1:
; GFX12-TRUE16: ; %bb.0: ; %main_body
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.l, v6.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, v5.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v10.l, v6.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v10.h, v7.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.h, v5.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v11.l, v8.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
-; GFX12-TRUE16-NEXT: image_sample_c_d_o_g16 v0, [v0, v1, v2, v[7:9]], s[0:7], s[8:11] dmask:0x4 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX12-TRUE16-NEXT: image_sample_c_d_o_g16 v0, [v0, v1, v2, v[9:11]], s[0:7], s[8:11] dmask:0x4 dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
@@ -2173,12 +2182,13 @@ define amdgpu_ps <2 x float> @sample_c_d_o_2darray_V2(<8 x i32> inreg %rsrc, <4
;
; GFX11-TRUE16-LABEL: sample_c_d_o_2darray_V2:
; GFX11-TRUE16: ; %bb.0: ; %main_body
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v5.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
-; GFX11-TRUE16-NEXT: image_sample_c_d_o_g16 v[0:1], [v0, v1, v2, v4, v[8:9]], s[0:7], s[8:11] dmask:0x6 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v7.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.l
+; GFX11-TRUE16-NEXT: image_sample_c_d_o_g16 v[0:1], [v0, v1, v5, v4, v[2:3]], s[0:7], s[8:11] dmask:0x6 dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -2193,13 +2203,13 @@ define amdgpu_ps <2 x float> @sample_c_d_o_2darray_V2(<8 x i32> inreg %rsrc, <4
;
; GFX12-TRUE16-LABEL: sample_c_d_o_2darray_V2:
; GFX12-TRUE16: ; %bb.0: ; %main_body
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.l, v6.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, v5.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v10.l, v6.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v10.h, v7.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v9.h, v5.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v11.l, v8.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
-; GFX12-TRUE16-NEXT: image_sample_c_d_o_g16 v[0:1], [v0, v1, v2, v[7:9]], s[0:7], s[8:11] dmask:0x6 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX12-TRUE16-NEXT: image_sample_c_d_o_g16 v[0:1], [v0, v1, v2, v[9:11]], s[0:7], s[8:11] dmask:0x6 dim:SQ_RSRC_IMG_2D_ARRAY a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.encode.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.encode.ll
index e7b048dda1c1f..a00b90e80bb1d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.encode.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.encode.ll
@@ -88,7 +88,8 @@ define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l ; encoding: [0x04,0x39,0x06,0x7f]
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l ; encoding: [0x01,0x39,0x00,0x7f]
-; GFX11-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v5, v[6:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D ; encoding: [0x09,0x0f,0xe4,0xf0,0x00,0x00,0x00,0x08,0x02,0x03,0x05,0x06]
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.l ; encoding: [0x05,0x39,0x02,0x7e]
+; GFX11-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v1, v[6:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D ; encoding: [0x09,0x0f,0xe4,0xf0,0x00,0x00,0x00,0x08,0x02,0x03,0x01,0x06]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -104,7 +105,8 @@ define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l ; encoding: [0x04,0x39,0x06,0x7f]
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l ; encoding: [0x01,0x39,0x00,0x7f]
-; GFX12-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v[5:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D ; encoding: [0x02,0x40,0xce,0xe7,0x00,0x00,0x00,0x04,0x00,0x02,0x03,0x05]
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l ; encoding: [0x02,0x39,0x02,0x7e]
+; GFX12-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v1, v3, v[5:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D ; encoding: [0x02,0x40,0xce,0xe7,0x00,0x00,0x00,0x04,0x00,0x01,0x03,0x05]
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0 ; encoding: [0x00,0x00,0xc2,0xbf]
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.ll
index 45cebaf449d54..812d0cbe48289 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.ll
@@ -88,7 +88,8 @@ define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX11-TRUE16: ; %bb.0: ; %main_body
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX11-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v5, v[6:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.l
+; GFX11-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v1, v[6:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
@@ -104,7 +105,8 @@ define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX12-TRUE16: ; %bb.0: ; %main_body
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, v4.l
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX12-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v2, v3, v[5:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX12-TRUE16-NEXT: image_sample_d_g16 v[0:3], [v0, v1, v3, v[5:8]], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: ; return to shader part epilog
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp8.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp8.ll
index e7d8683137dd5..4c702505f07dc 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp8.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp8.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10PLUS %s
; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10PLUS %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefix=GFX10PLUS %s
@@ -189,3 +190,5 @@ define amdgpu_ps void @dpp8_double(double %in, ptr addrspace(1) %out) {
declare i32 @llvm.amdgcn.mov.dpp8.i32(i32, i32) #0
attributes #0 = { nounwind readnone convergent }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10PLUS: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll
index f0031dd3e93c0..3120c847784cf 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll
@@ -85,11 +85,10 @@ define amdgpu_ps <3 x float> @sample_gather(<8 x i32> inreg %rsrc, <4 x i32> inr
define amdgpu_ps <3 x float> @sample_load(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, <8 x i32> inreg %rsrc2, i16 %s.16, i16 %t.16, i16 %fragid) {
; GFX11-TRUE16-LABEL: sample_load:
; GFX11-TRUE16: ; %bb.0:
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0
-; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[0:1], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
; GFX11-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
@@ -105,11 +104,10 @@ define amdgpu_ps <3 x float> @sample_load(<8 x i32> inreg %rsrc, <4 x i32> inreg
;
; GFX1150-TRUE16-LABEL: sample_load:
; GFX1150-TRUE16: ; %bb.0:
-; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, 0
-; GFX1150-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX1150-TRUE16-NEXT: image_msaa_load v[0:3], v[0:1], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX1150-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -128,8 +126,9 @@ define amdgpu_ps <3 x float> @sample_load(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX12-TRUE16-LABEL: sample_load:
; GFX12-TRUE16: ; %bb.0:
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v1], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
@@ -159,11 +158,10 @@ define amdgpu_ps <3 x float> @sample_load(<8 x i32> inreg %rsrc, <4 x i32> inreg
define amdgpu_ps <3 x float> @load_sample(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, <8 x i32> inreg %rsrc2, i16 %s.16, i16 %t.16, i16 %fragid) {
; GFX11-TRUE16-LABEL: load_sample:
; GFX11-TRUE16: ; %bb.0:
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0
-; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[0:1], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
; GFX11-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: ; return to shader part epilog
@@ -179,11 +177,10 @@ define amdgpu_ps <3 x float> @load_sample(<8 x i32> inreg %rsrc, <4 x i32> inreg
;
; GFX1150-TRUE16-LABEL: load_sample:
; GFX1150-TRUE16: ; %bb.0:
-; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
-; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
-; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, 0
-; GFX1150-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX1150-TRUE16-NEXT: image_msaa_load v[0:3], v[0:1], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX1150-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -202,8 +199,9 @@ define amdgpu_ps <3 x float> @load_sample(<8 x i32> inreg %rsrc, <4 x i32> inreg
; GFX12-TRUE16-LABEL: load_sample:
; GFX12-TRUE16: ; %bb.0:
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v1], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index 04350ee8d17e1..763d7ee4e255d 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -5962,21 +5962,19 @@ define amdgpu_kernel void @constant_sextload_v4i1_to_v4i64(ptr addrspace(1) %out
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: global_load_d16_u8 v0, v8, s[2:3]
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, 3, v0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 2, v0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 1, v0
+; GFX12-NEXT: v_readfirstlane_b32 s3, v0
; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_bfe_i32 v6, v1, 0, 1
-; GFX12-NEXT: v_bfe_i32 v4, v2, 0, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_bfe_i32 v2, v3, 0, 1
+; GFX12-NEXT: s_lshr_b32 s2, s3, 2
+; GFX12-NEXT: s_lshr_b32 s4, s3, 3
+; GFX12-NEXT: s_lshr_b32 s6, s3, 1
+; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
+; GFX12-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v7, s5
+; GFX12-NEXT: v_dual_mov_b32 v5, s3 :: v_dual_mov_b32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s7
; GFX12-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_ashrrev_i32_e32 v7, 31, v6
-; GFX12-NEXT: v_ashrrev_i32_e32 v5, 31, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX12-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX12-NEXT: v_mov_b32_e32 v2, s6
; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: global_store_b128 v8, v[4:7], s[0:1] offset:16
; GFX12-NEXT: global_store_b128 v8, v[0:3], s[0:1]
@@ -6369,8 +6367,7 @@ define amdgpu_kernel void @constant_sextload_v8i1_to_v8i64(ptr addrspace(1) %out
; GFX12-NEXT: global_load_d16_u8 v0, v16, s[2:3]
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: v_readfirstlane_b32 s3, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v9, s3
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 1
; GFX12-NEXT: s_lshr_b32 s2, s3, 6
; GFX12-NEXT: s_lshr_b32 s4, s3, 7
; GFX12-NEXT: s_lshr_b32 s6, s3, 4
@@ -6378,29 +6375,27 @@ define amdgpu_kernel void @constant_sextload_v8i1_to_v8i64(ptr addrspace(1) %out
; GFX12-NEXT: s_lshr_b32 s10, s3, 2
; GFX12-NEXT: s_lshr_b32 s12, s3, 3
; GFX12-NEXT: s_lshr_b32 s14, s3, 1
-; GFX12-NEXT: s_wait_alu depctr_sa_sdst(0)
; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
-; GFX12-NEXT: v_bfe_i32 v12, v9, 0, 1
; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000
; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000
; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
-; GFX12-NEXT: s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX12-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v7, s5
+; GFX12-NEXT: v_dual_mov_b32 v5, s3 :: v_dual_mov_b32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v9, s7
; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000
-; GFX12-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
-; GFX12-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
-; GFX12-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
-; GFX12-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
-; GFX12-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
-; GFX12-NEXT: v_ashrrev_i32_e32 v13, 31, v12
+; GFX12-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v11, s9
+; GFX12-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v13, s11
+; GFX12-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v15, s13
+; GFX12-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v3, s15
+; GFX12-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX12-NEXT: v_mov_b32_e32 v2, s14
; GFX12-NEXT: s_clause 0x3
-; GFX12-NEXT: global_store_b128 v16, v[0:3], s[0:1] offset:48
-; GFX12-NEXT: global_store_b128 v16, v[4:7], s[0:1] offset:32
-; GFX12-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:16
-; GFX12-NEXT: global_store_b128 v16, v[12:15], s[0:1]
+; GFX12-NEXT: global_store_b128 v16, v[4:7], s[0:1] offset:48
+; GFX12-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:32
+; GFX12-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:16
+; GFX12-NEXT: global_store_b128 v16, v[0:3], s[0:1]
; GFX12-NEXT: s_endpgm
;
; GFX1250-LABEL: constant_sextload_v8i1_to_v8i64:
@@ -7075,11 +7070,9 @@ define amdgpu_kernel void @constant_sextload_v16i1_to_v16i64(ptr addrspace(1) %o
; GFX12-NEXT: global_load_d16_b16 v0, v32, s[2:3]
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: v_readfirstlane_b32 s3, v0
-; GFX12-NEXT: s_lshr_b32 s4, s3, 15
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 1
; GFX12-NEXT: s_lshr_b32 s2, s3, 14
-; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_dual_mov_b32 v28, s3 :: v_dual_mov_b32 v3, s5
+; GFX12-NEXT: s_lshr_b32 s4, s3, 15
; GFX12-NEXT: s_lshr_b32 s6, s3, 12
; GFX12-NEXT: s_lshr_b32 s8, s3, 13
; GFX12-NEXT: s_lshr_b32 s10, s3, 10
@@ -7093,49 +7086,47 @@ define amdgpu_kernel void @constant_sextload_v16i1_to_v16i64(ptr addrspace(1) %o
; GFX12-NEXT: s_lshr_b32 s26, s3, 2
; GFX12-NEXT: s_lshr_b32 s28, s3, 3
; GFX12-NEXT: s_lshr_b32 s30, s3, 1
-; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000
-; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000
; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
+; GFX12-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v7, s5
+; GFX12-NEXT: v_dual_mov_b32 v5, s3 :: v_dual_mov_b32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v9, s7
; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000
; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000
+; GFX12-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v11, s9
+; GFX12-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v13, s11
; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000
; GFX12-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000
-; GFX12-NEXT: s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v5, s7
-; GFX12-NEXT: v_dual_mov_b32 v1, s3 :: v_dual_mov_b32 v2, s4
-; GFX12-NEXT: v_dual_mov_b32 v7, s9 :: v_dual_mov_b32 v4, s6
-; GFX12-NEXT: v_dual_mov_b32 v9, s11 :: v_dual_mov_b32 v6, s8
-; GFX12-NEXT: v_dual_mov_b32 v11, s13 :: v_dual_mov_b32 v8, s10
-; GFX12-NEXT: v_dual_mov_b32 v13, s15 :: v_dual_mov_b32 v10, s12
-; GFX12-NEXT: v_mov_b32_e32 v15, s17
-; GFX12-NEXT: v_bfe_i32 v28, v28, 0, 1
-; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x10000
-; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000
-; GFX12-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v17, s19
-; GFX12-NEXT: v_dual_mov_b32 v14, s16 :: v_dual_mov_b32 v19, s21
+; GFX12-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v15, s13
+; GFX12-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v17, s15
+; GFX12-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
; GFX12-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000
; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000
-; GFX12-NEXT: v_dual_mov_b32 v16, s18 :: v_dual_mov_b32 v21, s23
-; GFX12-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v23, s25
-; GFX12-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
-; GFX12-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v25, s27
-; GFX12-NEXT: v_dual_mov_b32 v22, s24 :: v_dual_mov_b32 v27, s29
-; GFX12-NEXT: v_dual_mov_b32 v24, s26 :: v_dual_mov_b32 v31, s31
-; GFX12-NEXT: v_mov_b32_e32 v26, s28
-; GFX12-NEXT: v_mov_b32_e32 v30, s30
-; GFX12-NEXT: s_clause 0x1
-; GFX12-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:112
-; GFX12-NEXT: global_store_b128 v32, v[4:7], s[0:1] offset:96
-; GFX12-NEXT: v_ashrrev_i32_e32 v29, 31, v28
-; GFX12-NEXT: s_clause 0x5
-; GFX12-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:80
-; GFX12-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:64
-; GFX12-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:48
-; GFX12-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX12-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:16
-; GFX12-NEXT: global_store_b128 v32, v[28:31], s[0:1]
+; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x10000
+; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000
+; GFX12-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v19, s17
+; GFX12-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v21, s19
+; GFX12-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v23, s21
+; GFX12-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v25, s23
+; GFX12-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX12-NEXT: v_dual_mov_b32 v24, s22 :: v_dual_mov_b32 v27, s25
+; GFX12-NEXT: v_dual_mov_b32 v26, s24 :: v_dual_mov_b32 v29, s27
+; GFX12-NEXT: v_dual_mov_b32 v28, s26 :: v_dual_mov_b32 v31, s29
+; GFX12-NEXT: v_dual_mov_b32 v30, s28 :: v_dual_mov_b32 v3, s31
+; GFX12-NEXT: v_mov_b32_e32 v2, s30
+; GFX12-NEXT: s_clause 0x7
+; GFX12-NEXT: global_store_b128 v32, v[4:7], s[0:1] offset:112
+; GFX12-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:96
+; GFX12-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
+; GFX12-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
+; GFX12-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:48
+; GFX12-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:32
+; GFX12-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:16
+; GFX12-NEXT: global_store_b128 v32, v[0:3], s[0:1]
; GFX12-NEXT: s_endpgm
;
; GFX1250-LABEL: constant_sextload_v16i1_to_v16i64:
More information about the llvm-commits
mailing list