[llvm] AMDGPU: Start using AV_MOV_B64_IMM_PSEUDO (PR #154500)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 21 20:17:50 PDT 2025
https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/154500
>From 9d9396c0554d6d755312628e521a357e145707a1 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 12:05:02 +0900
Subject: [PATCH 1/5] AMDGPU: Add pseudoinstruction for 64-bit agpr or vgpr
constants
64-bit version of 7425af4b7aaa31da10bd1bc7996d3bb212c79d88. We
still need to lower to 32-bit v_accvgpr_write_b32s, so this has
an unusual value restriction that requires both halves of the
constant to be 32-bit inline immediates. This only introduces the
new pseudo definitions, but doesn't try to use them yet.
---
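Illustration, not part of the patch: a standalone sketch of the value
restriction. isInlineInt32 below is a stand-in for the integer subset
of AMDGPU::isInlinableLiteral32, which also accepts a handful of float
bit patterns (and 1/(2*pi) on subtargets with hasInv2PiInlineImm()).

  #include <cstdint>

  // Integer subset of the 32-bit inline-constant check (simplified).
  static bool isInlineInt32(uint32_t V) {
    int32_t S = static_cast<int32_t>(V);
    return S >= -16 && S <= 64;
  }

  // Both 32-bit halves must be independently encodable, because the
  // AGPR path expands to two v_accvgpr_write_b32 instructions.
  static bool isLegalAV64PseudoImm(uint64_t Imm) {
    return isInlineInt32(static_cast<uint32_t>(Imm)) &&
           isInlineInt32(static_cast<uint32_t>(Imm >> 32));
  }

For example 0, -1, 64 and 64ULL << 32 are legal; 65 and 65ULL << 32
are not. A legal non-splat value such as 0x4000000002 (lo = 2,
hi = 64) expands for an AGPR destination to the pair shown in the
tests below:

  $agpr0 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec, implicit-def $agpr0_agpr1
  $agpr1 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1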
llvm/lib/Target/AMDGPU/SIDefines.h | 4 +
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 48 +++++++
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 3 +
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 7 +
llvm/lib/Target/AMDGPU/SIInstructions.td | 20 +++
.../AMDGPU/amdgpu-prepare-agpr-alloc.mir | 112 +++++++++++++++
.../AMDGPU/av_movimm_pseudo_expansion.mir | 136 +++++++++++++++++-
llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir | 107 ++++++++++++++
.../CodeGen/AMDGPU/inflate-av-remat-imm.mir | 57 ++++++++
.../test/CodeGen/AMDGPU/peephole-fold-imm.mir | 39 +++++
llvm/test/CodeGen/AMDGPU/vgpr-remat.mir | 45 ++++++
.../AMDGPU/av_mov_b64_imm_pseudo.mir | 25 ++++
12 files changed, 601 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 7c019031ff249..268b153c6c924 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -243,6 +243,10 @@ enum OperandType : unsigned {
// Operand for SDWA instructions
OPERAND_SDWA_VOPC_DST,
+ // Operand for AV_MOV_B64_IMM_PSEUDO, which is a pair of 32-bit inline
+ // constants.
+ OPERAND_INLINE_C_AV64_PSEUDO,
+
OPERAND_REG_IMM_FIRST = OPERAND_REG_IMM_INT32,
OPERAND_REG_IMM_LAST = OPERAND_REG_IMM_V2FP32,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index cc4bee0f1f454..98ff9270391d2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1349,6 +1349,7 @@ bool SIInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
case AMDGPU::V_MOV_B64_e32:
case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
+ case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
case AMDGPU::S_MOV_B64_IMM_PSEUDO:
case AMDGPU::V_MOV_B64_PSEUDO: {
const MachineOperand &Src0 = MI.getOperand(1);
@@ -2133,6 +2134,25 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
get(IsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::V_MOV_B32_e32));
break;
}
+ case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
+ Register Dst = MI.getOperand(0).getReg();
+ if (SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst))) {
+ uint64_t Imm = static_cast<uint64_t>(MI.getOperand(1).getImm());
+
+ Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
+ Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstLo)
+ .addImm(SignExtend64<32>(Lo_32(Imm)))
+ .addReg(Dst, RegState::Implicit | RegState::Define);
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstHi)
+ .addImm(SignExtend64<32>(Hi_32(Imm)))
+ .addReg(Dst, RegState::Implicit | RegState::Define);
+ MI.eraseFromParent();
+ break;
+ }
+
+ [[fallthrough]];
+ }
case AMDGPU::V_MOV_B64_PSEUDO: {
Register Dst = MI.getOperand(0).getReg();
Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
@@ -3425,6 +3445,11 @@ bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
case AMDGPU::V_ACCVGPR_MOV_B32:
case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
return true;
+ case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
+ // TODO: We could fold this, but it's a strange case. The immediate value
+ // can't be directly folded into any real use. We would have to spread new
+ // immediate legality checks around and only accept subregister extracts for
+ // profitability.
default:
return false;
}
@@ -4471,6 +4496,8 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
case AMDGPU::OPERAND_KIMM16:
case AMDGPU::OPERAND_KIMM64:
return false;
+ case AMDGPU::OPERAND_INLINE_C_AV64_PSEUDO:
+ return isLegalAV64PseudoImm(Imm);
case AMDGPU::OPERAND_INPUT_MODS:
case MCOI::OPERAND_IMMEDIATE:
// Always embedded in the instruction for free.
@@ -4536,6 +4563,12 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
return ST.hasVOP3Literal();
}
+bool SIInstrInfo::isLegalAV64PseudoImm(uint64_t Imm) const {
+ // Two 32-bit inline constants packed into one 64-bit value.
+ return AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) &&
+ AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm());
+}
+
bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
// GFX90A does not have V_MUL_LEGACY_F32_e32.
if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
@@ -4896,6 +4929,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
case MCOI::OPERAND_IMMEDIATE:
case AMDGPU::OPERAND_KIMM32:
case AMDGPU::OPERAND_KIMM64:
+ case AMDGPU::OPERAND_INLINE_C_AV64_PSEUDO:
// Check if this operand is an immediate.
// FrameIndex operands will be replaced by immediates, so they are
// allowed.
@@ -5518,6 +5552,20 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
}
}
+#if 0
+ if (Opcode == AMDGPU::AV_MOV_B64_IMM_PSEUDO) {
+ const MachineOperand &SrcOp = MI.getOperand(1);
+ uint64_t Imm = static_cast<uint64_t>(SrcOp.getImm());
+
+ if (!AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) ||
+ !AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm())) {
+ ErrInfo = "AV_MOV_B64_IMM_PSEUDO only accepts a pair of 32-bit inline "
+ "immediates";
+ return false;
+ }
+ }
+#endif
+
if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
const MachineOperand &SrcOp = MI.getOperand(1);
if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 12ffae78f7b2c..f7c7bb509c9ef 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1183,6 +1183,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
const MachineOperand &MO) const;
+ /// Check if this immediate value can be used for AV_MOV_B64_IMM_PSEUDO.
+ bool isLegalAV64PseudoImm(uint64_t Imm) const;
+
/// Return true if this 64-bit VALU instruction has a 32-bit encoding.
/// This function will return false if you pass it a 32-bit instruction.
bool hasVALU32BitEncoding(unsigned Opcode) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index c425d9753dd1e..0374526e35c44 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1068,6 +1068,13 @@ def SplitBarrier : ImmOperand<i32> {
let PrintMethod = "printOperand";
}
+// Pseudo-operand type. This is a pair of 32-bit inline constants
+// packed into a single 64-bit value.
+def AV_64_PSEUDO_IMM : Operand<i64> {
+ let OperandNamespace = "AMDGPU";
+ let OperandType = "OPERAND_INLINE_C_AV64_PSEUDO";
+}
+
def VReg32OrOffClass : AsmOperandClass {
let Name = "VReg32OrOff";
let ParserMethod = "parseVReg32OrOff";
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d3c15bd8f672a..13dc68157197f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -160,6 +160,26 @@ def AV_MOV_B32_IMM_PSEUDO
let UseNamedOperandTable = 1;
}
+// 64-bit materialize immediate which supports AGPR or VGPR. This has
+// an unusual operand restriction which requires the two halves of the
+// immediate to each be 32-bit inline immediate values.
+//
+// FIXME: This unnecessarily has the even aligned vector register
+// requirement applied.
+def AV_MOV_B64_IMM_PSEUDO
+ : VPseudoInstSI<(outs AV_64:$vdst), (ins AV_64_PSEUDO_IMM:$src0)> {
+ let isReMaterializable = 1;
+ let isAsCheapAsAMove = 1;
+
+ // Imprecise, technically if AGPR it's VOP3 and VOP1 for AGPR. But
+ // this tricks the rematerialize logic into working for it.
+ let VOP3 = 1;
+ let isMoveImm = 1;
+ let SchedRW = [Write32Bit, Write32Bit];
+ let Size = 16; // 2 x v_accvgpr_write_b32 in the worst case
+ let UseNamedOperandTable = 1;
+}
+
// 64-bit vector move with dpp. Expanded post-RA.
def V_MOV_B64_DPP_PSEUDO : VOP_DPP_Pseudo <"v_mov_b64_dpp", VOP_I64_I64> {
let Size = 16; // Requires two 8-byte v_mov_b32_dpp to complete.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
index 69bdb1f5066f0..d277c8104fe44 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
@@ -13,6 +13,14 @@
ret void
}
+ define void @func64() {
+ ret void
+ }
+
+ define void @func64_no_agprs() "amdgpu-agpr-alloc"="0,0" {
+ ret void
+ }
+
...
---
name: func
@@ -93,3 +101,107 @@ body: |
%1:agpr_32 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec
...
+
+---
+name: func64
+tracksRegLiveness: true
+stack:
+ - { id: 0, size: 4 }
+body: |
+ ; HAS-AGPR-LABEL: name: func64
+ ; HAS-AGPR: bb.0:
+ ; HAS-AGPR-NEXT: successors: %bb.1(0x80000000)
+ ; HAS-AGPR-NEXT: liveins: $vgpr0_vgpr1
+ ; HAS-AGPR-NEXT: {{ $}}
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+ ; HAS-AGPR-NEXT: {{ $}}
+ ; HAS-AGPR-NEXT: bb.1:
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+ ;
+ ; NO-AGPR-LABEL: name: func64
+ ; NO-AGPR: bb.0:
+ ; NO-AGPR-NEXT: successors: %bb.1(0x80000000)
+ ; NO-AGPR-NEXT: liveins: $vgpr0_vgpr1
+ ; NO-AGPR-NEXT: {{ $}}
+ ; NO-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+ ; NO-AGPR-NEXT: {{ $}}
+ ; NO-AGPR-NEXT: bb.1:
+ ; NO-AGPR-NEXT: [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ %0:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
+ %1:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
+ %2:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ %3:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
+ %4:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+ %5:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+ %6:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+ %7:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+ %8:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ %9:vreg_64_align2 = V_MOV_B64_PSEUDO 9223372036854775808, implicit $exec
+ %10:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ %11:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ %12:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+ %13:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+
+ bb.1:
+ %14:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+
+...
+
+---
+name: func64_no_agprs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; HAS-AGPR-LABEL: name: func64_no_agprs
+ ; HAS-AGPR: liveins: $vgpr0
+ ; HAS-AGPR-NEXT: {{ $}}
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ ;
+ ; NO-AGPR-LABEL: name: func64_no_agprs
+ ; NO-AGPR: liveins: $vgpr0
+ ; NO-AGPR-NEXT: {{ $}}
+ ; NO-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ ; NO-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ %0:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ %1:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ %2:vreg_64_align2 = V_MOV_B64_PSEUDO 9223372036854775808, implicit $exec
+ %3:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ %4:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir b/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir
index 1f9d49073da2c..272997cf1a347 100644
--- a/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir
+++ b/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir
@@ -1,6 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass=postrapseudos %s -o - | FileCheck %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx942 -run-pass=postrapseudos %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=postrapseudos %s -o - | FileCheck -check-prefixes=CHECK,GFX908 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass=postrapseudos %s -o - | FileCheck -check-prefixes=CHECK,GFX90A %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx942 -run-pass=postrapseudos %s -o - | FileCheck -check-prefixes=CHECK,GFX942 %s
---
name: av_mov_b32_imm_pseudo_agpr_0
@@ -54,3 +55,134 @@ body: |
; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec
$agpr1 = AV_MOV_B32_IMM_PSEUDO $vgpr0, implicit $exec
...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_0
+ ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+ $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_neg1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_neg1
+ ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 -1, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 -1, implicit $exec, implicit-def $agpr0_agpr1
+ $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO -1, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_64
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_64
+ ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+ $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_vgpr_0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX908-LABEL: name: av_mov_b64_imm_pseudo_vgpr_0
+ ; GFX908: $vgpr0 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ;
+ ; GFX90A-LABEL: name: av_mov_b64_imm_pseudo_vgpr_0
+ ; GFX90A: $vgpr0_vgpr1 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec
+ ;
+ ; GFX942-LABEL: name: av_mov_b64_imm_pseudo_vgpr_0
+ ; GFX942: $vgpr0_vgpr1 = V_MOV_B64_e32 0, implicit $exec
+ $vgpr0_vgpr1 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_vgpr_64
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX908-LABEL: name: av_mov_b64_imm_pseudo_vgpr_64
+ ; GFX908: $vgpr0 = V_MOV_B32_e32 64, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ;
+ ; GFX90A-LABEL: name: av_mov_b64_imm_pseudo_vgpr_64
+ ; GFX90A: $vgpr0 = V_MOV_B32_e32 64, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX90A-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ;
+ ; GFX942-LABEL: name: av_mov_b64_imm_pseudo_vgpr_64
+ ; GFX942: $vgpr0_vgpr1 = V_MOV_B64_e32 64, implicit $exec
+ $vgpr0_vgpr1 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_64_hi_0_lo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_64_hi_0_lo
+ ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1
+ $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 274877906944, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_64_hi_2_lo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_64_hi_2_lo
+ ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1
+ $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 274877906946, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_neg16_hi_9_lo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_neg16_hi_9_lo
+ ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 9, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 -16, implicit $exec, implicit-def $agpr0_agpr1
+ $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 18446744004990074889, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX908-LABEL: name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+ ; GFX908: $vgpr0 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX908-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr2_vgpr3
+ ; GFX908-NEXT: $vgpr3 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr2_vgpr3
+ ; GFX908-NEXT: $vgpr4 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr4_vgpr5
+ ; GFX908-NEXT: $vgpr5 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr4_vgpr5
+ ;
+ ; GFX90A-LABEL: name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+ ; GFX90A: $vgpr0 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX90A-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; GFX90A-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr2_vgpr3
+ ; GFX90A-NEXT: $vgpr3 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr2_vgpr3
+ ; GFX90A-NEXT: $vgpr4_vgpr5 = V_PK_MOV_B32 8, 1042479491, 8, 1042479491, 0, 0, 0, 0, 0, implicit $exec
+ ;
+ ; GFX942-LABEL: name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+ ; GFX942: $vgpr0_vgpr1 = V_MOV_B64_e32 1042479491, implicit $exec
+ ; GFX942-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr2_vgpr3
+ ; GFX942-NEXT: $vgpr3 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr2_vgpr3
+ ; GFX942-NEXT: $vgpr4_vgpr5 = V_PK_MOV_B32 8, 1042479491, 8, 1042479491, 0, 0, 0, 0, 0, implicit $exec
+ $vgpr0_vgpr1 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+ $vgpr2_vgpr3 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
+ $vgpr4_vgpr5 = AV_MOV_B64_IMM_PSEUDO 4477415321638205827, implicit $exec
+...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index 74c4a2da50221..93c67ac18f769 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -781,3 +781,110 @@ body: |
S_ENDPGM 0
...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_agpr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_agpr
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ ; GCN-NEXT: $agpr0_agpr1 = COPY [[AV_MOV_]]
+ ; GCN-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ $agpr0_agpr1 = COPY %0
+ S_ENDPGM 0, implicit $agpr0_agpr1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_vgpr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_vgpr
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[AV_MOV_]]
+ ; GCN-NEXT: S_ENDPGM 0, implicit $vgpr0_vgpr1
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ $vgpr0_vgpr1 = COPY %0
+ S_ENDPGM 0, implicit $vgpr0_vgpr1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_agpr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_agpr
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64 = COPY [[AV_MOV_]]
+ ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ %1:areg_64 = COPY %0
+ S_ENDPGM 0, implicit %1
+
+...
+
+# Splat value across both halves of the register
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_0
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY [[AV_MOV_]]
+ ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ %1:vreg_64 = COPY %0
+ S_ENDPGM 0, implicit %1
+
+...
+
+# Low and high halves are different inline constants
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY [[AV_MOV_]]
+ ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+ %1:vreg_64 = COPY %0
+ S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub0
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]].sub0
+ ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+ %1:vgpr_32 = COPY %0.sub0
+ S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub1
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]].sub1
+ ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+ %1:vgpr_32 = COPY %0.sub1
+ S_ENDPGM 0, implicit %1
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir b/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir
index c34c9749d553a..4d8fb8db624f8 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir
@@ -105,3 +105,60 @@ body: |
...
+---
+name: av_mov_b64_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ scratchRSrcReg: '$sgpr72_sgpr73_sgpr74_sgpr75'
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 7
+body: |
+ bb.0:
+ liveins: $vgpr0, $sgpr4_sgpr5
+
+ ; CHECK-LABEL: name: av_mov_b64_split
+ ; CHECK: liveins: $agpr6, $agpr7, $agpr8, $agpr9, $vgpr0, $sgpr4_sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+ ; CHECK-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 1, implicit $exec, implicit-def $agpr2_agpr3
+ ; CHECK-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr2_agpr3
+ ; CHECK-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec, implicit-def $agpr4_agpr5
+ ; CHECK-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr4_agpr5
+ ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 3, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; CHECK-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1
+ ; CHECK-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit killed $vgpr0_vgpr1
+ ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 4, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; CHECK-NEXT: $agpr9 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1
+ ; CHECK-NEXT: $agpr8 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit killed $vgpr0_vgpr1
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr2_agpr3
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr4_agpr5
+ ; CHECK-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr7, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; CHECK-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr6, implicit $exec, implicit $vgpr0_vgpr1
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $vgpr0_vgpr1
+ ; CHECK-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr9, implicit $exec, implicit-def $vgpr0_vgpr1
+ ; CHECK-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr8, implicit $exec, implicit $vgpr0_vgpr1
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $vgpr0_vgpr1
+ %0:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ %1:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+ %2:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 2, implicit $exec
+ %3:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 3, implicit $exec
+ %4:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4, implicit $exec
+
+ %5:areg_64_align2 = COPY %0
+ %6:areg_64_align2 = COPY %1
+ %7:areg_64_align2 = COPY %2
+ %8:areg_64_align2 = COPY %3
+ %9:areg_64_align2 = COPY %4
+
+ S_NOP 0, implicit %5
+ S_NOP 0, implicit %6
+ S_NOP 0, implicit %7
+ S_NOP 0, implicit %8
+ S_NOP 0, implicit %9
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir b/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
index 764a1e1090181..770e7c048620d 100644
--- a/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
@@ -767,3 +767,42 @@ body: |
%1:av_32 = COPY killed %0
SI_RETURN_TO_EPILOG implicit %1
...
+
+---
+name: fold_av_mov_b64_imm_pseudo_inlineimm_to_vgpr
+body: |
+ bb.0:
+ ; GCN-LABEL: name: fold_av_mov_b64_imm_pseudo_inlineimm_to_vgpr
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ ; GCN-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 64, implicit $exec
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[V_MOV_B]]
+ %0:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ %1:vreg_64_align2 = COPY killed %0
+ SI_RETURN_TO_EPILOG implicit %1
+...
+
+---
+name: fold_av_mov_b64_imm_pseudo_inlineimm_to_agpr
+body: |
+ bb.0:
+ ; GCN-LABEL: name: fold_av_mov_b64_imm_pseudo_inlineimm_to_agpr
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64_align2 = COPY killed [[AV_MOV_]]
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[COPY]]
+ %0:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ %1:areg_64_align2 = COPY killed %0
+ SI_RETURN_TO_EPILOG implicit %1
+...
+
+---
+name: fold_av_mov_b64_imm_pseudo_inlineimm_to_av
+body: |
+ bb.0:
+ ; GCN-LABEL: name: fold_av_mov_b64_imm_pseudo_inlineimm_to_av
+ ; GCN: [[AV_MOV_:%[0-9]+]]:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64_align2 = COPY killed [[AV_MOV_]]
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[COPY]]
+ %0:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ %1:av_64_align2 = COPY killed %0
+ SI_RETURN_TO_EPILOG implicit %1
+...
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir b/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir
index 9dad99f307c19..fa4461c1cc387 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir
@@ -135,3 +135,48 @@ body: |
S_ENDPGM 0, implicit %4
...
+
+---
+name: av_mov_imm_b64
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: av_mov_imm_b64
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $sgpr0_sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[AV_MOV_:%[0-9]+]].sub0_sub1:vreg_192 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]].sub2_sub3:vreg_192 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: $exec = S_MOV_B64_term [[COPY]]
+ ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]].sub0_sub1:vreg_192 = V_MUL_F64_e64 0, [[AV_MOV_]].sub0_sub1, 0, [[AV_MOV_]].sub0_sub1, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]].sub2_sub3:vreg_192 = V_MUL_F64_e64 0, [[AV_MOV_]].sub2_sub3, 0, [[AV_MOV_]].sub2_sub3, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[AV_MOV_]]
+ bb.0:
+ liveins: $sgpr0_sgpr1
+ %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+ %1:vreg_64 = COPY %0
+ %2:vreg_64 = COPY %0
+ %3:sreg_64 = COPY $sgpr0_sgpr1
+ $exec = S_MOV_B64_term %3:sreg_64
+ S_CBRANCH_EXECZ %bb.2, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ %1:vreg_64 = V_MUL_F64_e64 0, %1:vreg_64, 0, %1:vreg_64, 0, 0, implicit $mode, implicit $exec
+ %2:vreg_64 = V_MUL_F64_e64 0, %2:vreg_64, 0, %2:vreg_64, 0, 0, implicit $mode, implicit $exec
+
+ bb.2:
+ undef %4.sub0_sub1:vreg_192 = COPY %1:vreg_64
+ %4.sub2_sub3:vreg_192 = COPY %2:vreg_64
+ S_ENDPGM 0, implicit %4
+
+...
diff --git a/llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir b/llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir
new file mode 100644
index 0000000000000..be3cf3de24d51
--- /dev/null
+++ b/llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir
@@ -0,0 +1,25 @@
+# RUN: not --crash llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -run-pass=none -filetype=null %s 2>&1 | FileCheck %s
+
+# FIXME: Value error messages are misleading.
+---
+name: test
+body: |
+ bb.0:
+ %0:av_64 = IMPLICIT_DEF
+
+ ; CHECK: *** Bad machine code: Expected immediate, but got non-immediate ***
+ %1:av_64 = AV_MOV_B64_IMM_PSEUDO %1, implicit $exec
+
+ ; Low half isn't inline imm
+ ; CHECK: *** Bad machine code: VOP3 instruction uses literal ***
+ %2:av_64 = AV_MOV_B64_IMM_PSEUDO 65, implicit $exec
+
+ ; High half isn't inline imm
+ ; CHECK: *** Bad machine code: VOP3 instruction uses literal ***
+ %2:av_64 = AV_MOV_B64_IMM_PSEUDO 279172874240, implicit $exec
+
+ ; Neither half is inline imm
+ ; CHECK: *** Bad machine code: VOP3 instruction uses literal ***
+ %3:av_64 = AV_MOV_B64_IMM_PSEUDO 279172874306, implicit $exec
+
+...
>From f8e31e5e0bfc711c9d2ed7308e77d03a3d08b88b Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 21:24:13 +0900
Subject: [PATCH 2/5] Remove dead code
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 98ff9270391d2..df638bd65bdaa 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -5552,20 +5552,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
}
}
-#if 0
- if (Opcode == AMDGPU::AV_MOV_B64_IMM_PSEUDO) {
- const MachineOperand &SrcOp = MI.getOperand(1);
- uint64_t Imm = static_cast<uint64_t>(SrcOp.getImm());
-
- if (!AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) ||
- !AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm())) {
- ErrInfo = "AV_MOV_B64_IMM_PSEUDO only accepts a pair of 32-bit inline "
- "immediates";
- return false;
- }
- }
-#endif
-
if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
const MachineOperand &SrcOp = MI.getOperand(1);
if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
>From 235a72573278b99167b33711996f8f801f61d2aa Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 21:27:00 +0900
Subject: [PATCH 3/5] Fix comment
---
llvm/lib/Target/AMDGPU/SIInstructions.td | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 13dc68157197f..2d019cb04c353 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -150,7 +150,7 @@ def AV_MOV_B32_IMM_PSEUDO
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
- // Imprecise, technically if AGPR it's VOP3 and VOP1 for AGPR. But
+ // Imprecise, technically if AGPR it's VOP3 and VOP1 for VGPR. But
// this tricks the rematerialize logic into working for it.
let VOP3 = 1;
let isMoveImm = 1;
@@ -171,8 +171,9 @@ def AV_MOV_B64_IMM_PSEUDO
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
- // Imprecise, technically if AGPR it's VOP3 and VOP1 for AGPR. But
- // this tricks the rematerialize logic into working for it.
+ // Imprecise, technically if AGPR it's 2 x VOP3 and 2 x VOP1 for
+ // VGPR. But this tricks the rematerialize logic into working for
+ // it.
let VOP3 = 1;
let isMoveImm = 1;
let SchedRW = [Write32Bit, Write32Bit];
>From f8e02ce702188b6999cab8107edebad9f75b1ca5 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 18:21:20 +0900
Subject: [PATCH 4/5] AMDGPU: Start using AV_MOV_B64_IMM_PSEUDO
---
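Illustration, not part of the patch: the effect on MIR, taken from the
updated amdgpu-prepare-agpr-alloc.mir checks below. When both 32-bit
halves of the immediate are inline constants, the pass retargets the
64-bit move to the AV pseudo so the allocator is free to assign either
an AGPR or a VGPR pair:

  ; before
  %1:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
  %5:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
  ; after
  %1:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 54, implicit $exec
  %5:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec

65 is not an inline constant, so that move is left alone, as are
register and frame-index sources like V_MOV_B64_PSEUDO %stack.0.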
.../Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp | 22 +-
.../AMDGPU/amdgpu-prepare-agpr-alloc.mir | 53 +-
.../AMDGPU/av-split-dead-valno-crash.ll | 52 +-
.../AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll | 16 +-
.../CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll | 578 +++++++++---------
...m.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll | 8 +-
6 files changed, 388 insertions(+), 341 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
index 3b06e9b00ac69..0137b3f5943d7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
@@ -34,6 +34,8 @@ class AMDGPUPrepareAGPRAllocImpl {
const SIInstrInfo &TII;
MachineRegisterInfo &MRI;
+ bool isAV64Imm(const MachineOperand &MO) const;
+
public:
AMDGPUPrepareAGPRAllocImpl(const GCNSubtarget &ST, MachineRegisterInfo &MRI)
: TII(*ST.getInstrInfo()), MRI(MRI) {}
@@ -85,11 +87,16 @@ AMDGPUPrepareAGPRAllocPass::run(MachineFunction &MF,
return PreservedAnalyses::all();
}
+bool AMDGPUPrepareAGPRAllocImpl::isAV64Imm(const MachineOperand &MO) const {
+ return MO.isImm() && TII.isLegalAV64PseudoImm(MO.getImm());
+}
+
bool AMDGPUPrepareAGPRAllocImpl::run(MachineFunction &MF) {
if (MRI.isReserved(AMDGPU::AGPR0))
return false;
- const MCInstrDesc &AVImmPseudo = TII.get(AMDGPU::AV_MOV_B32_IMM_PSEUDO);
+ const MCInstrDesc &AVImmPseudo32 = TII.get(AMDGPU::AV_MOV_B32_IMM_PSEUDO);
+ const MCInstrDesc &AVImmPseudo64 = TII.get(AMDGPU::AV_MOV_B64_IMM_PSEUDO);
bool Changed = false;
for (MachineBasicBlock &MBB : MF) {
@@ -98,8 +105,19 @@ bool AMDGPUPrepareAGPRAllocImpl::run(MachineFunction &MF) {
TII.isInlineConstant(MI, 1)) ||
(MI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
MI.getOperand(1).isImm())) {
- MI.setDesc(AVImmPseudo);
+ MI.setDesc(AVImmPseudo32);
+ Changed = true;
+ continue;
+ }
+
+ // TODO: If only half of the value is rewritable, is it worth splitting it
+ // up?
+ if ((MI.getOpcode() == AMDGPU::V_MOV_B64_e64 ||
+ MI.getOpcode() == AMDGPU::V_MOV_B64_PSEUDO) &&
+ isAV64Imm(MI.getOperand(1))) {
+ MI.setDesc(AVImmPseudo64);
Changed = true;
+ continue;
}
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
index d277c8104fe44..aaacf1d6f793b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
@@ -114,22 +114,22 @@ body: |
; HAS-AGPR-NEXT: liveins: $vgpr0_vgpr1
; HAS-AGPR-NEXT: {{ $}}
; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+ ; HAS-AGPR-NEXT: [[AV_MOV_:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 54, implicit $exec
+ ; HAS-AGPR-NEXT: [[AV_MOV_1:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+ ; HAS-AGPR-NEXT: [[AV_MOV_2:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; HAS-AGPR-NEXT: [[AV_MOV_3:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+ ; HAS-AGPR-NEXT: [[AV_MOV_4:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+ ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
; HAS-AGPR-NEXT: {{ $}}
; HAS-AGPR-NEXT: bb.1:
- ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+ ; HAS-AGPR-NEXT: [[AV_MOV_5:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 3, implicit $exec
;
; NO-AGPR-LABEL: name: func64
; NO-AGPR: bb.0:
@@ -181,14 +181,23 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr0
- ; HAS-AGPR-LABEL: name: func64_no_agprs
- ; HAS-AGPR: liveins: $vgpr0
- ; HAS-AGPR-NEXT: {{ $}}
- ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
- ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ ; GFX90A-LABEL: name: func64_no_agprs
+ ; GFX90A: liveins: $vgpr0
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+ ; GFX90A-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+ ;
+ ; GFX908-LABEL: name: func64_no_agprs
+ ; GFX908: liveins: $vgpr0
+ ; GFX908-NEXT: {{ $}}
+ ; GFX908-NEXT: [[AV_MOV_:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+ ; GFX908-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+ ; GFX908-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+ ; GFX908-NEXT: [[AV_MOV_1:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+ ; GFX908-NEXT: [[AV_MOV_2:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
;
; NO-AGPR-LABEL: name: func64_no_agprs
; NO-AGPR: liveins: $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
index 89fe0ab526a8a..614b1e38a530f 100644
--- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
@@ -16,10 +16,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: s_bitcmp1_b32 s0, 8
; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
-; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3]
; CHECK-NEXT: v_mov_b32_e32 v0, 0x9037ab78
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v1
+; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1
+; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3]
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v0
; CHECK-NEXT: v_mov_b32_e32 v3, 0xbe927e4f
; CHECK-NEXT: v_mov_b32_e32 v4, 0x19f4ec90
; CHECK-NEXT: v_mov_b32_e32 v5, 0x3efa01a0
@@ -34,14 +36,14 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: v_mov_b32_e32 v14, 0x8427b883
; CHECK-NEXT: v_mov_b32_e32 v15, 0x3fae1bb4
; CHECK-NEXT: s_mov_b64 s[22:23], 0
-; CHECK-NEXT: v_mov_b32_e32 v16, 0x57b87036
-; CHECK-NEXT: v_mov_b32_e32 v17, 0x3fb3b136
+; CHECK-NEXT: v_mov_b32_e32 v0, 0x57b87036
+; CHECK-NEXT: v_mov_b32_e32 v1, 0x3fb3b136
; CHECK-NEXT: s_and_b64 s[4:5], exec, s[16:17]
; CHECK-NEXT: v_mov_b32_e32 v18, 0x55555523
; CHECK-NEXT: v_mov_b32_e32 v19, 0xbfd55555
; CHECK-NEXT: s_and_b64 s[6:7], exec, s[18:19]
; CHECK-NEXT: v_mov_b32_e32 v20, 0
-; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
+; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31
; CHECK-NEXT: ; implicit-def: $vgpr22_vgpr23
; CHECK-NEXT: s_branch .LBB0_2
; CHECK-NEXT: .LBB0_1: ; %Flow9
@@ -61,9 +63,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
; CHECK-NEXT: flat_load_dwordx2 v[24:25], v[24:25]
-; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[0:1]
+; CHECK-NEXT: v_accvgpr_read_b32 v27, a3
+; CHECK-NEXT: v_accvgpr_read_b32 v26, a2
; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[2:3]
-; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[16:17]
+; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[0:1]
+; CHECK-NEXT: v_accvgpr_write_b32 a0, 0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, 0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[24:25]
; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27]
@@ -79,10 +84,9 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[28:29]
; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[14:15]
; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27]
-; CHECK-NEXT: v_fmac_f64_e32 v[30:31], 0, v[28:29]
+; CHECK-NEXT: v_fmac_f64_e32 v[16:17], 0, v[28:29]
; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[18:19]
-; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[30:31]
-; CHECK-NEXT: v_mov_b64_e32 v[30:31], 0
+; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[16:17]
; CHECK-NEXT: s_branch .LBB0_6
; CHECK-NEXT: .LBB0_5: ; %Flow
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
@@ -91,30 +95,30 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: .LBB0_6: ; %.preheader1855.i.i.i3329
; CHECK-NEXT: ; Parent Loop BB0_2 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
-; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[30:31]
+; CHECK-NEXT: v_accvgpr_read_b32 v29, a1
+; CHECK-NEXT: v_accvgpr_read_b32 v28, a0
; CHECK-NEXT: s_mov_b64 s[24:25], -1
; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: s_mov_b64 vcc, s[2:3]
-; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31
+; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
; CHECK-NEXT: s_cbranch_vccz .LBB0_5
; CHECK-NEXT: ; %bb.7: ; %.lr.ph2070.i.i.i3291
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
-; CHECK-NEXT: v_accvgpr_read_b32 v31, a1
-; CHECK-NEXT: v_accvgpr_read_b32 v30, a0
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v30
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v31
; CHECK-NEXT: s_mov_b64 s[8:9], s[18:19]
; CHECK-NEXT: s_mov_b64 vcc, s[6:7]
; CHECK-NEXT: s_cbranch_vccz .LBB0_5
; CHECK-NEXT: ; %bb.8: ; %.preheader1856.preheader.i.i.i3325
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v26
; CHECK-NEXT: s_mov_b64 s[24:25], 0
-; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[26:27]
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v27
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: s_branch .LBB0_5
; CHECK-NEXT: .LBB0_9: ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[10:11]
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: s_mov_b64 s[22:23], 0
-; CHECK-NEXT: v_accvgpr_write_b32 a1, v25
+; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[10:11]
; CHECK-NEXT: s_mov_b64 s[8:9], s[20:21]
; CHECK-NEXT: s_branch .LBB0_15
; CHECK-NEXT: .LBB0_10: ; in Loop: Header=BB0_2 Depth=1
@@ -128,24 +132,22 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; %bb.12: ; %._crit_edge2105.i.i.i2330.loopexit
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], 0, v[28:29]
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: v_cndmask_b32_e64 v23, v23, 0, s[16:17]
-; CHECK-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[8:9]
-; CHECK-NEXT: v_mov_b32_e32 v27, v26
-; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17]
; CHECK-NEXT: v_cndmask_b32_e64 v22, v22, 0, s[16:17]
-; CHECK-NEXT: global_store_dwordx2 v20, v[26:27], s[12:13]
+; CHECK-NEXT: v_cndmask_b32_e64 v16, 0, 1, s[8:9]
+; CHECK-NEXT: v_mov_b32_e32 v17, v16
+; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17]
+; CHECK-NEXT: global_store_dwordx2 v20, v[16:17], s[12:13]
; CHECK-NEXT: s_cselect_b32 s23, s23, 0
; CHECK-NEXT: s_cselect_b32 s22, s22, 0
; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: s_branch .LBB0_14
; CHECK-NEXT: .LBB0_13: ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: v_mov_b64_e32 v[22:23], 0
; CHECK-NEXT: .LBB0_14: ; %Flow6
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: v_accvgpr_write_b32 a1, v25
+; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[24:25]
; CHECK-NEXT: .LBB0_15: ; %Flow6
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: s_mov_b64 s[24:25], -1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
index 8081a15b53bb7..284ced1727b7e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
@@ -39,16 +39,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x
; GCN-NEXT: v_accvgpr_write_b32 a13, s21
; GCN-NEXT: v_accvgpr_write_b32 a14, s22
; GCN-NEXT: v_accvgpr_write_b32 a15, s23
-; GCN-NEXT: v_mov_b64_e32 v[14:15], 0
; GCN-NEXT: v_mov_b32_e32 v16, s16
+; GCN-NEXT: v_mov_b32_e32 v17, s17
; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15]
+; GCN-NEXT: v_mov_b32_e32 v18, s18
+; GCN-NEXT: v_mov_b32_e32 v19, s19
; GCN-NEXT: v_mov_b32_e32 v0, s20
; GCN-NEXT: v_mov_b32_e32 v1, s21
; GCN-NEXT: v_mov_b32_e32 v2, s22
; GCN-NEXT: v_mov_b32_e32 v3, s23
-; GCN-NEXT: v_mov_b32_e32 v17, s17
-; GCN-NEXT: v_mov_b32_e32 v18, s18
-; GCN-NEXT: v_mov_b32_e32 v19, s19
+; GCN-NEXT: v_mov_b64_e32 v[14:15], 0
; GCN-NEXT: s_nop 4
; GCN-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
@@ -112,16 +112,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__flags(<8 x bfloat> %arg0
; GCN-NEXT: v_accvgpr_write_b32 a13, s21
; GCN-NEXT: v_accvgpr_write_b32 a14, s22
; GCN-NEXT: v_accvgpr_write_b32 a15, s23
-; GCN-NEXT: v_mov_b64_e32 v[14:15], 0
; GCN-NEXT: v_mov_b32_e32 v16, s16
+; GCN-NEXT: v_mov_b32_e32 v17, s17
; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; GCN-NEXT: v_mov_b32_e32 v18, s18
+; GCN-NEXT: v_mov_b32_e32 v19, s19
; GCN-NEXT: v_mov_b32_e32 v0, s20
; GCN-NEXT: v_mov_b32_e32 v1, s21
; GCN-NEXT: v_mov_b32_e32 v2, s22
; GCN-NEXT: v_mov_b32_e32 v3, s23
-; GCN-NEXT: v_mov_b32_e32 v17, s17
-; GCN-NEXT: v_mov_b32_e32 v18, s18
-; GCN-NEXT: v_mov_b32_e32 v19, s19
+; GCN-NEXT: v_mov_b64_e32 v[14:15], 0
; GCN-NEXT: s_nop 4
; GCN-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
; GCN-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
index d81ec1c61634b..078a043b94604 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll
@@ -406,16 +406,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; SDAG-NEXT: v_accvgpr_write_b32 a13, s21
; SDAG-NEXT: v_accvgpr_write_b32 a14, s22
; SDAG-NEXT: v_accvgpr_write_b32 a15, s23
-; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0
; SDAG-NEXT: v_mov_b32_e32 v16, s16
+; SDAG-NEXT: v_mov_b32_e32 v17, s17
; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15]
+; SDAG-NEXT: v_mov_b32_e32 v18, s18
+; SDAG-NEXT: v_mov_b32_e32 v19, s19
; SDAG-NEXT: v_mov_b32_e32 v0, s20
; SDAG-NEXT: v_mov_b32_e32 v1, s21
; SDAG-NEXT: v_mov_b32_e32 v2, s22
; SDAG-NEXT: v_mov_b32_e32 v3, s23
-; SDAG-NEXT: v_mov_b32_e32 v17, s17
-; SDAG-NEXT: v_mov_b32_e32 v18, s18
-; SDAG-NEXT: v_mov_b32_e32 v19, s19
+; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0
; SDAG-NEXT: s_nop 4
; SDAG-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
@@ -449,9 +449,9 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; GISEL: ; %bb.0:
; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0
-; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48
-; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16
+; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0
+; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16
+; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
@@ -474,31 +474,34 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; GISEL-NEXT: v_accvgpr_write_b32 a14, s22
; GISEL-NEXT: v_accvgpr_write_b32 a15, s23
; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9]
-; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48
; GISEL-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15]
-; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
-; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17]
-; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
-; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
-; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
-; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19]
-; GISEL-NEXT: s_nop 4
-; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 7
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19]
+; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
+; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
;
@@ -530,16 +533,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21
; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22
; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
-; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0
; HEURRC-NEXT: v_mov_b32_e32 v16, s16
+; HEURRC-NEXT: v_mov_b32_e32 v17, s17
; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15]
+; HEURRC-NEXT: v_mov_b32_e32 v18, s18
+; HEURRC-NEXT: v_mov_b32_e32 v19, s19
; HEURRC-NEXT: v_mov_b32_e32 v0, s20
; HEURRC-NEXT: v_mov_b32_e32 v1, s21
; HEURRC-NEXT: v_mov_b32_e32 v2, s22
; HEURRC-NEXT: v_mov_b32_e32 v3, s23
-; HEURRC-NEXT: v_mov_b32_e32 v17, s17
-; HEURRC-NEXT: v_mov_b32_e32 v18, s18
-; HEURRC-NEXT: v_mov_b32_e32 v19, s19
+; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0
; HEURRC-NEXT: s_nop 4
; HEURRC-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
@@ -589,12 +592,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19]
; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21]
; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
-; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0
; VGPRRC-NEXT: v_mov_b32_e32 v48, s16
-; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15]
; VGPRRC-NEXT: v_mov_b32_e32 v49, s17
+; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15]
; VGPRRC-NEXT: v_mov_b32_e32 v50, s18
; VGPRRC-NEXT: v_mov_b32_e32 v51, s19
+; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0
; VGPRRC-NEXT: s_nop 7
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[28:31], off sc0 sc1
@@ -605,12 +608,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[16:19], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1
-; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: v_mov_b32_e32 v0, s20
; VGPRRC-NEXT: v_mov_b32_e32 v1, s21
; VGPRRC-NEXT: v_mov_b32_e32 v2, s22
; VGPRRC-NEXT: v_mov_b32_e32 v3, s23
+; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1
+; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
@@ -788,16 +791,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; SDAG-NEXT: v_accvgpr_write_b32 a13, s21
; SDAG-NEXT: v_accvgpr_write_b32 a14, s22
; SDAG-NEXT: v_accvgpr_write_b32 a15, s23
-; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0
; SDAG-NEXT: v_mov_b32_e32 v16, s16
+; SDAG-NEXT: v_mov_b32_e32 v17, s17
; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; SDAG-NEXT: v_mov_b32_e32 v18, s18
+; SDAG-NEXT: v_mov_b32_e32 v19, s19
; SDAG-NEXT: v_mov_b32_e32 v0, s20
; SDAG-NEXT: v_mov_b32_e32 v1, s21
; SDAG-NEXT: v_mov_b32_e32 v2, s22
; SDAG-NEXT: v_mov_b32_e32 v3, s23
-; SDAG-NEXT: v_mov_b32_e32 v17, s17
-; SDAG-NEXT: v_mov_b32_e32 v18, s18
-; SDAG-NEXT: v_mov_b32_e32 v19, s19
+; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0
; SDAG-NEXT: s_nop 4
; SDAG-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
@@ -831,9 +834,9 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; GISEL: ; %bb.0:
; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0
-; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48
-; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16
+; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0
+; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16
+; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
@@ -856,31 +859,34 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; GISEL-NEXT: v_accvgpr_write_b32 a14, s22
; GISEL-NEXT: v_accvgpr_write_b32 a15, s23
; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9]
-; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48
; GISEL-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
-; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
-; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17]
-; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
-; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
-; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
-; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19]
-; GISEL-NEXT: s_nop 4
-; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 7
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19]
+; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
+; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
;
@@ -912,16 +918,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21
; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22
; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
-; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0
; HEURRC-NEXT: v_mov_b32_e32 v16, s16
+; HEURRC-NEXT: v_mov_b32_e32 v17, s17
; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; HEURRC-NEXT: v_mov_b32_e32 v18, s18
+; HEURRC-NEXT: v_mov_b32_e32 v19, s19
; HEURRC-NEXT: v_mov_b32_e32 v0, s20
; HEURRC-NEXT: v_mov_b32_e32 v1, s21
; HEURRC-NEXT: v_mov_b32_e32 v2, s22
; HEURRC-NEXT: v_mov_b32_e32 v3, s23
-; HEURRC-NEXT: v_mov_b32_e32 v17, s17
-; HEURRC-NEXT: v_mov_b32_e32 v18, s18
-; HEURRC-NEXT: v_mov_b32_e32 v19, s19
+; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0
; HEURRC-NEXT: s_nop 4
; HEURRC-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
@@ -971,12 +977,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19]
; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21]
; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
-; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0
; VGPRRC-NEXT: v_mov_b32_e32 v48, s16
-; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] cbsz:2 abid:3 blgp:1
; VGPRRC-NEXT: v_mov_b32_e32 v49, s17
+; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] cbsz:2 abid:3 blgp:1
; VGPRRC-NEXT: v_mov_b32_e32 v50, s18
; VGPRRC-NEXT: v_mov_b32_e32 v51, s19
+; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0
; VGPRRC-NEXT: s_nop 7
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[28:31], off sc0 sc1
@@ -987,12 +993,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, <
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[16:19], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1
-; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: v_mov_b32_e32 v0, s20
; VGPRRC-NEXT: v_mov_b32_e32 v1, s21
; VGPRRC-NEXT: v_mov_b32_e32 v2, s22
; VGPRRC-NEXT: v_mov_b32_e32 v3, s23
+; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1
+; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
@@ -2978,47 +2984,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32>
; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; SDAG-NEXT: v_mov_b64_e32 v[0:1], 48
; SDAG-NEXT: v_mov_b64_e32 v[2:3], 32
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v8, s24
-; SDAG-NEXT: v_mov_b32_e32 v9, s25
-; SDAG-NEXT: v_mov_b32_e32 v10, s26
-; SDAG-NEXT: v_mov_b32_e32 v11, s27
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s8
-; SDAG-NEXT: v_mov_b32_e32 v12, s28
-; SDAG-NEXT: v_mov_b32_e32 v13, s29
-; SDAG-NEXT: v_mov_b32_e32 v14, s30
-; SDAG-NEXT: v_mov_b32_e32 v15, s31
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s9
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s10
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s11
-; SDAG-NEXT: v_accvgpr_write_b32 a4, s12
-; SDAG-NEXT: v_accvgpr_write_b32 a5, s13
-; SDAG-NEXT: v_accvgpr_write_b32 a6, s14
-; SDAG-NEXT: v_accvgpr_write_b32 a7, s15
-; SDAG-NEXT: v_accvgpr_write_b32 a8, s16
-; SDAG-NEXT: v_accvgpr_write_b32 a9, s17
-; SDAG-NEXT: v_accvgpr_write_b32 a10, s18
-; SDAG-NEXT: v_accvgpr_write_b32 a11, s19
-; SDAG-NEXT: v_accvgpr_write_b32 a12, s20
-; SDAG-NEXT: v_accvgpr_write_b32 a13, s21
-; SDAG-NEXT: v_accvgpr_write_b32 a14, s22
-; SDAG-NEXT: v_accvgpr_write_b32 a15, s23
+; SDAG-NEXT: v_mov_b32_e32 v4, s24
+; SDAG-NEXT: v_mov_b32_e32 v5, s25
+; SDAG-NEXT: v_mov_b32_e32 v6, s26
+; SDAG-NEXT: v_mov_b32_e32 v7, s27
+; SDAG-NEXT: v_accvgpr_write_b32 a31, s23
+; SDAG-NEXT: v_mov_b32_e32 v8, s28
+; SDAG-NEXT: v_mov_b32_e32 v9, s29
+; SDAG-NEXT: v_mov_b32_e32 v10, s30
+; SDAG-NEXT: v_mov_b32_e32 v11, s31
+; SDAG-NEXT: v_accvgpr_write_b32 a30, s22
+; SDAG-NEXT: v_accvgpr_write_b32 a29, s21
+; SDAG-NEXT: v_accvgpr_write_b32 a28, s20
+; SDAG-NEXT: v_accvgpr_write_b32 a27, s19
+; SDAG-NEXT: v_accvgpr_write_b32 a26, s18
+; SDAG-NEXT: v_accvgpr_write_b32 a25, s17
+; SDAG-NEXT: v_accvgpr_write_b32 a24, s16
+; SDAG-NEXT: v_accvgpr_write_b32 a23, s15
+; SDAG-NEXT: v_accvgpr_write_b32 a22, s14
+; SDAG-NEXT: v_accvgpr_write_b32 a21, s13
+; SDAG-NEXT: v_accvgpr_write_b32 a20, s12
+; SDAG-NEXT: v_accvgpr_write_b32 a19, s11
+; SDAG-NEXT: v_accvgpr_write_b32 a18, s10
+; SDAG-NEXT: v_accvgpr_write_b32 a17, s9
+; SDAG-NEXT: v_accvgpr_write_b32 a16, s8
+; SDAG-NEXT: s_nop 1
+; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31]
+; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16
; SDAG-NEXT: v_mov_b64_e32 v[6:7], 0
-; SDAG-NEXT: s_nop 0
-; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15]
; SDAG-NEXT: v_mov_b32_e32 v8, s16
; SDAG-NEXT: v_mov_b32_e32 v9, s17
; SDAG-NEXT: v_mov_b32_e32 v10, s18
; SDAG-NEXT: v_mov_b32_e32 v11, s19
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
+; SDAG-NEXT: s_nop 5
+; SDAG-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
@@ -3047,9 +3053,9 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32>
; GISEL: ; %bb.0:
; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0
-; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48
-; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16
+; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0
+; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16
+; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
@@ -3072,31 +3078,34 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32>
; GISEL-NEXT: v_accvgpr_write_b32 a14, s22
; GISEL-NEXT: v_accvgpr_write_b32 a15, s23
; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9]
-; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48
; GISEL-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[0:3], v[4:7], a[0:15]
-; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
-; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17]
-; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
-; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
-; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
-; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19]
-; GISEL-NEXT: s_nop 4
-; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 7
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19]
+; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
+; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
;
@@ -3106,47 +3115,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32>
; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; HEURRC-NEXT: v_mov_b64_e32 v[0:1], 48
; HEURRC-NEXT: v_mov_b64_e32 v[2:3], 32
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b32_e32 v8, s24
-; HEURRC-NEXT: v_mov_b32_e32 v9, s25
-; HEURRC-NEXT: v_mov_b32_e32 v10, s26
-; HEURRC-NEXT: v_mov_b32_e32 v11, s27
-; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8
-; HEURRC-NEXT: v_mov_b32_e32 v12, s28
-; HEURRC-NEXT: v_mov_b32_e32 v13, s29
-; HEURRC-NEXT: v_mov_b32_e32 v14, s30
-; HEURRC-NEXT: v_mov_b32_e32 v15, s31
-; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9
-; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10
-; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11
-; HEURRC-NEXT: v_accvgpr_write_b32 a4, s12
-; HEURRC-NEXT: v_accvgpr_write_b32 a5, s13
-; HEURRC-NEXT: v_accvgpr_write_b32 a6, s14
-; HEURRC-NEXT: v_accvgpr_write_b32 a7, s15
-; HEURRC-NEXT: v_accvgpr_write_b32 a8, s16
-; HEURRC-NEXT: v_accvgpr_write_b32 a9, s17
-; HEURRC-NEXT: v_accvgpr_write_b32 a10, s18
-; HEURRC-NEXT: v_accvgpr_write_b32 a11, s19
-; HEURRC-NEXT: v_accvgpr_write_b32 a12, s20
-; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21
-; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22
-; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
+; HEURRC-NEXT: v_mov_b32_e32 v4, s24
+; HEURRC-NEXT: v_mov_b32_e32 v5, s25
+; HEURRC-NEXT: v_mov_b32_e32 v6, s26
+; HEURRC-NEXT: v_mov_b32_e32 v7, s27
+; HEURRC-NEXT: v_accvgpr_write_b32 a31, s23
+; HEURRC-NEXT: v_mov_b32_e32 v8, s28
+; HEURRC-NEXT: v_mov_b32_e32 v9, s29
+; HEURRC-NEXT: v_mov_b32_e32 v10, s30
+; HEURRC-NEXT: v_mov_b32_e32 v11, s31
+; HEURRC-NEXT: v_accvgpr_write_b32 a30, s22
+; HEURRC-NEXT: v_accvgpr_write_b32 a29, s21
+; HEURRC-NEXT: v_accvgpr_write_b32 a28, s20
+; HEURRC-NEXT: v_accvgpr_write_b32 a27, s19
+; HEURRC-NEXT: v_accvgpr_write_b32 a26, s18
+; HEURRC-NEXT: v_accvgpr_write_b32 a25, s17
+; HEURRC-NEXT: v_accvgpr_write_b32 a24, s16
+; HEURRC-NEXT: v_accvgpr_write_b32 a23, s15
+; HEURRC-NEXT: v_accvgpr_write_b32 a22, s14
+; HEURRC-NEXT: v_accvgpr_write_b32 a21, s13
+; HEURRC-NEXT: v_accvgpr_write_b32 a20, s12
+; HEURRC-NEXT: v_accvgpr_write_b32 a19, s11
+; HEURRC-NEXT: v_accvgpr_write_b32 a18, s10
+; HEURRC-NEXT: v_accvgpr_write_b32 a17, s9
+; HEURRC-NEXT: v_accvgpr_write_b32 a16, s8
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31]
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16
; HEURRC-NEXT: v_mov_b64_e32 v[6:7], 0
-; HEURRC-NEXT: s_nop 0
-; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15]
; HEURRC-NEXT: v_mov_b32_e32 v8, s16
; HEURRC-NEXT: v_mov_b32_e32 v9, s17
; HEURRC-NEXT: v_mov_b32_e32 v10, s18
; HEURRC-NEXT: v_mov_b32_e32 v11, s19
-; HEURRC-NEXT: s_nop 7
-; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
+; HEURRC-NEXT: s_nop 5
+; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
@@ -3177,37 +3186,40 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32>
; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], 48
; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], 32
-; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], 16
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b32_e32 v40, s24
-; VGPRRC-NEXT: v_mov_b32_e32 v41, s25
-; VGPRRC-NEXT: v_mov_b32_e32 v42, s26
-; VGPRRC-NEXT: v_mov_b32_e32 v43, s27
-; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b32_e32 v44, s28
-; VGPRRC-NEXT: v_mov_b32_e32 v45, s29
-; VGPRRC-NEXT: v_mov_b32_e32 v46, s30
-; VGPRRC-NEXT: v_mov_b32_e32 v47, s31
-; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[16:17]
-; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19]
-; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21]
-; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
-; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], 0
-; VGPRRC-NEXT: s_nop 0
-; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[16:31], v[40:43], v[44:47], v[0:15]
+; VGPRRC-NEXT: v_mov_b32_e32 v36, s24
+; VGPRRC-NEXT: v_mov_b32_e32 v37, s25
+; VGPRRC-NEXT: v_mov_b32_e32 v38, s26
+; VGPRRC-NEXT: v_mov_b32_e32 v39, s27
+; VGPRRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
+; VGPRRC-NEXT: v_mov_b32_e32 v40, s28
+; VGPRRC-NEXT: v_mov_b32_e32 v41, s29
+; VGPRRC-NEXT: v_mov_b32_e32 v42, s30
+; VGPRRC-NEXT: v_mov_b32_e32 v43, s31
+; VGPRRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
+; VGPRRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
+; VGPRRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
+; VGPRRC-NEXT: v_mov_b64_e32 v[22:23], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
+; VGPRRC-NEXT: s_nop 1
+; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[0:15], v[36:39], v[40:43], v[16:31]
; VGPRRC-NEXT: s_nop 7
; VGPRRC-NEXT: s_nop 3
-; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[28:31], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[12:15], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[24:27], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[8:11], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[20:23], off sc0 sc1
+; VGPRRC-NEXT: s_nop 0
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], 16
+; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[4:7], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[16:19], off sc0 sc1
+; VGPRRC-NEXT: s_nop 0
+; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], 0
+; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
+; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s16
; VGPRRC-NEXT: v_mov_b32_e32 v1, s17
; VGPRRC-NEXT: v_mov_b32_e32 v2, s18
@@ -3226,14 +3238,14 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32>
; VGPRRC-NEXT: v_mov_b32_e32 v1, s9
; VGPRRC-NEXT: v_mov_b32_e32 v2, s10
; VGPRRC-NEXT: v_mov_b32_e32 v3, s11
-; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s12
; VGPRRC-NEXT: v_mov_b32_e32 v1, s13
; VGPRRC-NEXT: v_mov_b32_e32 v2, s14
; VGPRRC-NEXT: v_mov_b32_e32 v3, s15
-; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_i32_32x32x32_i8:
@@ -3386,47 +3398,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4
; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; SDAG-NEXT: v_mov_b64_e32 v[0:1], 48
; SDAG-NEXT: v_mov_b64_e32 v[2:3], 32
-; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: v_mov_b32_e32 v8, s24
-; SDAG-NEXT: v_mov_b32_e32 v9, s25
-; SDAG-NEXT: v_mov_b32_e32 v10, s26
-; SDAG-NEXT: v_mov_b32_e32 v11, s27
-; SDAG-NEXT: v_accvgpr_write_b32 a0, s8
-; SDAG-NEXT: v_mov_b32_e32 v12, s28
-; SDAG-NEXT: v_mov_b32_e32 v13, s29
-; SDAG-NEXT: v_mov_b32_e32 v14, s30
-; SDAG-NEXT: v_mov_b32_e32 v15, s31
-; SDAG-NEXT: v_accvgpr_write_b32 a1, s9
-; SDAG-NEXT: v_accvgpr_write_b32 a2, s10
-; SDAG-NEXT: v_accvgpr_write_b32 a3, s11
-; SDAG-NEXT: v_accvgpr_write_b32 a4, s12
-; SDAG-NEXT: v_accvgpr_write_b32 a5, s13
-; SDAG-NEXT: v_accvgpr_write_b32 a6, s14
-; SDAG-NEXT: v_accvgpr_write_b32 a7, s15
-; SDAG-NEXT: v_accvgpr_write_b32 a8, s16
-; SDAG-NEXT: v_accvgpr_write_b32 a9, s17
-; SDAG-NEXT: v_accvgpr_write_b32 a10, s18
-; SDAG-NEXT: v_accvgpr_write_b32 a11, s19
-; SDAG-NEXT: v_accvgpr_write_b32 a12, s20
-; SDAG-NEXT: v_accvgpr_write_b32 a13, s21
-; SDAG-NEXT: v_accvgpr_write_b32 a14, s22
-; SDAG-NEXT: v_accvgpr_write_b32 a15, s23
+; SDAG-NEXT: v_mov_b32_e32 v4, s24
+; SDAG-NEXT: v_mov_b32_e32 v5, s25
+; SDAG-NEXT: v_mov_b32_e32 v6, s26
+; SDAG-NEXT: v_mov_b32_e32 v7, s27
+; SDAG-NEXT: v_accvgpr_write_b32 a31, s23
+; SDAG-NEXT: v_mov_b32_e32 v8, s28
+; SDAG-NEXT: v_mov_b32_e32 v9, s29
+; SDAG-NEXT: v_mov_b32_e32 v10, s30
+; SDAG-NEXT: v_mov_b32_e32 v11, s31
+; SDAG-NEXT: v_accvgpr_write_b32 a30, s22
+; SDAG-NEXT: v_accvgpr_write_b32 a29, s21
+; SDAG-NEXT: v_accvgpr_write_b32 a28, s20
+; SDAG-NEXT: v_accvgpr_write_b32 a27, s19
+; SDAG-NEXT: v_accvgpr_write_b32 a26, s18
+; SDAG-NEXT: v_accvgpr_write_b32 a25, s17
+; SDAG-NEXT: v_accvgpr_write_b32 a24, s16
+; SDAG-NEXT: v_accvgpr_write_b32 a23, s15
+; SDAG-NEXT: v_accvgpr_write_b32 a22, s14
+; SDAG-NEXT: v_accvgpr_write_b32 a21, s13
+; SDAG-NEXT: v_accvgpr_write_b32 a20, s12
+; SDAG-NEXT: v_accvgpr_write_b32 a19, s11
+; SDAG-NEXT: v_accvgpr_write_b32 a18, s10
+; SDAG-NEXT: v_accvgpr_write_b32 a17, s9
+; SDAG-NEXT: v_accvgpr_write_b32 a16, s8
+; SDAG-NEXT: s_nop 1
+; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31] cbsz:2 abid:3 blgp:1
+; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16
; SDAG-NEXT: v_mov_b64_e32 v[6:7], 0
-; SDAG-NEXT: s_nop 0
-; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1
; SDAG-NEXT: v_mov_b32_e32 v8, s16
; SDAG-NEXT: v_mov_b32_e32 v9, s17
; SDAG-NEXT: v_mov_b32_e32 v10, s18
; SDAG-NEXT: v_mov_b32_e32 v11, s19
-; SDAG-NEXT: s_nop 7
-; SDAG-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
+; SDAG-NEXT: s_nop 5
+; SDAG-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
+; SDAG-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1
; SDAG-NEXT: s_waitcnt vmcnt(0)
@@ -3455,9 +3467,9 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4
; GISEL: ; %bb.0:
; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24
; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
-; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0
-; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48
-; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16
+; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0
+; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16
+; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27]
@@ -3480,31 +3492,34 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4
; GISEL-NEXT: v_accvgpr_write_b32 a14, s22
; GISEL-NEXT: v_accvgpr_write_b32 a15, s23
; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9]
-; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48
; GISEL-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
-; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
-; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17]
-; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
-; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15]
; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
-; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
-; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19]
-; GISEL-NEXT: s_nop 4
-; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 7
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1
+; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19]
+; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
-; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1
+; GISEL-NEXT: s_nop 0
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23]
+; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
;
@@ -3514,47 +3529,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4
; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; HEURRC-NEXT: v_mov_b64_e32 v[0:1], 48
; HEURRC-NEXT: v_mov_b64_e32 v[2:3], 32
-; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16
; HEURRC-NEXT: s_waitcnt lgkmcnt(0)
-; HEURRC-NEXT: v_mov_b32_e32 v8, s24
-; HEURRC-NEXT: v_mov_b32_e32 v9, s25
-; HEURRC-NEXT: v_mov_b32_e32 v10, s26
-; HEURRC-NEXT: v_mov_b32_e32 v11, s27
-; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8
-; HEURRC-NEXT: v_mov_b32_e32 v12, s28
-; HEURRC-NEXT: v_mov_b32_e32 v13, s29
-; HEURRC-NEXT: v_mov_b32_e32 v14, s30
-; HEURRC-NEXT: v_mov_b32_e32 v15, s31
-; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9
-; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10
-; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11
-; HEURRC-NEXT: v_accvgpr_write_b32 a4, s12
-; HEURRC-NEXT: v_accvgpr_write_b32 a5, s13
-; HEURRC-NEXT: v_accvgpr_write_b32 a6, s14
-; HEURRC-NEXT: v_accvgpr_write_b32 a7, s15
-; HEURRC-NEXT: v_accvgpr_write_b32 a8, s16
-; HEURRC-NEXT: v_accvgpr_write_b32 a9, s17
-; HEURRC-NEXT: v_accvgpr_write_b32 a10, s18
-; HEURRC-NEXT: v_accvgpr_write_b32 a11, s19
-; HEURRC-NEXT: v_accvgpr_write_b32 a12, s20
-; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21
-; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22
-; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23
+; HEURRC-NEXT: v_mov_b32_e32 v4, s24
+; HEURRC-NEXT: v_mov_b32_e32 v5, s25
+; HEURRC-NEXT: v_mov_b32_e32 v6, s26
+; HEURRC-NEXT: v_mov_b32_e32 v7, s27
+; HEURRC-NEXT: v_accvgpr_write_b32 a31, s23
+; HEURRC-NEXT: v_mov_b32_e32 v8, s28
+; HEURRC-NEXT: v_mov_b32_e32 v9, s29
+; HEURRC-NEXT: v_mov_b32_e32 v10, s30
+; HEURRC-NEXT: v_mov_b32_e32 v11, s31
+; HEURRC-NEXT: v_accvgpr_write_b32 a30, s22
+; HEURRC-NEXT: v_accvgpr_write_b32 a29, s21
+; HEURRC-NEXT: v_accvgpr_write_b32 a28, s20
+; HEURRC-NEXT: v_accvgpr_write_b32 a27, s19
+; HEURRC-NEXT: v_accvgpr_write_b32 a26, s18
+; HEURRC-NEXT: v_accvgpr_write_b32 a25, s17
+; HEURRC-NEXT: v_accvgpr_write_b32 a24, s16
+; HEURRC-NEXT: v_accvgpr_write_b32 a23, s15
+; HEURRC-NEXT: v_accvgpr_write_b32 a22, s14
+; HEURRC-NEXT: v_accvgpr_write_b32 a21, s13
+; HEURRC-NEXT: v_accvgpr_write_b32 a20, s12
+; HEURRC-NEXT: v_accvgpr_write_b32 a19, s11
+; HEURRC-NEXT: v_accvgpr_write_b32 a18, s10
+; HEURRC-NEXT: v_accvgpr_write_b32 a17, s9
+; HEURRC-NEXT: v_accvgpr_write_b32 a16, s8
+; HEURRC-NEXT: s_nop 1
+; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31] cbsz:2 abid:3 blgp:1
+; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16
; HEURRC-NEXT: v_mov_b64_e32 v[6:7], 0
-; HEURRC-NEXT: s_nop 0
-; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1
; HEURRC-NEXT: v_mov_b32_e32 v8, s16
; HEURRC-NEXT: v_mov_b32_e32 v9, s17
; HEURRC-NEXT: v_mov_b32_e32 v10, s18
; HEURRC-NEXT: v_mov_b32_e32 v11, s19
-; HEURRC-NEXT: s_nop 7
-; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1
+; HEURRC-NEXT: s_nop 5
+; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
-; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1
+; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
; HEURRC-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1
; HEURRC-NEXT: s_waitcnt vmcnt(0)
@@ -3585,37 +3600,40 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4
; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64
; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], 48
; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], 32
-; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], 16
; VGPRRC-NEXT: s_waitcnt lgkmcnt(0)
-; VGPRRC-NEXT: v_mov_b32_e32 v40, s24
-; VGPRRC-NEXT: v_mov_b32_e32 v41, s25
-; VGPRRC-NEXT: v_mov_b32_e32 v42, s26
-; VGPRRC-NEXT: v_mov_b32_e32 v43, s27
-; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; VGPRRC-NEXT: v_mov_b32_e32 v44, s28
-; VGPRRC-NEXT: v_mov_b32_e32 v45, s29
-; VGPRRC-NEXT: v_mov_b32_e32 v46, s30
-; VGPRRC-NEXT: v_mov_b32_e32 v47, s31
-; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
-; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
-; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
-; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[16:17]
-; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19]
-; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21]
-; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
-; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], 0
-; VGPRRC-NEXT: s_nop 0
-; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[16:31], v[40:43], v[44:47], v[0:15] cbsz:2 abid:3 blgp:1
+; VGPRRC-NEXT: v_mov_b32_e32 v36, s24
+; VGPRRC-NEXT: v_mov_b32_e32 v37, s25
+; VGPRRC-NEXT: v_mov_b32_e32 v38, s26
+; VGPRRC-NEXT: v_mov_b32_e32 v39, s27
+; VGPRRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23]
+; VGPRRC-NEXT: v_mov_b32_e32 v40, s28
+; VGPRRC-NEXT: v_mov_b32_e32 v41, s29
+; VGPRRC-NEXT: v_mov_b32_e32 v42, s30
+; VGPRRC-NEXT: v_mov_b32_e32 v43, s31
+; VGPRRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21]
+; VGPRRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19]
+; VGPRRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17]
+; VGPRRC-NEXT: v_mov_b64_e32 v[22:23], s[14:15]
+; VGPRRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
+; VGPRRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
+; VGPRRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9]
+; VGPRRC-NEXT: s_nop 1
+; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[0:15], v[36:39], v[40:43], v[16:31] cbsz:2 abid:3 blgp:1
; VGPRRC-NEXT: s_nop 7
; VGPRRC-NEXT: s_nop 3
-; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[28:31], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[12:15], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[24:27], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[8:11], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[20:23], off sc0 sc1
+; VGPRRC-NEXT: s_nop 0
+; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], 16
+; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[4:7], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
-; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[16:19], off sc0 sc1
+; VGPRRC-NEXT: s_nop 0
+; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], 0
+; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
+; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s16
; VGPRRC-NEXT: v_mov_b32_e32 v1, s17
; VGPRRC-NEXT: v_mov_b32_e32 v2, s18
@@ -3634,14 +3652,14 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4
; VGPRRC-NEXT: v_mov_b32_e32 v1, s9
; VGPRRC-NEXT: v_mov_b32_e32 v2, s10
; VGPRRC-NEXT: v_mov_b32_e32 v3, s11
-; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_nop 0
; VGPRRC-NEXT: v_mov_b32_e32 v0, s12
; VGPRRC-NEXT: v_mov_b32_e32 v1, s13
; VGPRRC-NEXT: v_mov_b32_e32 v2, s14
; VGPRRC-NEXT: v_mov_b32_e32 v3, s15
-; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[0:3], off sc0 sc1
+; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1
; VGPRRC-NEXT: s_waitcnt vmcnt(0)
; VGPRRC-NEXT: s_endpgm
; AGPR-LABEL: test_mfma_i32_32x32x32_i8__flags:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll
index 0b2818f38149d..24af3fa5ff9b7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll
@@ -4784,8 +4784,8 @@ define amdgpu_kernel void @test_mfma_scale_f32_32x32x64_f8f6f4_0_0__nonmac(<8 x
; GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0x0
; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x40
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x80
+; GISEL-NEXT: v_mov_b64_e32 v[16:17], 0
; GISEL-NEXT: v_mov_b64_e32 v[18:19], 16
-; GISEL-NEXT: v_mov_b64_e32 v[20:21], 32
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[36:37]
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[38:39]
@@ -4811,16 +4811,16 @@ define amdgpu_kernel void @test_mfma_scale_f32_32x32x64_f8f6f4_0_0__nonmac(<8 x
; GISEL-NEXT: v_accvgpr_write_b32 a13, s21
; GISEL-NEXT: v_accvgpr_write_b32 a14, s22
; GISEL-NEXT: v_accvgpr_write_b32 a15, s23
-; GISEL-NEXT: v_mov_b32_e32 v16, s1
+; GISEL-NEXT: v_mov_b32_e32 v20, s1
; GISEL-NEXT: v_mov_b64_e32 v[22:23], 48
; GISEL-NEXT: s_nop 0
-; GISEL-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], s0, v16 op_sel_hi:[0,0,0]
+; GISEL-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], s0, v20 op_sel_hi:[0,0,0]
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; GISEL-NEXT: v_mov_b64_e32 v[16:17], 0
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[16:17]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[20:21]
+; GISEL-NEXT: v_mov_b64_e32 v[20:21], 32
; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[14:15]
; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[18:19]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
>From 2e19b422c5659d86710e8f983b04e583f7553917 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Fri, 22 Aug 2025 12:17:41 +0900
Subject: [PATCH 5/5] Simplify immediate handling
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index df638bd65bdaa..b59acdd5de57a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2137,15 +2137,15 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
Register Dst = MI.getOperand(0).getReg();
if (SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst))) {
- uint64_t Imm = static_cast<uint64_t>(MI.getOperand(1).getImm());
+ int64_t Imm = MI.getOperand(1).getImm();
Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstLo)
- .addImm(SignExtend64<32>(Lo_32(Imm)))
+ .addImm(SignExtend64<32>(Imm))
.addReg(Dst, RegState::Implicit | RegState::Define);
BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstHi)
- .addImm(SignExtend64<32>(Hi_32(Imm)))
+ .addImm(SignExtend64<32>(Imm >> 32))
.addReg(Dst, RegState::Implicit | RegState::Define);
MI.eraseFromParent();
break;
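For reference, a minimal standalone sketch (not part of the patch) of why dropping the Lo_32/Hi_32 calls is safe: SignExtend64<32> only reads the low 32 bits of its argument, so masking to a half first is redundant, and an arithmetic `Imm >> 32` leaves the same low 32 bits as the logical shift inside Hi_32. The helpers below are local stand-ins mirroring llvm::SignExtend64, llvm::Lo_32, and llvm::Hi_32 from llvm/Support/MathExtras.h.

#include <cassert>
#include <cstdint>

// Local stand-ins for the llvm/Support/MathExtras.h helpers.
template <unsigned B> int64_t SignExtend64(uint64_t X) {
  return static_cast<int64_t>(X << (64 - B)) >> (64 - B);
}
uint32_t Lo_32(uint64_t Value) { return static_cast<uint32_t>(Value); }
uint32_t Hi_32(uint64_t Value) { return static_cast<uint32_t>(Value >> 32); }

int main() {
  const int64_t Imms[] = {0, -1, 48, 0x100000000,
                          static_cast<int64_t>(0x7fffffff80000000),
                          static_cast<int64_t>(0xffffffc000000040)};
  for (int64_t Imm : Imms) {
    uint64_t U = static_cast<uint64_t>(Imm);
    // Low half: SignExtend64<32> ignores bits [63:32], so Lo_32 is redundant.
    assert(SignExtend64<32>(Lo_32(U)) == SignExtend64<32>(U));
    // High half: arithmetic and logical right shifts by 32 agree in the
    // low 32 bits, which is all SignExtend64<32> looks at.
    assert(SignExtend64<32>(Hi_32(U)) ==
           SignExtend64<32>(static_cast<uint64_t>(Imm >> 32)));
  }
  return 0;
}

Either form feeds the same sign-extended 32-bit value to each of the two V_ACCVGPR_WRITE_B32_e64 halves, so the expansion's output is unchanged.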