[llvm] AMDGPU: Add pseudoinstruction for 64-bit agpr or vgpr constants (PR #154499)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 20 05:27:27 PDT 2025


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/154499

From 9d9396c0554d6d755312628e521a357e145707a1 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 12:05:02 +0900
Subject: [PATCH 1/3] AMDGPU: Add pseudoinstruction for 64-bit agpr or vgpr
 constants

64-bit version of 7425af4b7aaa31da10bd1bc7996d3bb212c79d88. We
still need to lower to 32-bit v_accvgpr_write_b32s, so this has
a unique value restriction that requires both halves of the constant
to be 32-bit inline immediates. This patch only introduces the new
pseudo definition; it does not try to use it yet.
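
For concreteness, a minimal standalone sketch of the value
restriction (an approximation: it models only the integer
inline-immediate range [-16, 64]; the real
AMDGPU::isInlinableLiteral32 also accepts the floating-point inline
values and, on subtargets with hasInv2PiInlineImm, 1/(2*pi)):

  // Sketch only; not the in-tree helper.
  #include <cassert>
  #include <cstdint>

  // Integer inline-immediate test for one 32-bit half.
  static bool isInlineImm32(uint32_t Bits) {
    int32_t S = static_cast<int32_t>(Bits);
    return S >= -16 && S <= 64;
  }

  // Both 32-bit halves must independently be inline immediates.
  static bool isLegalAV64PseudoImm(uint64_t Imm) {
    return isInlineImm32(static_cast<uint32_t>(Imm)) &&
           isInlineImm32(static_cast<uint32_t>(Imm >> 32));
  }

  int main() {
    assert(isLegalAV64PseudoImm(0));             // 0 lo, 0 hi
    assert(isLegalAV64PseudoImm(274877906946));  // 2 lo, 64 hi
    assert(!isLegalAV64PseudoImm(65));           // lo half 65 not inline
    assert(!isLegalAV64PseudoImm(279172874240)); // hi half 65 not inline
  }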
---
 llvm/lib/Target/AMDGPU/SIDefines.h            |   4 +
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp        |  48 +++++++
 llvm/lib/Target/AMDGPU/SIInstrInfo.h          |   3 +
 llvm/lib/Target/AMDGPU/SIInstrInfo.td         |   7 +
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  20 +++
 .../AMDGPU/amdgpu-prepare-agpr-alloc.mir      | 112 +++++++++++++++
 .../AMDGPU/av_movimm_pseudo_expansion.mir     | 136 +++++++++++++++++-
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir    | 107 ++++++++++++++
 .../CodeGen/AMDGPU/inflate-av-remat-imm.mir   |  57 ++++++++
 .../test/CodeGen/AMDGPU/peephole-fold-imm.mir |  39 +++++
 llvm/test/CodeGen/AMDGPU/vgpr-remat.mir       |  45 ++++++
 .../AMDGPU/av_mov_b64_imm_pseudo.mir          |  25 ++++
 12 files changed, 601 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir

diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 7c019031ff249..268b153c6c924 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -243,6 +243,10 @@ enum OperandType : unsigned {
   // Operand for SDWA instructions
   OPERAND_SDWA_VOPC_DST,
 
+  // Operand for AV_MOV_B64_IMM_PSEUDO, which is a pair of 32-bit inline
+  // constants.
+  OPERAND_INLINE_C_AV64_PSEUDO,
+
   OPERAND_REG_IMM_FIRST = OPERAND_REG_IMM_INT32,
   OPERAND_REG_IMM_LAST = OPERAND_REG_IMM_V2FP32,
 
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index cc4bee0f1f454..98ff9270391d2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1349,6 +1349,7 @@ bool SIInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
   case AMDGPU::V_MOV_B64_e32:
   case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
   case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
+  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
   case AMDGPU::S_MOV_B64_IMM_PSEUDO:
   case AMDGPU::V_MOV_B64_PSEUDO: {
     const MachineOperand &Src0 = MI.getOperand(1);
@@ -2133,6 +2134,25 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
         get(IsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::V_MOV_B32_e32));
     break;
   }
+  case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
+    Register Dst = MI.getOperand(0).getReg();
+    if (SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst))) {
+      uint64_t Imm = static_cast<uint64_t>(MI.getOperand(1).getImm());
+
+      Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
+      Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
+      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstLo)
+          .addImm(SignExtend64<32>(Lo_32(Imm)))
+          .addReg(Dst, RegState::Implicit | RegState::Define);
+      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstHi)
+          .addImm(SignExtend64<32>(Hi_32(Imm)))
+          .addReg(Dst, RegState::Implicit | RegState::Define);
+      MI.eraseFromParent();
+      break;
+    }
+
+    [[fallthrough]];
+  }
   case AMDGPU::V_MOV_B64_PSEUDO: {
     Register Dst = MI.getOperand(0).getReg();
     Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
@@ -3425,6 +3445,11 @@ bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
   case AMDGPU::V_ACCVGPR_MOV_B32:
   case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
     return true;
+  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
+    // TODO: We could fold this, but it's a strange case. The immediate value
+    // can't be directly folded into any real use. We would have to spread new
+    // immediate legality checks around and only accept subregister extracts for
+    // profitability.
   default:
     return false;
   }
@@ -4471,6 +4496,8 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
   case AMDGPU::OPERAND_KIMM16:
   case AMDGPU::OPERAND_KIMM64:
     return false;
+  case AMDGPU::OPERAND_INLINE_C_AV64_PSEUDO:
+    return isLegalAV64PseudoImm(Imm);
   case AMDGPU::OPERAND_INPUT_MODS:
   case MCOI::OPERAND_IMMEDIATE:
     // Always embedded in the instruction for free.
@@ -4536,6 +4563,12 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
   return ST.hasVOP3Literal();
 }
 
+bool SIInstrInfo::isLegalAV64PseudoImm(uint64_t Imm) const {
+  // Two 32-bit inline constants packed into a single 64-bit value.
+  return AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) &&
+         AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm());
+}
+
 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
   // GFX90A does not have V_MUL_LEGACY_F32_e32.
   if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
@@ -4896,6 +4929,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
     case MCOI::OPERAND_IMMEDIATE:
     case AMDGPU::OPERAND_KIMM32:
     case AMDGPU::OPERAND_KIMM64:
+    case AMDGPU::OPERAND_INLINE_C_AV64_PSEUDO:
       // Check if this operand is an immediate.
       // FrameIndex operands will be replaced by immediates, so they are
       // allowed.
@@ -5518,6 +5552,20 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
     }
   }
 
+#if 0
+  if (Opcode == AMDGPU::AV_MOV_B64_IMM_PSEUDO) {
+    const MachineOperand &SrcOp = MI.getOperand(1);
+    uint64_t Imm = static_cast<uint64_t>(SrcOp.getImm());
+
+    if (!AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) ||
+        !AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm())) {
+      ErrInfo = "AV_MOV_B64_IMM_PSEUDO only accepts a pair of 32-bit inline "
+                "immediates";
+      return false;
+    }
+  }
+#endif
+
   if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
     const MachineOperand &SrcOp = MI.getOperand(1);
     if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 12ffae78f7b2c..f7c7bb509c9ef 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1183,6 +1183,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
                          const MachineOperand &MO) const;
 
+  /// Check if this immediate value can be used for AV_MOV_B64_IMM_PSEUDO.
+  bool isLegalAV64PseudoImm(uint64_t Imm) const;
+
   /// Return true if this 64-bit VALU instruction has a 32-bit encoding.
   /// This function will return false if you pass it a 32-bit instruction.
   bool hasVALU32BitEncoding(unsigned Opcode) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index c425d9753dd1e..0374526e35c44 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1068,6 +1068,13 @@ def SplitBarrier : ImmOperand<i32> {
   let PrintMethod = "printOperand";
 }
 
+// Pseudo-operand type. This is a pair of 32-bit inline constants
+// packed into a single 64-bit value.
+def AV_64_PSEUDO_IMM : Operand<i64> {
+  let OperandNamespace = "AMDGPU";
+  let OperandType = "OPERAND_INLINE_C_AV64_PSEUDO";
+}
+
 def VReg32OrOffClass : AsmOperandClass {
   let Name = "VReg32OrOff";
   let ParserMethod = "parseVReg32OrOff";
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d3c15bd8f672a..13dc68157197f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -160,6 +160,26 @@ def AV_MOV_B32_IMM_PSEUDO
   let UseNamedOperandTable = 1;
 }
 
+// 64-bit materialize immediate which supports AGPR or VGPR. This has
+// an unusual operand restriction which requires the two halves of the
+// immediate to each be 32-bit inline immediate values.
+//
+// FIXME: This unnecessarily has the even aligned vector register
+// requirement applied.
+def AV_MOV_B64_IMM_PSEUDO
+    : VPseudoInstSI<(outs AV_64:$vdst), (ins AV_64_PSEUDO_IMM:$src0)> {
+  let isReMaterializable = 1;
+  let isAsCheapAsAMove = 1;
+
+  // Imprecise, technically if AGPR it's VOP3 and VOP1 for AGPR. But
+  // this tricks the rematerialize logic into working for it.
+  let VOP3 = 1;
+  let isMoveImm = 1;
+  let SchedRW = [Write32Bit, Write32Bit];
+  let Size = 16; // 2 x v_accvgpr_write_b32 in the worst case
+  let UseNamedOperandTable = 1;
+}
+
 // 64-bit vector move with dpp. Expanded post-RA.
 def V_MOV_B64_DPP_PSEUDO : VOP_DPP_Pseudo <"v_mov_b64_dpp", VOP_I64_I64> {
   let Size = 16; // Requires two 8-byte v_mov_b32_dpp to complete.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
index 69bdb1f5066f0..d277c8104fe44 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
@@ -13,6 +13,14 @@
     ret void
   }
 
+  define void @func64() {
+    ret void
+  }
+
+  define void @func64_no_agprs() "amdgpu-agpr-alloc"="0,0" {
+    ret void
+  }
+
 ...
 ---
 name: func
@@ -93,3 +101,107 @@ body:             |
     %1:agpr_32 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec
 
 ...
+
+---
+name: func64
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 4 }
+body:             |
+  ; HAS-AGPR-LABEL: name: func64
+  ; HAS-AGPR: bb.0:
+  ; HAS-AGPR-NEXT:   successors: %bb.1(0x80000000)
+  ; HAS-AGPR-NEXT:   liveins: $vgpr0_vgpr1
+  ; HAS-AGPR-NEXT: {{  $}}
+  ; HAS-AGPR-NEXT:   [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+  ; HAS-AGPR-NEXT:   [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+  ; HAS-AGPR-NEXT: {{  $}}
+  ; HAS-AGPR-NEXT: bb.1:
+  ; HAS-AGPR-NEXT:   [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+  ;
+  ; NO-AGPR-LABEL: name: func64
+  ; NO-AGPR: bb.0:
+  ; NO-AGPR-NEXT:   successors: %bb.1(0x80000000)
+  ; NO-AGPR-NEXT:   liveins: $vgpr0_vgpr1
+  ; NO-AGPR-NEXT: {{  $}}
+  ; NO-AGPR-NEXT:   [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+  ; NO-AGPR-NEXT:   [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+  ; NO-AGPR-NEXT: {{  $}}
+  ; NO-AGPR-NEXT: bb.1:
+  ; NO-AGPR-NEXT:   [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    %0:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
+    %1:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
+    %2:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+    %3:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
+    %4:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+    %5:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+    %6:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+    %7:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+    %8:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    %9:vreg_64_align2 = V_MOV_B64_PSEUDO 9223372036854775808, implicit $exec
+    %10:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+    %11:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+    %12:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+    %13:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+
+  bb.1:
+    %14:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+
+...
+
+---
+name: func64_no_agprs
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+    ; HAS-AGPR-LABEL: name: func64_no_agprs
+    ; HAS-AGPR: liveins: $vgpr0
+    ; HAS-AGPR-NEXT: {{  $}}
+    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+    ;
+    ; NO-AGPR-LABEL: name: func64_no_agprs
+    ; NO-AGPR: liveins: $vgpr0
+    ; NO-AGPR-NEXT: {{  $}}
+    ; NO-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+    ; NO-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    ; NO-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+    ; NO-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+    ; NO-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+    %0:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+    %1:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    %2:vreg_64_align2 = V_MOV_B64_PSEUDO 9223372036854775808, implicit $exec
+    %3:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+    %4:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir b/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir
index 1f9d49073da2c..272997cf1a347 100644
--- a/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir
+++ b/llvm/test/CodeGen/AMDGPU/av_movimm_pseudo_expansion.mir
@@ -1,6 +1,7 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass=postrapseudos %s -o - | FileCheck %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx942 -run-pass=postrapseudos %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -run-pass=postrapseudos %s -o - | FileCheck -check-prefixes=CHECK,GFX908 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass=postrapseudos %s -o - | FileCheck -check-prefixes=CHECK,GFX90A %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx942 -run-pass=postrapseudos %s -o - | FileCheck -check-prefixes=CHECK,GFX942 %s
 
 ---
 name: av_mov_b32_imm_pseudo_agpr_0
@@ -54,3 +55,134 @@ body: |
     ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec
     $agpr1 = AV_MOV_B32_IMM_PSEUDO $vgpr0, implicit $exec
 ...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_0
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_0
+    ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+    $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_neg1
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_neg1
+    ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 -1, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 -1, implicit $exec, implicit-def $agpr0_agpr1
+    $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO -1, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_64
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_64
+    ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+    $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_vgpr_0
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GFX908-LABEL: name: av_mov_b64_imm_pseudo_vgpr_0
+    ; GFX908: $vgpr0 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ;
+    ; GFX90A-LABEL: name: av_mov_b64_imm_pseudo_vgpr_0
+    ; GFX90A: $vgpr0_vgpr1 = V_PK_MOV_B32 8, 0, 8, 0, 0, 0, 0, 0, 0, implicit $exec
+    ;
+    ; GFX942-LABEL: name: av_mov_b64_imm_pseudo_vgpr_0
+    ; GFX942: $vgpr0_vgpr1 = V_MOV_B64_e32 0, implicit $exec
+    $vgpr0_vgpr1 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_vgpr_64
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GFX908-LABEL: name: av_mov_b64_imm_pseudo_vgpr_64
+    ; GFX908: $vgpr0 = V_MOV_B32_e32 64, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ;
+    ; GFX90A-LABEL: name: av_mov_b64_imm_pseudo_vgpr_64
+    ; GFX90A: $vgpr0 = V_MOV_B32_e32 64, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX90A-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ;
+    ; GFX942-LABEL: name: av_mov_b64_imm_pseudo_vgpr_64
+    ; GFX942: $vgpr0_vgpr1 = V_MOV_B64_e32 64, implicit $exec
+    $vgpr0_vgpr1 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_64_hi_0_lo
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_64_hi_0_lo
+    ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1
+    $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 274877906944, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_64_hi_2_lo
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_64_hi_2_lo
+    ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 64, implicit $exec, implicit-def $agpr0_agpr1
+    $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 274877906946, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_agpr_neg16_hi_9_lo
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: av_mov_b64_imm_pseudo_agpr_neg16_hi_9_lo
+    ; CHECK: $agpr0 = V_ACCVGPR_WRITE_B32_e64 9, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 -16, implicit $exec, implicit-def $agpr0_agpr1
+    $agpr0_agpr1 = AV_MOV_B64_IMM_PSEUDO 18446744004990074889, implicit $exec
+...
+
+---
+name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GFX908-LABEL: name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+    ; GFX908: $vgpr0 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX908-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr2_vgpr3
+    ; GFX908-NEXT: $vgpr3 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr2_vgpr3
+    ; GFX908-NEXT: $vgpr4 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr4_vgpr5
+    ; GFX908-NEXT: $vgpr5 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr4_vgpr5
+    ;
+    ; GFX90A-LABEL: name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+    ; GFX90A: $vgpr0 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX90A-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; GFX90A-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr2_vgpr3
+    ; GFX90A-NEXT: $vgpr3 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr2_vgpr3
+    ; GFX90A-NEXT: $vgpr4_vgpr5 = V_PK_MOV_B32 8, 1042479491, 8, 1042479491, 0, 0, 0, 0, 0, implicit $exec
+    ;
+    ; GFX942-LABEL: name: av_mov_b64_imm_pseudo_vgpr_inv2pi
+    ; GFX942: $vgpr0_vgpr1 = V_MOV_B64_e32 1042479491, implicit $exec
+    ; GFX942-NEXT: $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr2_vgpr3
+    ; GFX942-NEXT: $vgpr3 = V_MOV_B32_e32 1042479491, implicit $exec, implicit-def $vgpr2_vgpr3
+    ; GFX942-NEXT: $vgpr4_vgpr5 = V_PK_MOV_B32 8, 1042479491, 8, 1042479491, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr0_vgpr1 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+    $vgpr2_vgpr3 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
+    $vgpr4_vgpr5 = AV_MOV_B64_IMM_PSEUDO 4477415321638205827, implicit $exec
+...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index 74c4a2da50221..93c67ac18f769 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -781,3 +781,110 @@ body:             |
     S_ENDPGM 0
 
 ...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_agpr
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_agpr
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    ; GCN-NEXT: $agpr0_agpr1 = COPY [[AV_MOV_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    $agpr0_agpr1 = COPY %0
+    S_ENDPGM 0, implicit $agpr0_agpr1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_vgpr
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_physreg_vgpr
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[AV_MOV_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit $vgpr0_vgpr1
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    $vgpr0_vgpr1 = COPY %0
+    S_ENDPGM 0, implicit $vgpr0_vgpr1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_agpr
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_agpr
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64 = COPY [[AV_MOV_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    %1:areg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+# Splat value across the 2 halves of the register
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_0
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_0
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY [[AV_MOV_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    %1:vreg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+# Low and high halves are different inline constants
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY [[AV_MOV_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+    %1:vreg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub0
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub0
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]].sub0
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+    %1:vgpr_32 = COPY %0.sub0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub1
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: av_mov_b64_imm_pseudo_copy_av_64_to_virtreg_vgpr_nonsplat_value_copy_sub1
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]].sub1
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 274877906961, implicit $exec
+    %1:vgpr_32 = COPY %0.sub1
+    S_ENDPGM 0, implicit %1
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir b/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir
index c34c9749d553a..4d8fb8db624f8 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-av-remat-imm.mir
@@ -105,3 +105,60 @@ body:             |
 
 ...
 
+---
+name: av_mov_b64_split
+tracksRegLiveness: true
+machineFunctionInfo:
+  isEntryFunction: true
+  scratchRSrcReg:  '$sgpr72_sgpr73_sgpr74_sgpr75'
+  stackPtrOffsetReg: '$sgpr32'
+  occupancy:       7
+body:             |
+  bb.0:
+    liveins: $vgpr0, $sgpr4_sgpr5
+
+    ; CHECK-LABEL: name: av_mov_b64_split
+    ; CHECK: liveins: $agpr6, $agpr7, $agpr8, $agpr9, $vgpr0, $sgpr4_sgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr0_agpr1
+    ; CHECK-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 1, implicit $exec, implicit-def $agpr2_agpr3
+    ; CHECK-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr2_agpr3
+    ; CHECK-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 2, implicit $exec, implicit-def $agpr4_agpr5
+    ; CHECK-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec, implicit-def $agpr4_agpr5
+    ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 3, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; CHECK-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1
+    ; CHECK-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit killed $vgpr0_vgpr1
+    ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 4, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; CHECK-NEXT: $agpr9 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1
+    ; CHECK-NEXT: $agpr8 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit killed $vgpr0_vgpr1
+    ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1
+    ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr2_agpr3
+    ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr4_agpr5
+    ; CHECK-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr7, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; CHECK-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr6, implicit $exec, implicit $vgpr0_vgpr1
+    ; CHECK-NEXT: S_NOP 0, implicit killed renamable $vgpr0_vgpr1
+    ; CHECK-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr9, implicit $exec, implicit-def $vgpr0_vgpr1
+    ; CHECK-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr8, implicit $exec, implicit $vgpr0_vgpr1
+    ; CHECK-NEXT: S_NOP 0, implicit killed renamable $vgpr0_vgpr1
+    %0:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    %1:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+    %2:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 2, implicit $exec
+    %3:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 3, implicit $exec
+    %4:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4, implicit $exec
+
+    %5:areg_64_align2 = COPY %0
+    %6:areg_64_align2 = COPY %1
+    %7:areg_64_align2 = COPY %2
+    %8:areg_64_align2 = COPY %3
+    %9:areg_64_align2 = COPY %4
+
+    S_NOP 0, implicit %5
+    S_NOP 0, implicit %6
+    S_NOP 0, implicit %7
+    S_NOP 0, implicit %8
+    S_NOP 0, implicit %9
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir b/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
index 764a1e1090181..770e7c048620d 100644
--- a/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
@@ -767,3 +767,42 @@ body:             |
     %1:av_32 = COPY killed %0
     SI_RETURN_TO_EPILOG implicit %1
 ...
+
+---
+name:            fold_av_mov_b64_imm_pseudo_inlineimm_to_vgpr
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: fold_av_mov_b64_imm_pseudo_inlineimm_to_vgpr
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    ; GCN-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 64, implicit $exec
+    ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[V_MOV_B]]
+    %0:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    %1:vreg_64_align2 = COPY killed %0
+    SI_RETURN_TO_EPILOG implicit %1
+...
+
+---
+name:            fold_av_mov_b64_imm_pseudo_inlineimm_to_agpr
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: fold_av_mov_b64_imm_pseudo_inlineimm_to_agpr
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64_align2 = COPY killed [[AV_MOV_]]
+    ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[COPY]]
+    %0:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    %1:areg_64_align2 = COPY killed %0
+    SI_RETURN_TO_EPILOG implicit %1
+...
+
+---
+name:            fold_av_mov_b64_imm_pseudo_inlineimm_to_av
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: fold_av_mov_b64_imm_pseudo_inlineimm_to_av
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64_align2 = COPY killed [[AV_MOV_]]
+    ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[COPY]]
+    %0:av_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    %1:av_64_align2 = COPY killed %0
+    SI_RETURN_TO_EPILOG implicit %1
+...
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir b/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir
index 9dad99f307c19..fa4461c1cc387 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-remat.mir
@@ -135,3 +135,48 @@ body: |
     S_ENDPGM 0, implicit %4
 
 ...
+
+---
+name: av_mov_imm_b64
+tracksRegLiveness: true
+body: |
+  ; CHECK-LABEL: name: av_mov_imm_b64
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $sgpr0_sgpr1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   undef [[AV_MOV_:%[0-9]+]].sub0_sub1:vreg_192 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+  ; CHECK-NEXT:   [[AV_MOV_:%[0-9]+]].sub2_sub3:vreg_192 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; CHECK-NEXT:   $exec = S_MOV_B64_term [[COPY]]
+  ; CHECK-NEXT:   S_CBRANCH_EXECZ %bb.2, implicit $exec
+  ; CHECK-NEXT:   S_BRANCH %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[AV_MOV_:%[0-9]+]].sub0_sub1:vreg_192 = V_MUL_F64_e64 0, [[AV_MOV_]].sub0_sub1, 0, [[AV_MOV_]].sub0_sub1, 0, 0, implicit $mode, implicit $exec
+  ; CHECK-NEXT:   [[AV_MOV_:%[0-9]+]].sub2_sub3:vreg_192 = V_MUL_F64_e64 0, [[AV_MOV_]].sub2_sub3, 0, [[AV_MOV_]].sub2_sub3, 0, 0, implicit $mode, implicit $exec
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   S_ENDPGM 0, implicit [[AV_MOV_]]
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    %0:av_64 = AV_MOV_B64_IMM_PSEUDO 0, implicit $exec
+    %1:vreg_64 = COPY %0
+    %2:vreg_64 = COPY %0
+    %3:sreg_64 = COPY $sgpr0_sgpr1
+    $exec = S_MOV_B64_term %3:sreg_64
+    S_CBRANCH_EXECZ %bb.2, implicit $exec
+    S_BRANCH %bb.1
+
+  bb.1:
+    %1:vreg_64 = V_MUL_F64_e64 0, %1:vreg_64, 0, %1:vreg_64, 0, 0, implicit $mode, implicit $exec
+    %2:vreg_64 = V_MUL_F64_e64 0, %2:vreg_64, 0, %2:vreg_64, 0, 0, implicit $mode, implicit $exec
+
+  bb.2:
+    undef %4.sub0_sub1:vreg_192 = COPY %1:vreg_64
+    %4.sub2_sub3:vreg_192 = COPY %2:vreg_64
+    S_ENDPGM 0, implicit %4
+
+...
diff --git a/llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir b/llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir
new file mode 100644
index 0000000000000..be3cf3de24d51
--- /dev/null
+++ b/llvm/test/MachineVerifier/AMDGPU/av_mov_b64_imm_pseudo.mir
@@ -0,0 +1,25 @@
+# RUN: not --crash llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -run-pass=none -filetype=null %s 2>&1 | FileCheck %s
+
+# FIXME: Value error messages are misleading.
+---
+name: test
+body: |
+  bb.0:
+    %0:av_64 = IMPLICIT_DEF
+
+    ; CHECK: *** Bad machine code: Expected immediate, but got non-immediate ***
+    %1:av_64 = AV_MOV_B64_IMM_PSEUDO %1, implicit $exec
+
+    ; Low half isn't inline imm
+    ; CHECK: *** Bad machine code: VOP3 instruction uses literal ***
+    %2:av_64 = AV_MOV_B64_IMM_PSEUDO 65, implicit $exec
+
+    ; High half isn't inline imm
+    ; CHECK: *** Bad machine code: VOP3 instruction uses literal ***
+    %2:av_64 = AV_MOV_B64_IMM_PSEUDO 279172874240, implicit $exec
+
+    ; Neither half is inline imm
+    ; CHECK: *** Bad machine code: VOP3 instruction uses literal ***
+    %3:av_64 = AV_MOV_B64_IMM_PSEUDO 279172874306, implicit $exec
+
+...

From f8e31e5e0bfc711c9d2ed7308e77d03a3d08b88b Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 21:24:13 +0900
Subject: [PATCH 2/3] Remove dead code

---
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 98ff9270391d2..df638bd65bdaa 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -5552,20 +5552,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
     }
   }
 
-#if 0
-  if (Opcode == AMDGPU::AV_MOV_B64_IMM_PSEUDO) {
-    const MachineOperand &SrcOp = MI.getOperand(1);
-    uint64_t Imm = static_cast<uint64_t>(SrcOp.getImm());
-
-    if (!AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) ||
-        !AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm())) {
-      ErrInfo = "AV_MOV_B64_IMM_PSEUDO only accepts a pair of 32-bit inline "
-                "immediates";
-      return false;
-    }
-  }
-#endif
-
   if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
     const MachineOperand &SrcOp = MI.getOperand(1);
     if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {

From 235a72573278b99167b33711996f8f801f61d2aa Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 20 Aug 2025 21:27:00 +0900
Subject: [PATCH 3/3] Fix comment

---
 llvm/lib/Target/AMDGPU/SIInstructions.td | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 13dc68157197f..2d019cb04c353 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -150,7 +150,7 @@ def AV_MOV_B32_IMM_PSEUDO
   let isReMaterializable = 1;
   let isAsCheapAsAMove = 1;
 
-  // Imprecise, technically if AGPR it's VOP3 and VOP1 for AGPR. But
+  // Imprecise, technically if AGPR it's VOP3 and VOP1 for VGPR. But
   // this tricks the rematerialize logic into working for it.
   let VOP3 = 1;
   let isMoveImm = 1;
@@ -171,8 +171,9 @@ def AV_MOV_B64_IMM_PSEUDO
   let isReMaterializable = 1;
   let isAsCheapAsAMove = 1;
 
-  // Imprecise, technically if AGPR it's VOP3 and VOP1 for AGPR. But
-  // this tricks the rematerialize logic into working for it.
+  // Imprecise, technically if AGPR it's 2 x VOP3 and 2 x VOP1 for
+  // VGPR. But this tricks the rematerialize logic into working for
+  // it.
   let VOP3 = 1;
   let isMoveImm = 1;
   let SchedRW = [Write32Bit, Write32Bit];


