[llvm] [AMDGPU] Fold multiple aligned v_mov_b32 to v_mov_b64 on gfx942 (PR #138843)

Janek van Oirschot via llvm-commits llvm-commits at lists.llvm.org
Mon May 19 04:51:39 PDT 2025


https://github.com/JanekvO updated https://github.com/llvm/llvm-project/pull/138843

From 9ea79f3093f7827021e29d205ff5a0ad0530d3ac Mon Sep 17 00:00:00 2001
From: Janek van Oirschot <Janek.vanOirschot at amd.com>
Date: Wed, 7 May 2025 02:52:10 -0700
Subject: [PATCH 1/2] [AMDGPU] Fold multiple aligned v_mov_b32 to v_mov_b64 on
 gfx942

---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     | 102 +++++++++++++++++-
 llvm/test/CodeGen/AMDGPU/flat-scratch.ll      |  12 +--
 .../CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll |  30 ++++--
 .../CodeGen/AMDGPU/masked-load-vectortypes.ll |  87 +++++++--------
 llvm/test/CodeGen/AMDGPU/mfma-loop.ll         |  65 +++++------
 .../si-fold-operands-subreg-imm-gfx942.mir    |  99 +++++++++++++++++
 llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll   |  25 ++---
 .../test/CodeGen/AMDGPU/vni8-across-blocks.ll |   5 +-
 8 files changed, 305 insertions(+), 120 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/si-fold-operands-subreg-imm-gfx942.mir

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 92937e33fd500..80d9977fbffd1 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -160,6 +160,7 @@ class SIFoldOperandsImpl {
   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
   bool tryFoldOMod(MachineInstr &MI);
   bool tryFoldRegSequence(MachineInstr &MI);
+  bool tryFoldImmRegSequence(MachineInstr &MI);
   bool tryFoldPhiAGPR(MachineInstr &MI);
   bool tryFoldLoad(MachineInstr &MI);
 
@@ -2189,6 +2190,99 @@ bool SIFoldOperandsImpl::tryFoldOMod(MachineInstr &MI) {
   return true;
 }
 
+// gfx942+ can use V_MOV_B64 for materializing constant immediates.
+// For example:
+// %0:vgpr_32 = V_MOV_B32 0, implicit $exec
+// %1:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
+//  ->
+// %1:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec
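+//
+// Wider sequences are split into 64-bit chunks when each packed pair of
+// 32-bit immediates is encodable (cf. the v_mov_b32_to_b128 MIR test), e.g.:
+// %0:vgpr_32 = V_MOV_B32 0, implicit $exec
+// %1:vreg_128_align2 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1,
+//                                   %0, %subreg.sub2, %0, %subreg.sub3
+//  ->
+// %2:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec
+// %3:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec
+// %1:vreg_128_align2 = REG_SEQUENCE %2, %subreg.sub0_sub1,
+//                                   %3, %subreg.sub2_sub3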
+bool SIFoldOperandsImpl::tryFoldImmRegSequence(MachineInstr &MI) {
+  assert(MI.isRegSequence());
+  auto Reg = MI.getOperand(0).getReg();
+  const TargetRegisterClass *DefRC = MRI->getRegClass(Reg);
+
+  if (!ST->hasMovB64() || !TRI->isVGPR(*MRI, Reg) ||
+      !MRI->hasOneNonDBGUse(Reg) || !TRI->isProperlyAlignedRC(*DefRC))
+    return false;
+
+  SmallVector<std::pair<MachineOperand *, unsigned>, 32> Defs;
+  if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER))
+    return false;
+
+  // Only attempt to fold immediate materializations.
+  if (!llvm::all_of(Defs,
+                    [](const std::pair<MachineOperand *, unsigned> &Op) {
+                      return Op.first->isImm();
+                    }))
+    return false;
+
+  SmallVector<uint64_t, 8> ImmVals;
+  uint64_t ImmVal = 0;
+  uint64_t ImmSize = 0;
+  for (unsigned i = 0; i < Defs.size(); ++i) {
+    auto &[Op, SubIdx] = Defs[i];
+    unsigned SubRegSize = TRI->getSubRegIdxSize(SubIdx);
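+    // Odd channels form the high half of the current 64-bit chunk.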
+    unsigned Shift = (TRI->getChannelFromSubReg(SubIdx) % 2) * SubRegSize;
+    ImmSize += SubRegSize;
+    ImmVal |= Op->getImm() << Shift;
+
+    if (ImmSize > 64 || SubRegSize == 64)
+      return false;
+
+    if (ImmSize == 64) {
+      // Only 32-bit literals can be encoded.
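+      // E.g., (sub0 = 12345, sub1 = 0) packs to 12345, which folds, whereas
+      // (sub0 = 1, sub1 = 12345) packs to 0x303900000001, which does not.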
+      if (!isUInt<32>(ImmVal))
+        return false;
+      ImmVals.push_back(ImmVal);
+      ImmVal = 0;
+      ImmSize = 0;
+    }
+  }
+
+  assert(ImmVals.size() > 0 &&
+         "REG_SEQUENCE should have at least 1 operand pair");
+
+  // Can only combine REG_SEQUENCE into a single 64-bit immediate mov.
+  if (DefRC == TRI->getVGPR64Class()) {
+    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+            TII->get(AMDGPU::V_MOV_B64_PSEUDO), Reg)
+        .addImm(ImmVals[0]);
+    MI.eraseFromParent();
+    return true;
+  }
+
+  if (ImmVals.size() == 1)
+    return false;
+
+  // Can't bail from here on: we are modifying the MI.
+
+  // Remove source operands.
+  for (unsigned i = MI.getNumOperands() - 1; i > 0; --i)
+    MI.removeOperand(i);
+
+  for (unsigned i = 0; i < ImmVals.size(); ++i) {
+    const TargetRegisterClass *RC = TRI->getVGPR64Class();
+    auto MovReg = MRI->createVirtualRegister(RC);
+    // Duplicate vmov imm materializations (e.g., splatted operands) should
+    // get combined by the MachineCSE pass.
+    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+            TII->get(AMDGPU::V_MOV_B64_PSEUDO), MovReg)
+        .addImm(ImmVals[i]);
+
+    // Two subregs with no overlap (sub0_sub1, sub2_sub3, etc.).
+    unsigned SubReg64B =
+        SIRegisterInfo::getSubRegFromChannel(/*Channel=*/i * 2, /*SubRegs=*/2);
+
+    MI.addOperand(MachineOperand::CreateReg(MovReg, /*isDef=*/false));
+    MI.addOperand(MachineOperand::CreateImm(SubReg64B));
+  }
+
+  LLVM_DEBUG(dbgs() << "Folded into " << MI);
+
+  return true;
+}
+
 // Try to fold a reg_sequence with vgpr output and agpr inputs into an
 // instruction which can take an agpr. So far that means a store.
 bool SIFoldOperandsImpl::tryFoldRegSequence(MachineInstr &MI) {
@@ -2618,9 +2712,11 @@ bool SIFoldOperandsImpl::run(MachineFunction &MF) {
         continue;
       }
 
-      if (MI.isRegSequence() && tryFoldRegSequence(MI)) {
-        Changed = true;
-        continue;
+      if (MI.isRegSequence()) {
+        if (tryFoldImmRegSequence(MI) || tryFoldRegSequence(MI)) {
+          Changed = true;
+          continue;
+        }
       }
 
       if (MI.isPHI() && tryFoldPhiAGPR(MI)) {
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index b5e579b78a59c..a43d9657cfb24 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -4139,8 +4139,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
 ; GFX942-LABEL: store_load_i64_aligned:
 ; GFX942:       ; %bb.0: ; %bb
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT:    v_mov_b32_e32 v2, 15
-; GFX942-NEXT:    v_mov_b32_e32 v3, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], 15
 ; GFX942-NEXT:    scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
@@ -4250,8 +4249,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
 ; GFX942-LABEL: store_load_i64_unaligned:
 ; GFX942:       ; %bb.0: ; %bb
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT:    v_mov_b32_e32 v2, 15
-; GFX942-NEXT:    v_mov_b32_e32 v3, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], 15
 ; GFX942-NEXT:    scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
@@ -5010,10 +5008,8 @@ define amdgpu_ps void @large_offset() {
 ;
 ; GFX942-LABEL: large_offset:
 ; GFX942:       ; %bb.0: ; %bb
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
 ; GFX942-NEXT:    scratch_store_dwordx4 off, v[0:3], off offset:3024 sc0 sc1
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    scratch_load_dwordx4 v[0:3], off, off offset:3024 sc0 sc1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll
index 352e5eecd7bfe..783f4d3fdaae9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx90a.ll
@@ -13,8 +13,10 @@ declare i32 @llvm.amdgcn.workitem.id.x()
 ; GCN-LABEL: {{^}}test_mfma_f32_32x32x4bf16_1k:
 ; GCN-DAG:     s_load_dwordx16
 ; GCN-DAG:     s_load_dwordx16
-; GCN-DAG:     v_mov_b32_e32 v[[TWO:[0-9]+]], 2
-; GCN-DAG:     v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX90A-DAG:  v_mov_b32_e32 v[[TWO:[0-9]+]], 2
+; GFX90A-DAG:  v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX942-DAG:  v_mov_b64_e32 v[[[ONE:[0-9]+]]:{{[0-9]+}}], 1
+; GFX942-DAG:  v_mov_b64_e32 v[[[TWO:[0-9]+]]:{{[0-9]+}}], 2
 ; GCN-COUNT-32: v_accvgpr_write_b32 a{{[0-9]+}}, s{{[0-9]+}}
 ; GFX90A:      v_mfma_f32_32x32x4bf16_1k a[{{[0-9]+:[0-9]+}}], v[[[ONE]]:{{[0-9]+}}], v[[[TWO]]:{{[0-9]+}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
 ; GFX942:      v_mfma_f32_32x32x4_2b_bf16 a[{{[0-9]+:[0-9]+}}], v[[[ONE]]:{{[0-9+]}}], v[[[TWO]]:{{[0-9+]}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
@@ -32,8 +34,10 @@ bb:
 
 ; GCN-LABEL: {{^}}test_mfma_f32_16x16x4bf16_1k:
 ; GCN-DAG:      s_load_dwordx16
-; GCN-DAG:      v_mov_b32_e32 v[[TWO:[0-9]+]], 2
-; GCN-DAG:      v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX90A-DAG:   v_mov_b32_e32 v[[TWO:[0-9]+]], 2
+; GFX90A-DAG:   v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX942-DAG:   v_mov_b64_e32 v[[[ONE:[0-9]+]]:{{[0-9]+}}], 1
+; GFX942-DAG:   v_mov_b64_e32 v[[[TWO:[0-9]+]]:{{[0-9]+}}], 2
 ; GCN-COUNT-16: v_accvgpr_write_b32 a{{[0-9]+}}, s{{[0-9]+}}
 ; GFX90A:       v_mfma_f32_16x16x4bf16_1k a[{{[0-9]+:[0-9]+}}], v[[[ONE]]:{{[0-9]+}}], v[[[TWO]]:{{[0-9]+}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
 ; GFX942:       v_mfma_f32_16x16x4_4b_bf16 a[{{[0-9]+:[0-9]+}}], v[[[ONE]]:{{[0-9+]}}], v[[[TWO]]:{{[0-9+]}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
@@ -51,8 +55,10 @@ bb:
 
 ; GCN-LABEL: {{^}}test_mfma_f32_4x4x4bf16_1k:
 ; GCN-DAG:     s_load_dwordx4
-; GCN-DAG:     v_mov_b32_e32 v[[TWO:[0-9]+]], 2
-; GCN-DAG:     v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX90A-DAG:  v_mov_b32_e32 v[[TWO:[0-9]+]], 2
+; GFX90A-DAG:  v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX942-DAG:  v_mov_b64_e32 v[[[ONE:[0-9]+]]:{{[0-9]+}}], 1
+; GFX942-DAG:  v_mov_b64_e32 v[[[TWO:[0-9]+]]:{{[0-9]+}}], 2
 ; GCN-COUNT-4: v_accvgpr_write_b32 a{{[0-9]+}}, s{{[0-9]+}}
 ; GFX90A:      v_mfma_f32_4x4x4bf16_1k [[RES:a\[[0-9]+:[0-9]+\]]], v[[[ONE]]:{{[0-9]+}}], v[[[TWO]]:{{[0-9]+}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
 ; GFX942:      v_mfma_f32_4x4x4_16b_bf16 [[RES:a\[[0-9]+:[0-9]+\]]], v[[[ONE]]:{{[0-9+]}}], v[[[TWO]]:{{[0-9+]}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
@@ -70,8 +76,10 @@ bb:
 
 ; GCN-LABEL: {{^}}test_mfma_f32_32x32x8bf16_1k:
 ; GCN-DAG:      s_load_dwordx16
-; GCN-DAG:      v_mov_b32_e32 v[[TWO:[0-9]+]], 2
-; GCN-DAG:      v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX90A-DAG:   v_mov_b32_e32 v[[TWO:[0-9]+]], 2
+; GFX90A-DAG:   v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX942-DAG:   v_mov_b64_e32 v[[[ONE:[0-9]+]]:{{[0-9]+}}], 1
+; GFX942-DAG:   v_mov_b64_e32 v[[[TWO:[0-9]+]]:{{[0-9]+}}], 2
 ; GCN-COUNT-16: v_accvgpr_write_b32 a{{[0-9]+}}, s{{[0-9]+}}
 ; GFX90A:       v_mfma_f32_32x32x8bf16_1k a[{{[0-9]+:[0-9]+}}], v[[[ONE]]:{{[0-9]+}}], v[[[TWO]]:{{[0-9]+}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
 ; GFX942:       v_mfma_f32_32x32x8_bf16 a[{{[0-9]+:[0-9]+}}], v[[[ONE]]:{{[0-9+]}}], v[[[TWO]]:{{[0-9+]}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
@@ -89,8 +97,10 @@ bb:
 
 ; GCN-LABEL: {{^}}test_mfma_f32_16x16x16bf16_1k:
 ; GCN-DAG:     s_load_dwordx4
-; GCN-DAG:     v_mov_b32_e32 v[[TWO:[0-9]+]], 2
-; GCN-DAG:     v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX90A-DAG:   v_mov_b32_e32 v[[TWO:[0-9]+]], 2
+; GFX90A-DAG:   v_mov_b32_e32 v[[ONE:[0-9]+]], 1
+; GFX942-DAG:   v_mov_b64_e32 v[[[ONE:[0-9]+]]:{{[0-9]+}}], 1
+; GFX942-DAG:   v_mov_b64_e32 v[[[TWO:[0-9]+]]:{{[0-9]+}}], 2
 ; GCN-COUNT-4: v_accvgpr_write_b32 a{{[0-9]+}}, s{{[0-9]+}}
 ; GFX90A:      v_mfma_f32_16x16x16bf16_1k [[RES:a\[[0-9]+:[0-9]+\]]], v[[[ONE]]:{{[0-9]+}}], v[[[TWO]]:{{[0-9]+}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
 ; GFX942:      v_mfma_f32_16x16x16_bf16 [[RES:a\[[0-9]+:[0-9]+\]]], v[[[ONE]]:{{[0-9+]}}], v[[[TWO]]:{{[0-9+]}}], a[{{[0-9]+:[0-9]+}}] cbsz:1 abid:2 blgp:3
diff --git a/llvm/test/CodeGen/AMDGPU/masked-load-vectortypes.ll b/llvm/test/CodeGen/AMDGPU/masked-load-vectortypes.ll
index 3b855a56a5abb..e076afca370ef 100644
--- a/llvm/test/CodeGen/AMDGPU/masked-load-vectortypes.ll
+++ b/llvm/test/CodeGen/AMDGPU/masked-load-vectortypes.ll
@@ -7,12 +7,12 @@ define <2 x i32> @uniform_masked_load_ptr1_mask_v2i32(ptr addrspace(1) inreg noc
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB0_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx2 v[0:1], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx2 v[0:1], v2, s[0:1]
 ; GFX942-NEXT:  .LBB0_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -30,14 +30,13 @@ define <4 x i32> @uniform_masked_load_ptr1_mask_v4i32(ptr addrspace(1) inreg noc
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB1_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v4, s[0:1]
 ; GFX942-NEXT:  .LBB1_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -55,14 +54,13 @@ define <4 x float> @uniform_masked_load_ptr1_mask_v4f32(ptr addrspace(1) inreg n
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB2_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v4, s[0:1]
 ; GFX942-NEXT:  .LBB2_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -80,20 +78,16 @@ define <8 x i32> @uniform_masked_load_ptr1_mask_v8i32(ptr addrspace(1) inreg noc
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
-; GFX942-NEXT:    v_mov_b32_e32 v4, v0
-; GFX942-NEXT:    v_mov_b32_e32 v5, v0
-; GFX942-NEXT:    v_mov_b32_e32 v6, v0
-; GFX942-NEXT:    v_mov_b32_e32 v7, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v8, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
+; GFX942-NEXT:    v_mov_b64_e32 v[4:5], v[0:1]
+; GFX942-NEXT:    v_mov_b64_e32 v[6:7], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB3_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[4:7], v0, s[0:1] offset:16
-; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[4:7], v8, s[0:1] offset:16
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v8, s[0:1]
 ; GFX942-NEXT:  .LBB3_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -111,20 +105,16 @@ define <8 x float> @uniform_masked_load_ptr1_mask_v8f32(ptr addrspace(1) inreg n
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
-; GFX942-NEXT:    v_mov_b32_e32 v4, v0
-; GFX942-NEXT:    v_mov_b32_e32 v5, v0
-; GFX942-NEXT:    v_mov_b32_e32 v6, v0
-; GFX942-NEXT:    v_mov_b32_e32 v7, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v8, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
+; GFX942-NEXT:    v_mov_b64_e32 v[4:5], v[0:1]
+; GFX942-NEXT:    v_mov_b64_e32 v[6:7], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB4_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[4:7], v0, s[0:1] offset:16
-; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[4:7], v8, s[0:1] offset:16
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v8, s[0:1]
 ; GFX942-NEXT:  .LBB4_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -142,14 +132,13 @@ define <8 x i16> @uniform_masked_load_ptr1_mask_v8i16(ptr addrspace(1) inreg noc
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB5_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v4, s[0:1]
 ; GFX942-NEXT:  .LBB5_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -167,14 +156,13 @@ define <8 x half> @uniform_masked_load_ptr1_mask_v8f16(ptr addrspace(1) inreg no
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB6_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v4, s[0:1]
 ; GFX942-NEXT:  .LBB6_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
@@ -192,14 +180,13 @@ define <8 x bfloat> @uniform_masked_load_ptr1_mask_v8bf16(ptr addrspace(1) inreg
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], v[0:1]
 ; GFX942-NEXT:    s_and_saveexec_b64 s[2:3], vcc
 ; GFX942-NEXT:    s_cbranch_execz .LBB7_2
 ; GFX942-NEXT:  ; %bb.1: ; %cond.load
-; GFX942-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; GFX942-NEXT:    global_load_dwordx4 v[0:3], v4, s[0:1]
 ; GFX942-NEXT:  .LBB7_2:
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/mfma-loop.ll b/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
index 244b68c703809..6edd8c0b16ba9 100644
--- a/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
@@ -2498,39 +2498,40 @@ define amdgpu_kernel void @test_mfma_nested_loop_zeroinit(ptr addrspace(1) %arg)
 ;
 ; GFX942-LABEL: test_mfma_nested_loop_zeroinit:
 ; GFX942:       ; %bb.0: ; %entry
-; GFX942-NEXT:    v_accvgpr_write_b32 a0, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX942-NEXT:    v_accvgpr_write_b32 a0, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a3, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a5, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a7, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a9, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a11, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a13, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a15, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a17, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a19, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a21, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a23, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a25, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a27, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a29, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a31, v1
 ; GFX942-NEXT:    s_mov_b32 s0, 0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a1, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a2, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a3, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a4, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a5, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a6, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a7, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a8, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a9, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a10, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a11, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a12, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a13, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a14, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a15, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a16, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a17, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a18, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a19, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a20, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a21, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a22, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a23, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a24, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a25, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a26, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a27, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a28, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a29, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a30, a0
-; GFX942-NEXT:    v_accvgpr_mov_b32 a31, a0
+; GFX942-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX942-NEXT:    v_accvgpr_write_b32 a2, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a4, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a6, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a8, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a10, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a12, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a14, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a16, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a18, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a20, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a22, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a24, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a26, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a28, v0
+; GFX942-NEXT:    v_accvgpr_write_b32 a30, v0
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 2.0
 ; GFX942-NEXT:    v_mov_b32_e32 v1, 1.0
 ; GFX942-NEXT:  .LBB9_1: ; %for.cond.preheader
diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-operands-subreg-imm-gfx942.mir b/llvm/test/CodeGen/AMDGPU/si-fold-operands-subreg-imm-gfx942.mir
new file mode 100644
index 0000000000000..a0721b06dc2cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/si-fold-operands-subreg-imm-gfx942.mir
@@ -0,0 +1,99 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -run-pass=si-fold-operands -o - %s | FileCheck %s
+
+---
+name:            v_mov_b64_pseudo_ignore
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: v_mov_b64_pseudo_ignore
+    ; CHECK: [[MAT_B64:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1234, implicit $exec
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[MAT_B64]], %subreg.sub0, [[MAT_B64]], %subreg.sub1
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
+    %0:vreg_64_align2 = V_MOV_B64_PSEUDO 1234, implicit $exec
+    %1:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name:            v_mov_b32_splatted_zero
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: v_mov_b32_splatted_zero
+    ; CHECK: {{%[0-9]}}:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: [[MAT_B64:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[MAT_B64]]
+    %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %1:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name:            multi_v_mov_b32_fits
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: multi_v_mov_b32_fits
+    ; CHECK: [[MAT_B32:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 12345, implicit $exec
+    ; CHECK-NEXT: [[MAT_B32_ZERO:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: [[MAT_B64:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 12345, implicit $exec
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[MAT_B64]]
+    %0:vgpr_32 = V_MOV_B32_e32 12345, implicit $exec
+    %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %2:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name:            multi_v_mov_b32_no_fold
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: multi_v_mov_b32_no_fold
+    ; CHECK: [[MAT_B32_ONE:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+    ; CHECK-NEXT: [[MAT_B32:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 12345, implicit $exec
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[MAT_B32_ONE]], %subreg.sub0, [[MAT_B32]], %subreg.sub1
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
+    %0:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+    %1:vgpr_32 = V_MOV_B32_e32 12345, implicit $exec
+    %2:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name:            v_mov_b32_to_b128
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: v_mov_b32_to_b128
+    ; CHECK: [[MAT_B32:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: [[MAT_B64_FIRST:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec
+    ; CHECK-NEXT: [[MAT_B64_SECOND:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[MAT_B64_FIRST]], %subreg.sub0_sub1, [[MAT_B64_SECOND]], %subreg.sub2_sub3
+    %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %1:vreg_128_align2 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1, %0, %subreg.sub2, %0, %subreg.sub3
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name:            multi_v_mov_b32_to_b128
+tracksRegLiveness: true
+body:             |
+  bb.0:
+
+    ; CHECK-LABEL: name: multi_v_mov_b32_to_b128
+    ; CHECK: [[MAT_B32_FIRST:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 12345, implicit $exec
+    ; CHECK-NEXT: [[MAT_B32_SECOND:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: [[MAT_B64_FIRST:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 12345, implicit $exec
+    ; CHECK-NEXT: [[MAT_B64_SECOND:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 12345, implicit $exec
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[MAT_B64_FIRST]], %subreg.sub0_sub1, [[MAT_B64_SECOND]], %subreg.sub2_sub3
+    %0:vgpr_32 = V_MOV_B32_e32 12345, implicit $exec
+    %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %2:vreg_128_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %0, %subreg.sub2, %1, %subreg.sub3
+    S_ENDPGM 0, implicit %2
+...
diff --git a/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll b/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
index d8c015b85584a..f3a9b665e2d99 100644
--- a/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
+++ b/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
@@ -6,25 +6,22 @@ define protected amdgpu_kernel void @test(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX942-LABEL: test:
 ; GFX942:       ; %bb.0: ; %entry
 ; GFX942-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
-; GFX942-NEXT:    v_mov_b32_e32 v2, v0
-; GFX942-NEXT:    v_mov_b32_e32 v3, v0
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[4:5], 0
+; GFX942-NEXT:    v_mov_b64_e32 v[6:7], v[4:5]
+; GFX942-NEXT:    v_mov_b32_e32 v10, 0
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
-; GFX942-NEXT:    v_mov_b64_e32 v[10:11], v[2:3]
-; GFX942-NEXT:    v_mov_b64_e32 v[8:9], v[0:1]
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX942-NEXT:    v_mov_b32_e32 v12, s4
-; GFX942-NEXT:    v_mov_b32_e32 v13, s5
-; GFX942-NEXT:    v_mov_b32_e32 v4, s6
-; GFX942-NEXT:    v_mov_b32_e32 v5, s7
-; GFX942-NEXT:    v_mov_b32_e32 v6, s7
-; GFX942-NEXT:    v_mov_b32_e32 v7, s7
+; GFX942-NEXT:    v_mov_b32_e32 v8, s4
+; GFX942-NEXT:    v_mov_b32_e32 v9, s5
+; GFX942-NEXT:    v_mov_b32_e32 v0, s6
+; GFX942-NEXT:    v_mov_b32_e32 v1, s7
+; GFX942-NEXT:    v_mov_b32_e32 v2, s7
+; GFX942-NEXT:    v_mov_b32_e32 v3, s7
 ; GFX942-NEXT:    s_nop 1
-; GFX942-NEXT:    v_smfmac_i32_16x16x64_i8 v[8:11], v[12:13], v[4:7], v13
+; GFX942-NEXT:    v_smfmac_i32_16x16x64_i8 v[4:7], v[8:9], v[0:3], v9
 ; GFX942-NEXT:    s_nop 6
-; GFX942-NEXT:    global_store_dword v0, v11, s[2:3] offset:12
+; GFX942-NEXT:    global_store_dword v10, v7, s[2:3] offset:12
 ; GFX942-NEXT:    s_endpgm
 entry:
   %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %in, i64 0
diff --git a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
index a401f989a2507..9867cd9495005 100644
--- a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
@@ -459,11 +459,10 @@ define amdgpu_kernel void @v8i8_phi_zeroinit(ptr addrspace(1) %src1, ptr addrspa
 ; GFX942-NEXT:  ; %bb.1: ; %bb.1
 ; GFX942-NEXT:    global_load_dwordx2 v[2:3], v5, s[10:11]
 ; GFX942-NEXT:    v_cmp_gt_u32_e32 vcc, 7, v4
-; GFX942-NEXT:    s_waitcnt vmcnt(1)
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_andn2_b64 s[0:1], s[0:1], exec
 ; GFX942-NEXT:    s_and_b64 s[4:5], vcc, exec
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
+; GFX942-NEXT:    s_waitcnt vmcnt(1)
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
 ; GFX942-NEXT:    s_or_b64 s[0:1], s[0:1], s[4:5]
 ; GFX942-NEXT:  .LBB9_2: ; %Flow
 ; GFX942-NEXT:    s_or_b64 exec, exec, s[2:3]

From 6b393a85e7018d4e04fc5c08cf155f0c30afa1e6 Mon Sep 17 00:00:00 2001
From: Janek van Oirschot <Janek.vanOirschot at amd.com>
Date: Tue, 13 May 2025 12:06:23 -0700
Subject: [PATCH 2/2] feedback

---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 80d9977fbffd1..4baf6c7b6ebef 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -2220,8 +2220,7 @@ bool SIFoldOperandsImpl::tryFoldImmRegSequence(MachineInstr &MI) {
   SmallVector<uint64_t, 8> ImmVals;
   uint64_t ImmVal = 0;
   uint64_t ImmSize = 0;
-  for (unsigned i = 0; i < Defs.size(); ++i) {
-    auto &[Op, SubIdx] = Defs[i];
+  for (auto &[Op, SubIdx] : Defs) {
     unsigned SubRegSize = TRI->getSubRegIdxSize(SubIdx);
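    // Odd channels form the high half of the current 64-bit chunk.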
     unsigned Shift = (TRI->getChannelFromSubReg(SubIdx) % 2) * SubRegSize;
     ImmSize += SubRegSize;
@@ -2240,9 +2239,6 @@ bool SIFoldOperandsImpl::tryFoldImmRegSequence(MachineInstr &MI) {
     }
   }
 
-  assert(ImmVals.size() > 0 &&
-         "REG_SEQUENCE should have at least 1 operand pair");
-
  // Can only combine REG_SEQUENCE into a single 64-bit immediate mov.
   if (DefRC == TRI->getVGPR64Class()) {
     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
@@ -2263,7 +2259,7 @@ bool SIFoldOperandsImpl::tryFoldImmRegSequence(MachineInstr &MI) {
 
   for (unsigned i = 0; i < ImmVals.size(); ++i) {
     const TargetRegisterClass *RC = TRI->getVGPR64Class();
-    auto MovReg = MRI->createVirtualRegister(RC);
+    Register MovReg = MRI->createVirtualRegister(RC);
    // Duplicate vmov imm materializations (e.g., splatted operands) should
    // get combined by the MachineCSE pass.
     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),


