[llvm-branch-commits] [llvm] AMDGPU: Add baseline test for vgpr fma with copied-from AGPR (PR #153020)

Matt Arsenault via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Aug 18 08:31:10 PDT 2025


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/153020

>From 9e68d80779760e9a218ef798c3818e863f0b087d Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 11 Aug 2025 09:10:41 +0900
Subject: [PATCH] AMDGPU: Add baseline test for vgpr fma with copied-from AGPR

We currently handle the case where an MFMA is copied to an AGPR,
but not the case where the MFMA is copying from an AGPR.
---
 .../rewrite-vgpr-mfma-to-agpr-copy-from.mir   | 261 ++++++++++++
 .../AMDGPU/rewrite-vgpr-mfma-to-agpr.ll       | 387 ++++++++++++++++++
 2 files changed, 648 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir

diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir
new file mode 100644
index 0000000000000..7fdc8c0d8019b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir
@@ -0,0 +1,261 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -run-pass=greedy,amdgpu-rewrite-agpr-copy-mfma -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name:  test_rewrite_mfma_copy_from_agpr_physreg
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $agpr0_agpr1
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_physreg
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $agpr0_agpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_64_align2 = COPY $agpr0_agpr1
+    ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:vreg_64_align2 = COPY $agpr0_agpr1
+    %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX2 %0, %4, 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    SI_RETURN
+...
+
+---
+name:  test_rewrite_mfma_copy_from_agpr_unrewritable_use
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_unrewritable_use
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
+    ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3866633 /* reguse:VReg_64_Align2 */, [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]]
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    %4:vreg_128_align2 = COPY %3
+    %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
+    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3866633 /* reguse:VReg_64_Align2 */, %5
+    SI_RETURN
+...
+
+---
+name:  test_rewrite_mfma_copy_from_agpr_src2_subreg_use
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_src2_subreg_use
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
+    ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    %4:vreg_128_align2 = COPY %3
+    %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX2 %0, %5, 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    SI_RETURN
+...
+
+---
+name:  test_rewrite_mfma_copy_from_agpr_vdst_subreg_use
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_vdst_subreg_use
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    %4:vreg_128_align2 = COPY %3
+    %4.sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %4, 0, 0, implicit $exec :: (store (s128), addrspace 1)
+    SI_RETURN
+...
+
+# A-to-V copy is performed one subregister at a time instead.
+---
+name:  test_rewrite_mfma_copy_from_agpr_split_copy
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_split_copy
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:areg_64_align2 = GLOBAL_LOAD_DWORDX2 [[COPY]], 0, 0, implicit $exec :: (load (s64), addrspace 1)
+    ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub1:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
+    ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_64_align2 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+    undef %4.sub0:vreg_64_align2 = COPY %3.sub0
+    %4.sub1:vreg_64_align2 = COPY %3.sub1
+    %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX2 %0, %5, 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    SI_RETURN
+...
+
+---
+name:  test_rewrite_mfma_copy_from_agpr_copyback
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_copyback
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:areg_64_align2 = GLOBAL_LOAD_DWORDX2 [[COPY]], 0, 0, implicit $exec :: (load (s64), addrspace 1)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX2_]]
+    ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[COPY3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY4]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_64_align2 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+    %4:vreg_64_align2 = COPY %3
+    %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4, 0, 0, 0, implicit $mode, implicit $exec
+    %6:areg_64_align2 = COPY %5
+    GLOBAL_STORE_DWORDX2 %0, %6, 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    SI_RETURN
+...
+
+# There is a read of the copy-from-agpr in the dst operand of the MFMA.
+---
+name:  test_rewrite_mfma_copy_from_agpr_vdst_subreg_use_imm_src2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_vdst_subreg_use_imm_src2
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    %4:vreg_128_align2 = COPY %3
+    %4.sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, 0, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %4, 0, 0, implicit $exec :: (store (s128), addrspace 1)
+    SI_RETURN
+...
+
+# Degenerate case. Copy from AGPR to VGPR is dead undef subreg def
+---
+name:  test_rewrite_mfma_copy_from_agpr_undef_vdst_subreg_use_imm_src2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_undef_vdst_subreg_use_imm_src2
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    ; CHECK-NEXT: dead [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
+    ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]].sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+    %4:vreg_128_align2 = COPY %3
+    undef %4.sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, 0, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %4, 0, 0, implicit $exec :: (store (s128), addrspace 1)
+    SI_RETURN
+...
+
+# Degenerate case. Copy from AGPR to VGPR is dead, but same register
+# is redefined as whole register.
+---
+name:  test_rewrite_mfma_copy_from_agpr_to_vdst_def_imm_src2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+
+    ; CHECK-LABEL: name: test_rewrite_mfma_copy_from_agpr_to_vdst_def_imm_src2
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY $vgpr4_vgpr5
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:areg_64_align2 = GLOBAL_LOAD_DWORDX2 [[COPY]], 0, 0, implicit $exec :: (load (s64), addrspace 1)
+    ; CHECK-NEXT: dead [[COPY3:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX2_]]
+    ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    ; CHECK-NEXT: SI_RETURN
+    %0:vreg_64_align2 = COPY $vgpr4_vgpr5
+    %1:av_64_align2 = COPY $vgpr0_vgpr1
+    %2:av_64_align2 = COPY $vgpr2_vgpr3
+    %3:areg_64_align2 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+    %4:vreg_64_align2 = COPY %3
+    %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, 0, 0, 0, 0, implicit $mode, implicit $exec
+    GLOBAL_STORE_DWORDX2 %0, %4, 0, 0, implicit $exec :: (store (s64), addrspace 1)
+    SI_RETURN
+...
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index 58ec41e3928bd..81613f69c982b 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -522,7 +522,394 @@ define void @test_rewrite_mfma_subreg_insert2(double %arg0, double %arg1, ptr ad
   ret void
 }
 
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class(ptr addrspace(1) %arg0, ptr addrspace(1) %arg1) #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_from_agpr_class:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:31]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_accvgpr_write_b32 a32, v0
+; CHECK-NEXT:    v_accvgpr_read_b32 v63, a31
+; CHECK-NEXT:    v_accvgpr_read_b32 v62, a30
+; CHECK-NEXT:    v_accvgpr_read_b32 v61, a29
+; CHECK-NEXT:    v_accvgpr_read_b32 v60, a28
+; CHECK-NEXT:    v_accvgpr_read_b32 v59, a27
+; CHECK-NEXT:    v_accvgpr_read_b32 v58, a26
+; CHECK-NEXT:    v_accvgpr_read_b32 v57, a25
+; CHECK-NEXT:    v_accvgpr_read_b32 v56, a24
+; CHECK-NEXT:    v_accvgpr_read_b32 v55, a23
+; CHECK-NEXT:    v_accvgpr_read_b32 v54, a22
+; CHECK-NEXT:    v_accvgpr_read_b32 v53, a21
+; CHECK-NEXT:    v_accvgpr_read_b32 v52, a20
+; CHECK-NEXT:    v_accvgpr_read_b32 v51, a19
+; CHECK-NEXT:    v_accvgpr_read_b32 v50, a18
+; CHECK-NEXT:    v_accvgpr_read_b32 v49, a17
+; CHECK-NEXT:    v_accvgpr_read_b32 v48, a16
+; CHECK-NEXT:    v_accvgpr_read_b32 v47, a15
+; CHECK-NEXT:    v_accvgpr_read_b32 v46, a14
+; CHECK-NEXT:    v_accvgpr_read_b32 v45, a13
+; CHECK-NEXT:    v_accvgpr_read_b32 v44, a12
+; CHECK-NEXT:    v_accvgpr_read_b32 v43, a11
+; CHECK-NEXT:    v_accvgpr_read_b32 v42, a10
+; CHECK-NEXT:    v_accvgpr_read_b32 v41, a9
+; CHECK-NEXT:    v_accvgpr_read_b32 v40, a8
+; CHECK-NEXT:    v_accvgpr_read_b32 v39, a7
+; CHECK-NEXT:    v_accvgpr_read_b32 v38, a6
+; CHECK-NEXT:    v_accvgpr_read_b32 v37, a5
+; CHECK-NEXT:    v_accvgpr_read_b32 v36, a4
+; CHECK-NEXT:    v_accvgpr_read_b32 v35, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v34, a2
+; CHECK-NEXT:    v_accvgpr_read_b32 v33, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v32, a0
+; CHECK-NEXT:    v_accvgpr_write_b32 a0, 2.0
+; CHECK-NEXT:    v_accvgpr_write_b32 a1, 4.0
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    v_mfma_f32_32x32x1_2b_f32 v[0:31], a0, a1, v[32:63]
+; CHECK-NEXT:    v_accvgpr_write_b32 a0, v32
+; CHECK-NEXT:    v_accvgpr_write_b32 a1, v33
+; CHECK-NEXT:    v_accvgpr_write_b32 a2, v34
+; CHECK-NEXT:    v_accvgpr_write_b32 a3, v35
+; CHECK-NEXT:    v_accvgpr_write_b32 a4, v36
+; CHECK-NEXT:    v_accvgpr_write_b32 a5, v37
+; CHECK-NEXT:    v_accvgpr_write_b32 a6, v38
+; CHECK-NEXT:    v_accvgpr_write_b32 a7, v39
+; CHECK-NEXT:    v_accvgpr_write_b32 a8, v40
+; CHECK-NEXT:    v_accvgpr_write_b32 a9, v41
+; CHECK-NEXT:    v_accvgpr_write_b32 a10, v42
+; CHECK-NEXT:    v_accvgpr_write_b32 a11, v43
+; CHECK-NEXT:    v_accvgpr_write_b32 a12, v44
+; CHECK-NEXT:    v_accvgpr_write_b32 a13, v45
+; CHECK-NEXT:    v_accvgpr_write_b32 a14, v46
+; CHECK-NEXT:    v_accvgpr_write_b32 a15, v47
+; CHECK-NEXT:    v_accvgpr_write_b32 a16, v48
+; CHECK-NEXT:    v_accvgpr_write_b32 a17, v49
+; CHECK-NEXT:    v_accvgpr_write_b32 a18, v50
+; CHECK-NEXT:    v_accvgpr_write_b32 a19, v51
+; CHECK-NEXT:    v_accvgpr_write_b32 a20, v52
+; CHECK-NEXT:    v_accvgpr_write_b32 a21, v53
+; CHECK-NEXT:    v_accvgpr_write_b32 a22, v54
+; CHECK-NEXT:    v_accvgpr_write_b32 a23, v55
+; CHECK-NEXT:    v_accvgpr_write_b32 a24, v56
+; CHECK-NEXT:    v_accvgpr_write_b32 a25, v57
+; CHECK-NEXT:    v_accvgpr_write_b32 a26, v58
+; CHECK-NEXT:    v_accvgpr_write_b32 a27, v59
+; CHECK-NEXT:    v_accvgpr_write_b32 a28, v60
+; CHECK-NEXT:    v_accvgpr_write_b32 a29, v61
+; CHECK-NEXT:    v_accvgpr_write_b32 a30, v62
+; CHECK-NEXT:    v_accvgpr_write_b32 a31, v63
+; CHECK-NEXT:    v_accvgpr_read_b32 v32, a32
+; CHECK-NEXT:    v_mov_b32_e32 v33, 0x41000000
+; CHECK-NEXT:    v_and_b32_e32 v32, 0x3ff, v32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v32, 7, v32
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT:    global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT:    global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT:    global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT:    global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT:    global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT:    global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT:    global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v34, 0x41800000
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    v_accvgpr_read_b32 v0, a0
+; CHECK-NEXT:    v_accvgpr_read_b32 v1, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v2, a2
+; CHECK-NEXT:    v_accvgpr_read_b32 v3, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v4, a4
+; CHECK-NEXT:    v_accvgpr_read_b32 v5, a5
+; CHECK-NEXT:    v_accvgpr_read_b32 v6, a6
+; CHECK-NEXT:    v_accvgpr_read_b32 v7, a7
+; CHECK-NEXT:    v_accvgpr_read_b32 v8, a8
+; CHECK-NEXT:    v_accvgpr_read_b32 v9, a9
+; CHECK-NEXT:    v_accvgpr_read_b32 v10, a10
+; CHECK-NEXT:    v_accvgpr_read_b32 v11, a11
+; CHECK-NEXT:    v_accvgpr_read_b32 v12, a12
+; CHECK-NEXT:    v_accvgpr_read_b32 v13, a13
+; CHECK-NEXT:    v_accvgpr_read_b32 v14, a14
+; CHECK-NEXT:    v_accvgpr_read_b32 v15, a15
+; CHECK-NEXT:    v_accvgpr_read_b32 v16, a16
+; CHECK-NEXT:    v_accvgpr_read_b32 v17, a17
+; CHECK-NEXT:    v_accvgpr_read_b32 v18, a18
+; CHECK-NEXT:    v_accvgpr_read_b32 v19, a19
+; CHECK-NEXT:    v_accvgpr_read_b32 v20, a20
+; CHECK-NEXT:    v_accvgpr_read_b32 v21, a21
+; CHECK-NEXT:    v_accvgpr_read_b32 v22, a22
+; CHECK-NEXT:    v_accvgpr_read_b32 v23, a23
+; CHECK-NEXT:    v_accvgpr_read_b32 v24, a24
+; CHECK-NEXT:    v_accvgpr_read_b32 v25, a25
+; CHECK-NEXT:    v_accvgpr_read_b32 v26, a26
+; CHECK-NEXT:    v_accvgpr_read_b32 v27, a27
+; CHECK-NEXT:    v_accvgpr_read_b32 v28, a28
+; CHECK-NEXT:    v_accvgpr_read_b32 v29, a29
+; CHECK-NEXT:    v_accvgpr_read_b32 v30, a30
+; CHECK-NEXT:    v_accvgpr_read_b32 v31, a31
+; CHECK-NEXT:    s_nop 1
+; CHECK-NEXT:    v_mfma_f32_32x32x1_2b_f32 v[0:31], v33, v34, v[0:31]
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    s_nop 1
+; CHECK-NEXT:    global_store_dwordx4 v32, v[24:27], s[2:3] offset:96
+; CHECK-NEXT:    global_store_dwordx4 v32, v[28:31], s[2:3] offset:112
+; CHECK-NEXT:    global_store_dwordx4 v32, v[16:19], s[2:3] offset:64
+; CHECK-NEXT:    global_store_dwordx4 v32, v[20:23], s[2:3] offset:80
+; CHECK-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:32
+; CHECK-NEXT:    global_store_dwordx4 v32, v[12:15], s[2:3] offset:48
+; CHECK-NEXT:    global_store_dwordx4 v32, v[0:3], s[2:3]
+; CHECK-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:16
+; CHECK-NEXT:    s_endpgm
+  %src2 = call <32 x float> asm sideeffect "; def $0", "=a"()
+  %mai0 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %src2, i32 0, i32 0, i32 0)
+  %mai1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 8.0, float 16.0, <32 x float> %src2, i32 0, i32 0, i32 0)
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr <32 x float>, ptr addrspace(1) %arg0, i32 %id
+  store <32 x float> %mai0, ptr addrspace(1) %gep0, align 128
+  %gep1 = getelementptr <32 x float>, ptr addrspace(1) %arg1, i32 %id
+  store <32 x float> %mai1, ptr addrspace(1) %gep1, align 128
+  ret void
+}
+
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class_chain(ptr addrspace(1) %arg0) #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_from_agpr_class_chain:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v1, 2.0
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:31]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_mov_b32_e32 v34, 4.0
+; CHECK-NEXT:    v_accvgpr_read_b32 v33, a31
+; CHECK-NEXT:    v_accvgpr_read_b32 v32, a30
+; CHECK-NEXT:    v_accvgpr_read_b32 v31, a29
+; CHECK-NEXT:    v_accvgpr_read_b32 v30, a28
+; CHECK-NEXT:    v_accvgpr_read_b32 v29, a27
+; CHECK-NEXT:    v_accvgpr_read_b32 v28, a26
+; CHECK-NEXT:    v_accvgpr_read_b32 v27, a25
+; CHECK-NEXT:    v_accvgpr_read_b32 v26, a24
+; CHECK-NEXT:    v_accvgpr_read_b32 v25, a23
+; CHECK-NEXT:    v_accvgpr_read_b32 v24, a22
+; CHECK-NEXT:    v_accvgpr_read_b32 v23, a21
+; CHECK-NEXT:    v_accvgpr_read_b32 v22, a20
+; CHECK-NEXT:    v_accvgpr_read_b32 v21, a19
+; CHECK-NEXT:    v_accvgpr_read_b32 v20, a18
+; CHECK-NEXT:    v_accvgpr_read_b32 v19, a17
+; CHECK-NEXT:    v_accvgpr_read_b32 v18, a16
+; CHECK-NEXT:    v_accvgpr_read_b32 v17, a15
+; CHECK-NEXT:    v_accvgpr_read_b32 v16, a14
+; CHECK-NEXT:    v_accvgpr_read_b32 v15, a13
+; CHECK-NEXT:    v_accvgpr_read_b32 v14, a12
+; CHECK-NEXT:    v_accvgpr_read_b32 v13, a11
+; CHECK-NEXT:    v_accvgpr_read_b32 v12, a10
+; CHECK-NEXT:    v_accvgpr_read_b32 v11, a9
+; CHECK-NEXT:    v_accvgpr_read_b32 v10, a8
+; CHECK-NEXT:    v_accvgpr_read_b32 v9, a7
+; CHECK-NEXT:    v_accvgpr_read_b32 v8, a6
+; CHECK-NEXT:    v_accvgpr_read_b32 v7, a5
+; CHECK-NEXT:    v_accvgpr_read_b32 v6, a4
+; CHECK-NEXT:    v_accvgpr_read_b32 v5, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v4, a2
+; CHECK-NEXT:    v_accvgpr_read_b32 v3, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    v_mfma_f32_32x32x1_2b_f32 v[2:33], v1, v34, v[2:33]
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0x41000000
+; CHECK-NEXT:    v_mov_b32_e32 v34, 0x41800000
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 7, v0
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    v_mfma_f32_32x32x1_2b_f32 v[2:33], v1, v34, v[2:33]
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    global_store_dwordx4 v0, v[30:33], s[0:1] offset:112
+; CHECK-NEXT:    global_store_dwordx4 v0, v[26:29], s[0:1] offset:96
+; CHECK-NEXT:    global_store_dwordx4 v0, v[22:25], s[0:1] offset:80
+; CHECK-NEXT:    global_store_dwordx4 v0, v[18:21], s[0:1] offset:64
+; CHECK-NEXT:    global_store_dwordx4 v0, v[14:17], s[0:1] offset:48
+; CHECK-NEXT:    global_store_dwordx4 v0, v[10:13], s[0:1] offset:32
+; CHECK-NEXT:    global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
+; CHECK-NEXT:    global_store_dwordx4 v0, v[2:5], s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %src2 = call <32 x float> asm sideeffect "; def $0", "=a"()
+  %mai0 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %src2, i32 0, i32 0, i32 0)
+  %mai1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 8.0, float 16.0, <32 x float> %mai0, i32 0, i32 0, i32 0)
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr <32 x float>, ptr addrspace(1) %arg0, i32 %id
+  store <32 x float> %mai1, ptr addrspace(1) %gep0, align 128
+  ret void
+}
+
+; Untied case
+define void @test_rewrite_mfma_copy_from_agpr_class_f64_4x4x4f64(double %arg0, double %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_copy_from_agpr_class_f64_4x4x4f64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:1]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_and_b32_e32 v8, 0x3ff, v31
+; CHECK-NEXT:    v_accvgpr_read_b32 v7, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v6, a0
+; CHECK-NEXT:    s_nop 1
+; CHECK-NEXT:    v_mfma_f64_4x4x4_4b_f64 v[0:1], v[0:1], v[2:3], v[6:7]
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 3, v8
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    v_lshl_add_u64 v[2:3], v[4:5], 0, v[2:3]
+; CHECK-NEXT:    s_nop 5
+; CHECK-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %src2 = call double asm sideeffect "; def $0", "=a"()
+  %mai = call double @llvm.amdgcn.mfma.f64.4x4x4f64(double %arg0, double %arg1, double %src2, i32 0, i32 0, i32 0)
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr double, ptr addrspace(1) %ptr, i32 %id
+  store double %mai, ptr addrspace(1) %gep0, align 8
+  ret void
+}
+
+define void @test_rewrite_mfma_copy_from_agpr_class_f64_4x4x4f64_chain(double %arg0, double %arg1, double %arg2, double %arg3, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_copy_from_agpr_class_f64_4x4x4f64_chain:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:1]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    v_accvgpr_read_b32 v11, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v10, a0
+; CHECK-NEXT:    s_nop 1
+; CHECK-NEXT:    v_mfma_f64_4x4x4_4b_f64 v[0:1], v[0:1], v[2:3], v[10:11]
+; CHECK-NEXT:    v_and_b32_e32 v2, 0x3ff, v31
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 3, v2
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    v_lshl_add_u64 v[2:3], v[8:9], 0, v[2:3]
+; CHECK-NEXT:    v_mfma_f64_4x4x4_4b_f64 v[0:1], v[4:5], v[6:7], v[0:1]
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %src2 = call double asm sideeffect "; def $0", "=a"()
+  %mai0 = call double @llvm.amdgcn.mfma.f64.4x4x4f64(double %arg0, double %arg1, double %src2, i32 0, i32 0, i32 0)
+  %mai1 = call double @llvm.amdgcn.mfma.f64.4x4x4f64(double %arg2, double %arg3, double %mai0, i32 0, i32 0, i32 0)
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr double, ptr addrspace(1) %ptr, i32 %id
+  store double %mai1, ptr addrspace(1) %gep0, align 8
+  ret void
+}
+
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class_subreg(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_from_agpr_class_subreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v1, 2.0
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:31]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_mov_b32_e32 v18, 4.0
+; CHECK-NEXT:    v_accvgpr_read_b32 v17, a15
+; CHECK-NEXT:    v_accvgpr_read_b32 v16, a14
+; CHECK-NEXT:    v_accvgpr_read_b32 v15, a13
+; CHECK-NEXT:    v_accvgpr_read_b32 v14, a12
+; CHECK-NEXT:    v_accvgpr_read_b32 v13, a11
+; CHECK-NEXT:    v_accvgpr_read_b32 v12, a10
+; CHECK-NEXT:    v_accvgpr_read_b32 v11, a9
+; CHECK-NEXT:    v_accvgpr_read_b32 v10, a8
+; CHECK-NEXT:    v_accvgpr_read_b32 v9, a7
+; CHECK-NEXT:    v_accvgpr_read_b32 v8, a6
+; CHECK-NEXT:    v_accvgpr_read_b32 v7, a5
+; CHECK-NEXT:    v_accvgpr_read_b32 v6, a4
+; CHECK-NEXT:    v_accvgpr_read_b32 v5, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v4, a2
+; CHECK-NEXT:    v_accvgpr_read_b32 v3, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    v_mfma_f32_16x16x1_4b_f32 v[2:17], v1, v18, v[2:17]
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 6, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    global_store_dwordx4 v0, v[14:17], s[0:1] offset:48
+; CHECK-NEXT:    global_store_dwordx4 v0, v[10:13], s[0:1] offset:32
+; CHECK-NEXT:    global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
+; CHECK-NEXT:    global_store_dwordx4 v0, v[2:5], s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %def = call <32 x float> asm sideeffect "; def $0", "=a"()
+  %src2 = shufflevector <32 x float> %def, <32 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %mai = call <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float 2.0, float 4.0, <16 x float> %src2, i32 0, i32 0, i32 0)
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr <16 x float>, ptr addrspace(1) %arg, i32 %id
+  store <16 x float> %mai, ptr addrspace(1) %gep, align 64
+  ret void
+}
+
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class_subreg_odd(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_from_agpr_class_subreg_odd:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v1, 2.0
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:31]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_mov_b32_e32 v18, 4.0
+; CHECK-NEXT:    v_accvgpr_read_b32 v17, a16
+; CHECK-NEXT:    v_accvgpr_read_b32 v16, a15
+; CHECK-NEXT:    v_accvgpr_read_b32 v15, a14
+; CHECK-NEXT:    v_accvgpr_read_b32 v14, a13
+; CHECK-NEXT:    v_accvgpr_read_b32 v13, a12
+; CHECK-NEXT:    v_accvgpr_read_b32 v12, a11
+; CHECK-NEXT:    v_accvgpr_read_b32 v11, a10
+; CHECK-NEXT:    v_accvgpr_read_b32 v10, a9
+; CHECK-NEXT:    v_accvgpr_read_b32 v9, a8
+; CHECK-NEXT:    v_accvgpr_read_b32 v8, a7
+; CHECK-NEXT:    v_accvgpr_read_b32 v7, a6
+; CHECK-NEXT:    v_accvgpr_read_b32 v6, a5
+; CHECK-NEXT:    v_accvgpr_read_b32 v5, a4
+; CHECK-NEXT:    v_accvgpr_read_b32 v4, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v3, a2
+; CHECK-NEXT:    v_accvgpr_read_b32 v2, a1
+; CHECK-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    v_mfma_f32_16x16x1_4b_f32 v[2:17], v1, v18, v[2:17]
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 6, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_nop 7
+; CHECK-NEXT:    global_store_dwordx4 v0, v[14:17], s[0:1] offset:48
+; CHECK-NEXT:    global_store_dwordx4 v0, v[10:13], s[0:1] offset:32
+; CHECK-NEXT:    global_store_dwordx4 v0, v[6:9], s[0:1] offset:16
+; CHECK-NEXT:    global_store_dwordx4 v0, v[2:5], s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %def = call <32 x float> asm sideeffect "; def $0", "=a"()
+  %src2 = shufflevector <32 x float> %def, <32 x float> poison, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+  %mai = call <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float 2.0, float 4.0, <16 x float> %src2, i32 0, i32 0, i32 0)
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr <16 x float>, ptr addrspace(1) %arg, i32 %id
+  store <16 x float> %mai, ptr addrspace(1) %gep, align 64
+  ret void
+}
+
+; a->v->mfma->a
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class_copy_back() #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_from_agpr_class_copy_back:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    v_mov_b32_e32 v32, 2.0
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def a[0:31]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_mov_b32_e32 v33, 4.0
+; CHECK-NEXT:    s_nop 1
+; CHECK-NEXT:    v_mfma_f32_32x32x1_2b_f32 a[0:31], v32, v33, a[0:31]
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; use a[0:31]
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    s_endpgm
+  %src2 = call <32 x float> asm sideeffect "; def $0", "=a"()
+  %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %src2, i32 0, i32 0, i32 0)
+  call void asm sideeffect "; use $0", "a"(<32 x float> %mai)
+  ret void
+}
+
 declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2
+declare <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float, float, <16 x float>, i32 immarg, i32 immarg, i32 immarg) #2
 declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2
 declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #3
 



More information about the llvm-branch-commits mailing list