[llvm] [AMDGPU] Force the third source operand of the MAI instructions to VGPR if no AGPRs are used. (PR #69720)

via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 23 10:15:21 PDT 2023


https://github.com/alex-t updated https://github.com/llvm/llvm-project/pull/69720

From 97a70a658c4fc997f4f78540d87040e5e7b14d4b Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev@amd.com>
Date: Fri, 20 Oct 2023 14:47:37 +0200
Subject: [PATCH 1/3] [AMDGPU] Force the third source operand of the MAI
 instructions to VGPR if no AGPRs are used.

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp   | 34 ++++++++-----
 llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll | 53 +++++++++++++++++++++
 2 files changed, 74 insertions(+), 13 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a7d8e11461733fc..99569c8071bebee 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -14136,7 +14136,9 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
 
-  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  MachineFunction *MF = MI.getParent()->getParent();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
 
   if (TII->isVOP3(MI.getOpcode())) {
     // Make sure constant bus requirements are respected.
@@ -14147,9 +14149,14 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
     // use between vgpr and agpr as agpr tuples tend to be big.
     if (!MI.getDesc().operands().empty()) {
       unsigned Opc = MI.getOpcode();
+      bool NoAGPRs = !Info->mayNeedAGPRs();
+      SmallVector<int> Opnds;
+      Opnds.push_back(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0));
+      Opnds.push_back(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
+      if (NoAGPRs)
+        Opnds.push_back(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
-      for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
-                      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
+      for (auto I : Opnds) {
         if (I == -1)
           break;
         MachineOperand &Op = MI.getOperand(I);
@@ -14169,18 +14176,19 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
         MRI.setRegClass(Op.getReg(), NewRC);
       }
 
-      // Resolve the rest of AV operands to AGPRs.
-      if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
-        if (Src2->isReg() && Src2->getReg().isVirtual()) {
-          auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
-          if (TRI->isVectorSuperClass(RC)) {
-            auto *NewRC = TRI->getEquivalentAGPRClass(RC);
-            MRI.setRegClass(Src2->getReg(), NewRC);
-            if (Src2->isTied())
-              MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
+      if (!NoAGPRs)
+        // Resolve the rest of AV operands to AGPRs.
+        if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
+          if (Src2->isReg() && Src2->getReg().isVirtual()) {
+            auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
+            if (TRI->isVectorSuperClass(RC)) {
+              auto *NewRC = TRI->getEquivalentAGPRClass(RC);
+              MRI.setRegClass(Src2->getReg(), NewRC);
+              if (Src2->isTied())
+                MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
+            }
           }
         }
-      }
     }
 
     return;
diff --git a/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll b/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
new file mode 100644
index 000000000000000..a8bd0afe3ce16fd
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=MI300 %s
+
+
+define protected amdgpu_kernel void @test(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; MI300-LABEL: test:
+; MI300:       ; %bb.0: ; %entry
+; MI300-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; MI300-NEXT:    v_mov_b32_e32 v0, 0
+; MI300-NEXT:    v_mov_b32_e32 v2, v0
+; MI300-NEXT:    v_mov_b32_e32 v3, v0
+; MI300-NEXT:    v_mov_b32_e32 v1, v0
+; MI300-NEXT:    s_waitcnt lgkmcnt(0)
+; MI300-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; MI300-NEXT:    v_mov_b64_e32 v[10:11], v[2:3]
+; MI300-NEXT:    v_mov_b64_e32 v[8:9], v[0:1]
+; MI300-NEXT:    s_waitcnt lgkmcnt(0)
+; MI300-NEXT:    v_mov_b32_e32 v12, s4
+; MI300-NEXT:    v_mov_b32_e32 v13, s5
+; MI300-NEXT:    v_mov_b32_e32 v4, s6
+; MI300-NEXT:    v_mov_b32_e32 v5, s7
+; MI300-NEXT:    v_mov_b32_e32 v6, s7
+; MI300-NEXT:    v_mov_b32_e32 v7, s7
+; MI300-NEXT:    s_nop 1
+; MI300-NEXT:    v_smfmac_i32_16x16x64_i8 v[8:11], v[12:13], v[4:7], v13
+; MI300-NEXT:    s_nop 6
+; MI300-NEXT:    global_store_dword v0, v11, s[2:3] offset:12 sc0 sc1
+; MI300-NEXT:    s_endpgm
+entry:
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %in, i64 0
+  %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 1
+  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 2
+  %arrayidx3 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 3
+  %0 = load i32, ptr addrspace(1) %arrayidx
+  %1 = load i32, ptr addrspace(1) %arrayidx1
+  %2 = load i32, ptr addrspace(1) %arrayidx2
+  %3 = load i32, ptr addrspace(1) %arrayidx3
+  %src1.0 = insertelement <2 x i32> undef, i32 %0, i64 0
+  %src1 = insertelement <2 x i32> %src1.0, i32 %1, i64 1
+  %src2.0 = insertelement <4 x i32> undef, i32 %2, i64 0
+  %src2.1 = insertelement <4 x i32> %src2.0, i32 %3, i64 1
+  %src2.2 = insertelement <4 x i32> %src2.1, i32 %3, i64 2
+  %src2 = insertelement <4 x i32> %src2.2, i32 %3, i64 3
+  %4 = tail call <4 x i32> @llvm.amdgcn.smfmac.i32.16x16x64.i8(<2 x i32> %src1, <4 x i32> %src2, <4 x i32> zeroinitializer, i32 %1, i32 0, i32 0)
+  %vecext = extractelement <4 x i32> %4, i64 0
+  %vecext.1 = extractelement <4 x i32> %4, i64 1
+  %vecext.2 = extractelement <4 x i32> %4, i64 2
+  %vecext.3 = extractelement <4 x i32> %4, i64 3
+  %arrayidx4 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 3
+  store i32 %vecext.3, ptr addrspace(1) %arrayidx4
+  ret void
+}
+declare <4 x i32> @llvm.amdgcn.smfmac.i32.16x16x64.i8(<2 x i32>, <4 x i32>, <4 x i32>, i32, i32 immarg, i32 immarg)
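
For reference, the lowering exercised by this test can also be reached from source code. The sketch below is illustrative only: it assumes a HIP-style kernel, assumes the type aliases shown, and assumes the gfx940 clang builtin __builtin_amdgcn_smfmac_i32_16x16x64_i8 mirrors the operand list of the llvm.amdgcn.smfmac.i32.16x16x64.i8 intrinsic declared above (A, B, accumulator, index, cbsz, abid); it is not part of the patch.

  // Illustrative only; the type aliases and the builtin signature are
  // assumptions, not taken from this patch.
  typedef int v2i __attribute__((ext_vector_type(2)));
  typedef int v4i __attribute__((ext_vector_type(4)));

  __global__ void smfmac_no_agprs(const int *in, int *out) {
    v2i a = {in[0], in[1]};
    v4i b = {in[2], in[3], in[3], in[3]};
    v4i acc = {0, 0, 0, 0};
    // This kernel needs no AGPRs, so after the change the accumulator is
    // expected to stay in VGPRs instead of being copied to an AGPR tuple.
    acc = __builtin_amdgcn_smfmac_i32_16x16x64_i8(a, b, acc, in[1], 0, 0);
    out[3] = acc[3];
  }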

From b1b172053ff41fe4013420b8278eba631526c1ed Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev@amd.com>
Date: Mon, 23 Oct 2023 19:00:06 +0200
Subject: [PATCH 2/3] [AMDGPU] Force the third source operand of the MAI
 instructions to VGPR if no AGPRs are used. Changes for review

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp   | 38 ++++++++--------
 llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll | 48 ++++++++++-----------
 2 files changed, 44 insertions(+), 42 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 99569c8071bebee..3ec704c2d3d981e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -14149,16 +14149,16 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
     // use between vgpr and agpr as agpr tuples tend to be big.
     if (!MI.getDesc().operands().empty()) {
       unsigned Opc = MI.getOpcode();
-      bool NoAGPRs = !Info->mayNeedAGPRs();
-      SmallVector<int> Opnds;
-      Opnds.push_back(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0));
-      Opnds.push_back(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
-      if (NoAGPRs)
-        Opnds.push_back(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
+      bool HasAGPRs = Info->mayNeedAGPRs();
       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
-      for (auto I : Opnds) {
+      int16_t Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
+      for (auto I : {AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
+                     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
+                     Src2Idx}) {
         if (I == -1)
           break;
+        if ((I == Src2Idx) && (HasAGPRs))
+          break;
         MachineOperand &Op = MI.getOperand(I);
         if (!Op.isReg() || !Op.getReg().isVirtual())
           continue;
@@ -14176,19 +14176,21 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
         MRI.setRegClass(Op.getReg(), NewRC);
       }
 
-      if (!NoAGPRs)
-        // Resolve the rest of AV operands to AGPRs.
-        if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
-          if (Src2->isReg() && Src2->getReg().isVirtual()) {
-            auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
-            if (TRI->isVectorSuperClass(RC)) {
-              auto *NewRC = TRI->getEquivalentAGPRClass(RC);
-              MRI.setRegClass(Src2->getReg(), NewRC);
-              if (Src2->isTied())
-                MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
-            }
+      if (!HasAGPRs)
+        return;
+
+      // Resolve the rest of AV operands to AGPRs.
+      if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
+        if (Src2->isReg() && Src2->getReg().isVirtual()) {
+          auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
+          if (TRI->isVectorSuperClass(RC)) {
+            auto *NewRC = TRI->getEquivalentAGPRClass(RC);
+            MRI.setRegClass(Src2->getReg(), NewRC);
+            if (Src2->isTied())
+              MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
           }
         }
+      }
     }
 
     return;
diff --git a/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll b/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
index a8bd0afe3ce16fd..a0a4f277db8c5b6 100644
--- a/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
+++ b/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
@@ -1,31 +1,31 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=MI300 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=gfx940 %s
 
 
 define protected amdgpu_kernel void @test(ptr addrspace(1) %in, ptr addrspace(1) %out) {
-; MI300-LABEL: test:
-; MI300:       ; %bb.0: ; %entry
-; MI300-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
-; MI300-NEXT:    v_mov_b32_e32 v0, 0
-; MI300-NEXT:    v_mov_b32_e32 v2, v0
-; MI300-NEXT:    v_mov_b32_e32 v3, v0
-; MI300-NEXT:    v_mov_b32_e32 v1, v0
-; MI300-NEXT:    s_waitcnt lgkmcnt(0)
-; MI300-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
-; MI300-NEXT:    v_mov_b64_e32 v[10:11], v[2:3]
-; MI300-NEXT:    v_mov_b64_e32 v[8:9], v[0:1]
-; MI300-NEXT:    s_waitcnt lgkmcnt(0)
-; MI300-NEXT:    v_mov_b32_e32 v12, s4
-; MI300-NEXT:    v_mov_b32_e32 v13, s5
-; MI300-NEXT:    v_mov_b32_e32 v4, s6
-; MI300-NEXT:    v_mov_b32_e32 v5, s7
-; MI300-NEXT:    v_mov_b32_e32 v6, s7
-; MI300-NEXT:    v_mov_b32_e32 v7, s7
-; MI300-NEXT:    s_nop 1
-; MI300-NEXT:    v_smfmac_i32_16x16x64_i8 v[8:11], v[12:13], v[4:7], v13
-; MI300-NEXT:    s_nop 6
-; MI300-NEXT:    global_store_dword v0, v11, s[2:3] offset:12 sc0 sc1
-; MI300-NEXT:    s_endpgm
+; gfx940-LABEL: test:
+; gfx940:       ; %bb.0: ; %entry
+; gfx940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; gfx940-NEXT:    v_mov_b32_e32 v0, 0
+; gfx940-NEXT:    v_mov_b32_e32 v2, v0
+; gfx940-NEXT:    v_mov_b32_e32 v3, v0
+; gfx940-NEXT:    v_mov_b32_e32 v1, v0
+; gfx940-NEXT:    s_waitcnt lgkmcnt(0)
+; gfx940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; gfx940-NEXT:    v_mov_b64_e32 v[10:11], v[2:3]
+; gfx940-NEXT:    v_mov_b64_e32 v[8:9], v[0:1]
+; gfx940-NEXT:    s_waitcnt lgkmcnt(0)
+; gfx940-NEXT:    v_mov_b32_e32 v12, s4
+; gfx940-NEXT:    v_mov_b32_e32 v13, s5
+; gfx940-NEXT:    v_mov_b32_e32 v4, s6
+; gfx940-NEXT:    v_mov_b32_e32 v5, s7
+; gfx940-NEXT:    v_mov_b32_e32 v6, s7
+; gfx940-NEXT:    v_mov_b32_e32 v7, s7
+; gfx940-NEXT:    s_nop 1
+; gfx940-NEXT:    v_smfmac_i32_16x16x64_i8 v[8:11], v[12:13], v[4:7], v13
+; gfx940-NEXT:    s_nop 6
+; gfx940-NEXT:    global_store_dword v0, v11, s[2:3] offset:12 sc0 sc1
+; gfx940-NEXT:    s_endpgm
 entry:
   %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %in, i64 0
   %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 1

From 68ea90548f94de3a1e14312aa0ddf300931040e0 Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev@amd.com>
Date: Mon, 23 Oct 2023 19:14:53 +0200
Subject: [PATCH 3/3] [AMDGPU] Force the third source operand of the MAI
 instructions to VGPR if no AGPRs are used. Formatting

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3ec704c2d3d981e..ff5d0e27277267b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -14152,9 +14152,9 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
       bool HasAGPRs = Info->mayNeedAGPRs();
       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
       int16_t Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
-      for (auto I : {AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
-                     AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
-                     Src2Idx}) {
+      for (auto I :
+           {AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
+            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), Src2Idx}) {
         if (I == -1)
           break;
         if ((I == Src2Idx) && (HasAGPRs))


