[llvm] 2877b87 - [AMDGPU] Lower VGPR to physical SGPR COPY to S_MOV_B32 if VGPR contains a compile-time constant

Alexander Timofeev via llvm-commits llvm-commits@lists.llvm.org
Thu Dec 15 15:38:21 PST 2022


Author: Alexander Timofeev
Date: 2022-12-16T00:38:10+01:00
New Revision: 2877b876666079b3af9714692790fd5e1b5f038c

URL: https://github.com/llvm/llvm-project/commit/2877b876666079b3af9714692790fd5e1b5f038c
DIFF: https://github.com/llvm/llvm-project/commit/2877b876666079b3af9714692790fd5e1b5f038c.diff

LOG: [AMDGPU] Lower VGPR to physical SGPR COPY to S_MOV_B32 if VGPR contains a compile-time constant

Sometimes we have a constant value loaded into a VGPR. If we later need to
rematerialize it in a physical scalar register, we can avoid the VGPR-to-SGPR
copy and replace it with an S_MOV_B32 (or S_MOV_B64 for a 64-bit constant).
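
A minimal before/after sketch, taken from the vgpr_constant64_to_sgpr.mir
test added below:

  ; before si-fix-sgpr-copies
  %1:vreg_64_align2 = V_MOV_B64_PSEUDO 4607182418800017408, implicit $exec
  $sgpr8_sgpr9 = COPY %1

  ; after si-fix-sgpr-copies: the COPY becomes a scalar move; the
  ; V_MOV_B64_PSEUDO is erased when the VGPR has no other uses
  $sgpr8_sgpr9 = S_MOV_B64 4607182418800017408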

Reviewed By: JonChesterfield, arsenm

Differential Revision: https://reviews.llvm.org/D139874

Added: 
    llvm/test/CodeGen/AMDGPU/vgpr_constant64_to_sgpr.mir
    llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
    llvm/lib/Target/AMDGPU/SIInstructions.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index f38b5d6fa3cf4..f0dfa5f8827d1 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -148,7 +148,7 @@ class SIFixSGPRCopies : public MachineFunctionPass {
   // 1. Physical register
   // 2. AGPR
  // 3. Defined by the instruction that merely moves the immediate
-  bool lowerSpecialCase(MachineInstr &MI);
+  bool lowerSpecialCase(MachineInstr &MI, MachineBasicBlock::iterator &I);
 
   void processPHINode(MachineInstr &MI);
 
@@ -638,7 +638,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
         }
         if (!isVGPRToSGPRCopy(SrcRC, DstRC, *TRI))
           continue;
-        if (lowerSpecialCase(MI))
+        if (lowerSpecialCase(MI, I))
           continue;
 
         analyzeVGPRToSGPRCopy(&MI);
@@ -829,7 +829,8 @@ void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
   }
 }
 
-bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI) {
+bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI,
+                                       MachineBasicBlock::iterator &I) {
   Register DstReg = MI.getOperand(0).getReg();
   Register SrcReg = MI.getOperand(1).getReg();
   if (!DstReg.isVirtual()) {
@@ -845,6 +846,25 @@ bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI) {
               TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
           .add(MI.getOperand(1));
       MI.getOperand(1).setReg(TmpReg);
+    } else {
+      MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+      if (DefMI && DefMI->isMoveImmediate()) {
+        MachineOperand SrcConst = DefMI->getOperand(AMDGPU::getNamedOperandIdx(
+            DefMI->getOpcode(), AMDGPU::OpName::src0));
+        if (!SrcConst.isReg()) {
+          const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
+          unsigned MoveSize = TRI->getRegSizeInBits(*SrcRC);
+          unsigned MoveOp =
+              MoveSize == 64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
+          BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(MoveOp),
+                  DstReg)
+              .add(SrcConst);
+          I = std::next(I);
+          if (MRI->hasOneUse(SrcReg))
+            DefMI->eraseFromParent();
+          MI.eraseFromParent();
+        }
+      }
     }
     return true;
   }

diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 723a38b2ecc69..24384aeea21ff 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -122,6 +122,7 @@ def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
   let isMoveImm = 1;
   let SchedRW = [Write64Bit];
   let Size = 16; // Needs maximum 2 v_mov_b32 instructions 8 byte long each.
+  let UseNamedOperandTable = 1;
 }
 
 // 64-bit vector move with dpp. Expanded post-RA.

diff --git a/llvm/test/CodeGen/AMDGPU/vgpr_constant64_to_sgpr.mir b/llvm/test/CodeGen/AMDGPU/vgpr_constant64_to_sgpr.mir
new file mode 100644
index 0000000000000..4729d9d8e4648
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/vgpr_constant64_to_sgpr.mir
@@ -0,0 +1,18 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa --global-isel=0 --run-pass=si-fix-sgpr-copies -verify-machineinstrs -o - %s | FileCheck --check-prefix=GCN %s
+
+---
+name:            test_64imm
+registers:
+  - { id: 1, class: vreg_64_align2 }
+liveins:
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' }
+body:             |
+  bb.0:
+
+    ; GCN-LABEL: name: test_64imm
+    ; GCN: $sgpr8_sgpr9 = S_MOV_B64 4607182418800017408
+    %1 = V_MOV_B64_PSEUDO 4607182418800017408, implicit $exec
+    $sgpr8_sgpr9 = COPY %1
+...
+

diff --git a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
new file mode 100644
index 0000000000000..09106f0da591a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mcpu=gfx1030 < %s | FileCheck %s
+
+target triple = "amdgcn-amd-amdhsa"
+
+; Unknown functions are conservatively passed all implicit parameters
+declare void @unknown_call()
+; Use the same constant as an sgpr parameter (for the kernel id) and for a vector operation
+define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id !0 {
+; CHECK-LABEL: kern:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_add_u32 s12, s12, s17
+; CHECK-NEXT:    s_addc_u32 s13, s13, 0
+; CHECK-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
+; CHECK-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
+; CHECK-NEXT:    s_add_u32 s0, s0, s17
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_writelane_b32 v40, s16, 0
+; CHECK-NEXT:    s_mov_b32 s13, s15
+; CHECK-NEXT:    s_mov_b32 s12, s14
+; CHECK-NEXT:    v_readlane_b32 s14, v40, 0
+; CHECK-NEXT:    s_mov_b64 s[16:17], s[8:9]
+; CHECK-NEXT:    s_load_dwordx2 s[8:9], s[16:17], 0x0
+; CHECK-NEXT:    v_mov_b32_e32 v5, 42
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v3, s8
+; CHECK-NEXT:    v_mov_b32_e32 v4, s9
+; CHECK-NEXT:    flat_store_dword v[3:4], v5
+; CHECK-NEXT:    s_mov_b64 s[18:19], 8
+; CHECK-NEXT:    s_mov_b32 s8, s16
+; CHECK-NEXT:    s_mov_b32 s9, s17
+; CHECK-NEXT:    s_mov_b32 s16, s18
+; CHECK-NEXT:    s_mov_b32 s15, s19
+; CHECK-NEXT:    s_add_u32 s8, s8, s16
+; CHECK-NEXT:    s_addc_u32 s15, s9, s15
+; CHECK-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
+; CHECK-NEXT:    s_mov_b32 s9, s15
+; CHECK-NEXT:    s_getpc_b64 s[16:17]
+; CHECK-NEXT:    s_add_u32 s16, s16, unknown_call@gotpcrel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s17, s17, unknown_call@gotpcrel32@hi+12
+; CHECK-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
+; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
+; CHECK-NEXT:    s_mov_b32 s15, 20
+; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
+; CHECK-NEXT:    s_mov_b32 s15, 10
+; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s15, 42
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT:    s_endpgm
+  store i32 42, ptr %addr
+  call fastcc void @unknown_call()
+  ret void
+}
+
+!0 = !{i32 42}
