[llvm] [X86] X86LowerTileCopy - Find dead register to use to prevent save-reload (PR #83628)
Phoebe Wang via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 20 02:33:10 PDT 2024
https://github.com/phoebewang updated https://github.com/llvm/llvm-project/pull/83628
>From edef42d5bf749d24176ed3b6072dd3a0acd40154 Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Fri, 1 Mar 2024 17:47:18 -0500
Subject: [PATCH 1/2] Find dead register to use to prevent save-reload
---
llvm/lib/Target/X86/X86LowerTileCopy.cpp | 63 ++++++++++++++++++------
1 file changed, 48 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/X86/X86LowerTileCopy.cpp b/llvm/lib/Target/X86/X86LowerTileCopy.cpp
index e7afc49240e547..95fb5c19920219 100644
--- a/llvm/lib/Target/X86/X86LowerTileCopy.cpp
+++ b/llvm/lib/Target/X86/X86LowerTileCopy.cpp
@@ -20,6 +20,7 @@
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -90,22 +91,52 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
unsigned Size = TRI->getSpillSize(X86::TILERegClass);
Align Alignment = TRI->getSpillAlign(X86::TILERegClass);
int TileSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);
- // Allocate stack slot for stride register
- Size = TRI->getSpillSize(X86::GR64RegClass);
- Alignment = TRI->getSpillAlign(X86::GR64RegClass);
- int StrideSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);
- // TODO: Pick a killed regiter to avoid save/reload. There is problem
+ int StrideSS = 0;
+
+ LiveRegUnits UsedRegs(*TRI);
+ UsedRegs.addLiveOuts(MBB);
+
+ auto InstUpToMI = MBB.end();
+ while (InstUpToMI != MI)
+      // The pre-decrement is on purpose here.
+      // We want the liveness right before MI.
+ UsedRegs.stepBackward(*--InstUpToMI);
+
+ // Look for a temporary register to use.
+ BitVector GR64Regs =
+ TRI->getAllocatableSet(MF, TRI->getRegClass(X86::GR64RegClassID));
+      // Pick a killed register to avoid save/reload. There is a problem
// to get live interval in this stage.
Register GR64Cand = X86::RAX;
+      // Find the first register that is available (dead) at this point.
+ bool found = false;
+ for (auto RegT : GR64Regs.set_bits()) {
+ if (UsedRegs.available(RegT)) {
+ GR64Cand = RegT;
+ break;
+ }
+ }
+
const DebugLoc &DL = MI.getDebugLoc();
- // mov %rax (%sp)
- BuildMI(MBB, MI, DL, TII->get(X86::IMPLICIT_DEF), GR64Cand);
- addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV64mr)), StrideSS)
- .addReg(GR64Cand);
- // mov 64 %rax
- BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), GR64Cand).addImm(64);
+ if (found) {
+ // mov 64 %reg
+ BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), GR64Cand).addImm(64);
+ } else {
+ // Allocate stack slot for stride register
+ Size = TRI->getSpillSize(X86::GR64RegClass);
+ Alignment = TRI->getSpillAlign(X86::GR64RegClass);
+ StrideSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);
+
+ // mov %reg (%sp)
+ BuildMI(MBB, MI, DL, TII->get(X86::IMPLICIT_DEF), GR64Cand);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV64mr)),
+ StrideSS)
+ .addReg(GR64Cand);
+ // mov 64 %reg
+ BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), GR64Cand).addImm(64);
+ }
// tilestored %tmm, (%sp, %idx)
#define GET_EGPR_IF_ENABLED(OPC) (ST.hasEGPR() ? OPC##_EVEX : OPC)
unsigned Opc = GET_EGPR_IF_ENABLED(X86::TILESTORED);
@@ -120,10 +151,12 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
#undef GET_EGPR_IF_ENABLED
NewMI = addFrameReference(BuildMI(MBB, MI, DL, TII->get(Opc), DstReg),
TileSS);
- // restore %rax
- // mov (%sp) %rax
- addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), GR64Cand),
- StrideSS);
+ if (!found) {
+ // restore %rax
+ // mov (%sp) %rax
+ addFrameReference(
+ BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), GR64Cand), StrideSS);
+ }
MI.eraseFromParent();
Changed = true;
}
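
For readers following along, the dead-register search that patch 1 introduces is the standard LiveRegUnits backward-scan pattern. Below is a minimal sketch, assuming the surrounding context of X86LowerTileCopy::runOnMachineFunction (MF, MBB, MI, and TRI in scope, llvm/CodeGen/LiveRegUnits.h included); it is illustrative rather than the literal patch:

  // Compute liveness from the block's live-outs backward to the tile COPY,
  // so UsedRegs describes what is live immediately before MI.
  LiveRegUnits UsedRegs(*TRI);
  UsedRegs.addLiveOuts(MBB);
  for (auto I = MBB.end(); I != MI;)
    UsedRegs.stepBackward(*--I); // pre-decrement: stop just before MI

  // Any allocatable GR64 register whose units are all free at this point is
  // dead at MI, so it can hold the stride (64) without being saved and
  // reloaded around the tilestored/tileloadd pair.
  Register GR64Cand = X86::RAX; // fallback when every GR64 is live
  bool found = false;
  BitVector GR64Regs =
      TRI->getAllocatableSet(MF, TRI->getRegClass(X86::GR64RegClassID));
  for (auto Reg : GR64Regs.set_bits()) {
    if (UsedRegs.available(Reg)) {
      GR64Cand = Reg;
      found = true; // patch 1 omits this assignment; patch 2 adds it
      break;
    }
  }

When no candidate is found, the pass keeps the old behavior: spill GR64Cand to a fresh 8-byte stack slot, materialize the stride with MOV64ri, and reload the register once the tile copy has been lowered.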
>From d1367d6523a82d61cb24c3b152280d70bee7848a Mon Sep 17 00:00:00 2001
From: Phoebe Wang <phoebe.wang at intel.com>
Date: Sat, 20 Apr 2024 17:05:20 +0800
Subject: [PATCH 2/2] Add missing `found = true;` and optimize a bit
---
llvm/lib/Target/X86/X86LowerTileCopy.cpp | 23 +++++++------------
.../CodeGen/X86/AMX/amx-lower-tile-copy.ll | 10 --------
2 files changed, 8 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/X86/X86LowerTileCopy.cpp b/llvm/lib/Target/X86/X86LowerTileCopy.cpp
index 95fb5c19920219..201cf559d7d51d 100644
--- a/llvm/lib/Target/X86/X86LowerTileCopy.cpp
+++ b/llvm/lib/Target/X86/X86LowerTileCopy.cpp
@@ -73,10 +73,16 @@ FunctionPass *llvm::createX86LowerTileCopyPass() {
bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
const X86InstrInfo *TII = ST.getInstrInfo();
+ const TargetRegisterInfo *TRI = ST.getRegisterInfo();
+ BitVector GR64Regs =
+ TRI->getAllocatableSet(MF, TRI->getRegClass(X86::GR64RegClassID));
bool Changed = false;
for (MachineBasicBlock &MBB : MF) {
- for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+ LiveRegUnits UsedRegs(*TRI);
+ UsedRegs.addLiveOuts(MBB);
+ for (MachineInstr &MI : llvm::make_early_inc_range(reverse(MBB))) {
+ UsedRegs.stepBackward(MI);
if (!MI.isCopy())
continue;
MachineOperand &DstMO = MI.getOperand(0);
@@ -86,7 +92,6 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
if (!X86::TILERegClass.contains(DstReg, SrcReg))
continue;
- const TargetRegisterInfo *TRI = ST.getRegisterInfo();
// Allocate stack slot for tile register
unsigned Size = TRI->getSpillSize(X86::TILERegClass);
Align Alignment = TRI->getSpillAlign(X86::TILERegClass);
@@ -94,18 +99,6 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
int StrideSS = 0;
- LiveRegUnits UsedRegs(*TRI);
- UsedRegs.addLiveOuts(MBB);
-
- auto InstUpToMI = MBB.end();
- while (InstUpToMI != MI)
- // The pre-decrement is on purpose here.
- // We want to have the liveness right before I.
- UsedRegs.stepBackward(*--InstUpToMI);
-
- // Look for a temporary register to use.
- BitVector GR64Regs =
- TRI->getAllocatableSet(MF, TRI->getRegClass(X86::GR64RegClassID));
// Pick a killed register to avoid save/reload. There is a problem
// to get live interval in this stage.
Register GR64Cand = X86::RAX;
@@ -115,6 +108,7 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
for (auto RegT : GR64Regs.set_bits()) {
if (UsedRegs.available(RegT)) {
GR64Cand = RegT;
+ found = true;
break;
}
}
@@ -130,7 +124,6 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
StrideSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);
// mov %reg (%sp)
- BuildMI(MBB, MI, DL, TII->get(X86::IMPLICIT_DEF), GR64Cand);
addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV64mr)),
StrideSS)
.addReg(GR64Cand);
diff --git a/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll b/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
index 4686361ad2fcfa..a0085afbaf0253 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
@@ -44,12 +44,8 @@ define dso_local void @test1(ptr%buf) nounwind {
; CHECK-NEXT: tileloadd 3024(%rsp,%rax), %tmm3 # 1024-byte Folded Reload
; CHECK-NEXT: tileloadd (%rbx,%r15), %tmm0
; CHECK-NEXT: tileloadd (%rbx,%r15), %tmm1
-; CHECK-NEXT: # implicit-def: $rax
-; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tilestored %tmm3, 1024(%rsp,%rax) # 1024-byte Folded Spill
; CHECK-NEXT: tileloadd {{[-0-9]+}}(%r{{[sb]}}p), %tmm2 # 1024-byte Folded Reload
-; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; CHECK-NEXT: tdpbssd %tmm1, %tmm0, %tmm2
; CHECK-NEXT: tilestored %tmm2, (%rbx,%r15)
; CHECK-NEXT: incl %r14d
@@ -111,16 +107,10 @@ define dso_local void @test1(ptr%buf) nounwind {
; EGPR-NEXT: # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7b,0x4b,0x9c,0x04,0xd0,0x0b,0x00,0x00]
; EGPR-NEXT: tileloadd (%rbx,%r15), %tmm0 # EVEX TO VEX Compression encoding: [0xc4,0xa2,0x7b,0x4b,0x04,0x3b]
; EGPR-NEXT: tileloadd (%rbx,%r15), %tmm1 # EVEX TO VEX Compression encoding: [0xc4,0xa2,0x7b,0x4b,0x0c,0x3b]
-; EGPR-NEXT: # implicit-def: $rax
-; EGPR-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; EGPR-NEXT: # encoding: [0x48,0x89,0x84,0x24,0xb8,0x03,0x00,0x00]
-; EGPR-NEXT: movabsq $64, %rax # encoding: [0x48,0xb8,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
; EGPR-NEXT: tilestored %tmm3, 1024(%rsp,%rax) # 1024-byte Folded Spill
; EGPR-NEXT: # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7a,0x4b,0x9c,0x04,0x00,0x04,0x00,0x00]
; EGPR-NEXT: tileloadd {{[-0-9]+}}(%r{{[sb]}}p), %tmm2 # 1024-byte Folded Reload
; EGPR-NEXT: # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7b,0x4b,0x94,0x24,0x00,0x04,0x00,0x00]
-; EGPR-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; EGPR-NEXT: # encoding: [0x48,0x8b,0x84,0x24,0xb8,0x03,0x00,0x00]
; EGPR-NEXT: tdpbssd %tmm1, %tmm0, %tmm2 # encoding: [0xc4,0xe2,0x73,0x5e,0xd0]
; EGPR-NEXT: tilestored %tmm2, (%rbx,%r15) # EVEX TO VEX Compression encoding: [0xc4,0xa2,0x7a,0x4b,0x14,0x3b]
; EGPR-NEXT: incl %r14d # encoding: [0x41,0xff,0xc6]
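
Patch 2 then hoists the liveness bookkeeping out of the per-copy path: instead of rescanning from MBB.end() for every tile COPY, each block is traversed once in reverse and UsedRegs is updated incrementally, which also lets GR64Regs be computed once per function. Roughly, under the same assumptions as the sketch above:

  for (MachineBasicBlock &MBB : MF) {
    LiveRegUnits UsedRegs(*TRI);
    UsedRegs.addLiveOuts(MBB);
    for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
      // Step first so UsedRegs reflects the program point just before MI,
      // which is where the replacement instructions are inserted.
      UsedRegs.stepBackward(MI);
      if (!MI.isCopy())
        continue;
      // ... lower the tile COPY as in patch 1, preferring a dead GR64 from
      // UsedRegs and spilling/reloading only when none is available ...
    }
  }

The updated amx-lower-tile-copy.ll CHECK lines show the payoff: with a dead GR64 available, the implicit-def, the 8-byte spill of the stride register, and its reload around the tilestored/tileloadd sequence all drop out of the expected output.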