[llvm] 496156a - [X86][AMX] Multiple configure for AMX register.
via llvm-commits
llvm-commits at lists.llvm.org
Mon May 23 22:44:25 PDT 2022
Author: Luo, Yuanke
Date: 2022-05-24T13:18:42+08:00
New Revision: 496156ac57da3abd9c8a6dc422852b7bdfaa448f
URL: https://github.com/llvm/llvm-project/commit/496156ac57da3abd9c8a6dc422852b7bdfaa448f
DIFF: https://github.com/llvm/llvm-project/commit/496156ac57da3abd9c8a6dc422852b7bdfaa448f.diff
LOG: [X86][AMX] Multiple configure for AMX register.
The previous solution depends on the variable name to record the shape
information. However, that is not reliable, because in release builds the
compiler does not preserve variable names. This can be worked around with
the additional option `-fno-discard-value-names`, but requiring that option
is not acceptable for users.
This patch preconfigures the tile registers with machine instructions,
following the same approach as the single-configure solution. In the
future we can fall back to multiple configure when single configure
fails due to the shape dependency issue.
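At -O0 the resulting pass ordering is, roughly (a simplified view of the
pipeline changes in X86TargetMachine.cpp below; see also O0-pipeline.ll):

  Fast Tile Register Preconfigure  (X86FastPreTileConfig, before fast RA:
                                    insert ldtilecfg and spill/reload tiles)
  Fast Register Allocator
  Fast Tile Register Configure     (X86FastTileConfig, after fast RA: write
                                    the shapes of the assigned TMM registers
                                    into the tile config stack slot)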
The algorithm to configure the tile registers is kept simple in this patch;
we may improve it in the future. It configures the tile registers per basic
block: the compiler spills a tile register if it lives out of the basic
block, so after configuration there should be no spill across a tile
configure during register allocation. Just like fast register allocation,
the algorithm walks the instructions in reverse order. When the shape
dependency is not met, it inserts ldtilecfg after the last instruction
that defines the shape.
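A minimal sketch of the insertion point, in pseudo machine IR with
placeholder shape and tile names (the same pattern is illustrated in the
comments of configBasicBlock below):

  def row0
  def col0
  t0 = tilezero(row0, col0)    <- covered by an earlier ldtilecfg
  def row1
  def col1
  ldtilecfg                    <- inserted after the last shape def
  t1 = tilezero(row1, col1)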
In post configuration the compiler also walks the basic block to collect the
physical tile register numbers and generates instructions to fill the stack
slot with the corresponding shape information.
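As a sketch of that step, the per-tile offsets into the 64-byte tile config
follow the data format documented in X86FastTileConfig.cpp below (the
variable names here are illustrative):

  unsigned TMMIdx    = TileReg - X86::TMM0; // physical tile number, 0..7
  int      RowOffset = 48 + TMMIdx;         // byte  48+N : tileN.rows
  int      ColOffset = 16 + TMMIdx * 2;     // bytes 16+2N: tileN.colsb
  // An 8-bit store (MOV8mr) writes the row count and a 16-bit store (MOV16mr)
  // writes the bytes-per-row into the config stack slot before ldtilecfg runs.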
TODO: There is some follow-up work in D125602. The risk is that modifying the
fast RA may cause regressions, since fast RA is used for different targets.
We may create an independent RA for the tile registers.
Differential Revision: https://reviews.llvm.org/D125075
Added:
llvm/lib/Target/X86/X86FastPreTileConfig.cpp
llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi.mir
llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi2.mir
llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi4.mir
llvm/test/CodeGen/X86/AMX/amx-fastconfig-spill.mir
llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
llvm/test/CodeGen/X86/AMX/amx-fastpreconfig.mir
Modified:
llvm/lib/Target/X86/CMakeLists.txt
llvm/lib/Target/X86/X86.h
llvm/lib/Target/X86/X86FastTileConfig.cpp
llvm/lib/Target/X86/X86InstrAMX.td
llvm/lib/Target/X86/X86TargetMachine.cpp
llvm/test/CodeGen/X86/AMX/amx-across-func.ll
llvm/test/CodeGen/X86/AMX/amx-configO0toO0.ll
llvm/test/CodeGen/X86/AMX/amx-configO2toO0.ll
llvm/test/CodeGen/X86/AMX/amx-zero-config.ll
llvm/test/CodeGen/X86/O0-pipeline.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 5a1d04e2d8356..fadd272049f41 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -38,6 +38,7 @@ set(sources
X86PreAMXConfig.cpp
X86LowerAMXIntrinsics.cpp
X86TileConfig.cpp
+ X86FastPreTileConfig.cpp
X86FastTileConfig.cpp
X86PreTileConfig.cpp
X86ExpandPseudo.cpp
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index 10e1c5d6ed38e..7344900f2e312 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -79,6 +79,9 @@ FunctionPass *createX86DynAllocaExpander();
/// Return a pass that config the tile registers.
FunctionPass *createX86TileConfigPass();
+/// Return a pass that preconfig the tile registers before fast reg allocation.
+FunctionPass *createX86FastPreTileConfigPass();
+
/// Return a pass that config the tile registers after fast reg allocation.
FunctionPass *createX86FastTileConfigPass();
@@ -175,6 +178,7 @@ void initializeX86PartialReductionPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
void initializeX86PreTileConfigPass(PassRegistry &);
+void initializeX86FastPreTileConfigPass(PassRegistry &);
void initializeX86FastTileConfigPass(PassRegistry &);
void initializeX86TileConfigPass(PassRegistry &);
void initializeX86LowerAMXTypeLegacyPassPass(PassRegistry &);
diff --git a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
new file mode 100644
index 0000000000000..08ccfeda5a4ab
--- /dev/null
+++ b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
@@ -0,0 +1,697 @@
+//===-- X86FastPreTileConfig.cpp - Fast Tile Register Configure -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Pass to preconfigure the shape of physical tile registers
+/// It inserts ldtilecfg ahead of each group of tile registers. The algorithm
+/// walks each instruction of the basic block in reverse order. All the tile
+/// registers that live out of the basic block are spilled and reloaded
+/// before their users. It also checks the dependency of the shape to ensure
+/// the shape is defined before ldtilecfg.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86RegisterInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "fastpretileconfig"
+
+STATISTIC(NumStores, "Number of stores added");
+STATISTIC(NumLoads, "Number of loads added");
+
+namespace {
+
+class X86FastPreTileConfig : public MachineFunctionPass {
+ MachineFunction *MF = nullptr;
+ const X86Subtarget *ST = nullptr;
+ const TargetInstrInfo *TII = nullptr;
+ MachineRegisterInfo *MRI = nullptr;
+ X86MachineFunctionInfo *X86FI = nullptr;
+ MachineFrameInfo *MFI = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ MachineBasicBlock *MBB = nullptr;
+ int CfgSS = -1;
+ struct PHIInfo {
+ Register Row;
+ Register Col;
+ Register StackAddr;
+ };
+ DenseMap<MachineInstr *, struct PHIInfo> VisitedPHIs;
+
+ /// Maps virtual regs to the frame index where these values are spilled.
+ IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
+
+ /// Has a bit set for tile virtual register for which it was determined
+ /// that it is alive across blocks.
+ BitVector MayLiveAcrossBlocks;
+
+ int getStackSpaceFor(Register VirtReg);
+ void InitializeTileConfigStackSpace();
+ bool mayLiveOut(Register VirtReg, MachineInstr *CfgMI);
+ void spill(MachineBasicBlock::iterator Before, Register VirtReg, bool Kill);
+ void reload(MachineBasicBlock::iterator UseMI, Register VirtReg,
+ MachineOperand *RowMO, MachineOperand *ColMO);
+ void canonicalizePHIs(MachineBasicBlock &MBB);
+ void convertPHI(MachineBasicBlock *MBB, MachineInstr &PHI);
+ void convertPHIs(MachineBasicBlock &MBB);
+ bool configBasicBlock(MachineBasicBlock &MBB);
+
+public:
+ X86FastPreTileConfig() : MachineFunctionPass(ID), StackSlotForVirtReg(-1) {}
+
+ /// Return the pass name.
+ StringRef getPassName() const override {
+ return "Fast Tile Register Preconfigure";
+ }
+
+ /// Perform tile register configure.
+ bool runOnMachineFunction(MachineFunction &MFunc) override;
+
+ static char ID;
+};
+
+} // end anonymous namespace
+
+char X86FastPreTileConfig::ID = 0;
+
+INITIALIZE_PASS_BEGIN(X86FastPreTileConfig, DEBUG_TYPE,
+ "Fast Tile Register Preconfigure", false, false)
+INITIALIZE_PASS_END(X86FastPreTileConfig, DEBUG_TYPE,
+ "Fast Tile Register Preconfigure", false, false)
+
+static bool dominates(MachineBasicBlock &MBB,
+ MachineBasicBlock::const_iterator A,
+ MachineBasicBlock::const_iterator B) {
+ auto MBBEnd = MBB.end();
+ if (B == MBBEnd)
+ return true;
+
+ MachineBasicBlock::const_iterator I = MBB.begin();
+ for (; &*I != A && &*I != B; ++I)
+ ;
+
+ return &*I == A;
+}
+
+/// This allocates space for the specified virtual register to be held on the
+/// stack.
+int X86FastPreTileConfig::getStackSpaceFor(Register VirtReg) {
+ // Find the location Reg would belong...
+ int SS = StackSlotForVirtReg[VirtReg];
+ // Already has space allocated?
+ if (SS != -1)
+ return SS;
+
+ // Allocate a new stack object for this spill location...
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ unsigned Size = TRI->getSpillSize(RC);
+ Align Alignment = TRI->getSpillAlign(RC);
+ int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
+
+ // Assign the slot.
+ StackSlotForVirtReg[VirtReg] = FrameIdx;
+ return FrameIdx;
+}
+
+/// Returns false if \p VirtReg is known to not live out of the current config.
+/// If \p VirtReg lives out of the current MBB, it must live out of the
+/// current config.
+bool X86FastPreTileConfig::mayLiveOut(Register VirtReg, MachineInstr *CfgMI) {
+ if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
+ return true;
+
+ for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
+ if (UseInst.getParent() != MBB) {
+ MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+ return true;
+ }
+
+  // The use and def are in the same MBB. If the tile register is
+  // reconfigured, it is clobbered and we need to spill and reload the
+  // tile register.
+ if (CfgMI) {
+ if (dominates(*MBB, *CfgMI, UseInst)) {
+ MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+void X86FastPreTileConfig::InitializeTileConfigStackSpace() {
+ MachineBasicBlock &MBB = MF->front();
+ MachineInstr *MI = &*MBB.getFirstNonPHI();
+ DebugLoc DL;
+ if (ST->hasAVX512()) {
+ Register Zmm = MRI->createVirtualRegister(&X86::VR512RegClass);
+ BuildMI(MBB, MI, DL, TII->get(X86::AVX512_512_SET0), Zmm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::VMOVUPSZmr)), CfgSS)
+ .addReg(Zmm);
+ } else if (ST->hasAVX2()) {
+ Register Ymm = MRI->createVirtualRegister(&X86::VR256RegClass);
+ BuildMI(MBB, MI, DL, TII->get(X86::AVX_SET0), Ymm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::VMOVUPSYmr)), CfgSS)
+ .addReg(Ymm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::VMOVUPSYmr)), CfgSS,
+ 32)
+ .addReg(Ymm);
+ } else {
+ assert(ST->hasSSE2() && "AMX should assume SSE2 enabled");
+ unsigned StoreOpc = ST->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;
+ Register Xmm = MRI->createVirtualRegister(&X86::VR128RegClass);
+ BuildMI(MBB, MI, DL, TII->get(X86::V_SET0), Xmm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), CfgSS)
+ .addReg(Xmm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), CfgSS, 16)
+ .addReg(Xmm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), CfgSS, 32)
+ .addReg(Xmm);
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), CfgSS, 48)
+ .addReg(Xmm);
+ }
+ // Fill in the palette first.
+ addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV8mi)), CfgSS)
+ .addImm(1);
+}
+
+/// Insert a spill instruction for \p VirtReg before \p Before.
+/// TODO: Update DBG_VALUEs with \p VirtReg operands to use the stack slot.
+void X86FastPreTileConfig::spill(MachineBasicBlock::iterator Before,
+ Register VirtReg, bool Kill) {
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " \n");
+ int FI = getStackSpaceFor(VirtReg);
+ LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
+
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+  // Don't need shape information for the tile store, because it is adjacent
+  // to the tile def instruction.
+ TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, TRI);
+ ++NumStores;
+
+ // TODO: update DBG_VALUEs
+}
+
+/// Insert a reload instruction for \p OrigReg before \p UseMI.
+void X86FastPreTileConfig::reload(MachineBasicBlock::iterator UseMI,
+ Register OrigReg, MachineOperand *RowMO,
+ MachineOperand *ColMO) {
+ int FI = getStackSpaceFor(OrigReg);
+ const TargetRegisterClass &RC = *MRI->getRegClass(OrigReg);
+ Register TileReg;
+ // Fold copy to tileload
+ // BB1:
+ // spill src to s
+ //
+ // BB2:
+ // t = copy src
+ // -->
+ // t = tileload (s)
+ if (UseMI->isCopy())
+ TileReg = UseMI->getOperand(0).getReg();
+ else
+ TileReg = MRI->createVirtualRegister(&RC);
+ // Can't use TII->loadRegFromStackSlot(), because we need the shape
+ // information for reload.
+ // tileloadd (%sp, %idx), %tmm
+ unsigned Opc = X86::PTILELOADDV;
+ Register StrideReg = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
+ // FIXME: MBB is not the parent of UseMI.
+ MachineInstr *NewMI = BuildMI(*UseMI->getParent(), UseMI, DebugLoc(),
+ TII->get(X86::MOV64ri), StrideReg)
+ .addImm(64);
+ NewMI = addFrameReference(
+ BuildMI(*UseMI->getParent(), UseMI, DebugLoc(), TII->get(Opc), TileReg)
+ .addReg(RowMO->getReg())
+ .addReg(ColMO->getReg()),
+ FI);
+ MachineOperand &MO = NewMI->getOperand(5);
+ MO.setReg(StrideReg);
+ MO.setIsKill(true);
+ RowMO->setIsKill(false);
+ ColMO->setIsKill(false);
+ // Erase copy instruction after it is folded.
+ if (UseMI->isCopy()) {
+ UseMI->eraseFromParent();
+ } else {
+ // Replace the register in the user MI.
+ for (auto &MO : UseMI->operands()) {
+ if (MO.isReg() && MO.getReg() == OrigReg)
+ MO.setReg(TileReg);
+ }
+ }
+
+ ++NumLoads;
+ LLVM_DEBUG(dbgs() << "Reloading " << printReg(OrigReg, TRI) << " into "
+ << printReg(TileReg, TRI) << '\n');
+}
+
+static bool isTileDef(MachineRegisterInfo *MRI, MachineInstr &MI) {
+ // The instruction must have 3 operands: tile def, row, col.
+ if (MI.isDebugInstr() || MI.getNumOperands() < 3 || !MI.isPseudo())
+ return false;
+ MachineOperand &MO = MI.getOperand(0);
+
+ if (MO.isReg()) {
+ Register Reg = MO.getReg();
+ // FIXME it may be used after Greedy RA and the physical
+ // register is not rewritten yet.
+ if (Reg.isVirtual() &&
+ MRI->getRegClass(Reg)->getID() == X86::TILERegClassID)
+ return true;
+ if (Reg >= X86::TMM0 && Reg <= X86::TMM7)
+ return true;
+ }
+
+ return false;
+}
+
+static ShapeT getShape(MachineRegisterInfo *MRI, Register TileReg) {
+ MachineInstr *MI = MRI->getVRegDef(TileReg);
+ if (isTileDef(MRI, *MI)) {
+ MachineOperand *RowMO = &MI->getOperand(1);
+ MachineOperand *ColMO = &MI->getOperand(2);
+ return ShapeT(RowMO, ColMO, MRI);
+ } else if (MI->isCopy()) {
+ TileReg = MI->getOperand(1).getReg();
+ return getShape(MRI, TileReg);
+ }
+
+  // The def should not be a PHI node, because we walk the MBB in reverse
+  // post order.
+ assert(MI->isPHI() && "Unexpected PHI when get shape.");
+ llvm_unreachable("Unexpected MI when get shape.");
+}
+
+// BB0:
+// spill t0 to s0
+// BB1:
+// spill t1 to s1
+//
+// BB2:
+// t = phi [t0, bb0] [t1, bb1]
+// -->
+// row = phi [r0, bb0] [r1, bb1]
+// col = phi [c0, bb0] [c1, bb1]
+// s = phi [s0, bb0] [s1, bb1]
+// t = tileload row, col, s
+// The new instructions are inserted right after the original PHI node. The
+// original order of the PHI nodes is not preserved.
+void X86FastPreTileConfig::convertPHI(MachineBasicBlock *MBB,
+ MachineInstr &PHI) {
+ // 1. Create instruction to get stack slot address of each incoming block.
+ // 2. Create PHI node for the stack address.
+  // 3. Create PHI nodes for the shape. If one of the incoming shapes is an
+  //    immediate, use the immediate and delete the PHI node.
+ // 4. Create tileload instruction from the stack address.
+ Register StackAddrReg = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
+ MachineInstrBuilder AddrPHI = BuildMI(*MBB, ++PHI.getIterator(), DebugLoc(),
+ TII->get(X86::PHI), StackAddrReg);
+ Register RowReg = MRI->createVirtualRegister(&X86::GR16RegClass);
+ MachineInstrBuilder RowPHI = BuildMI(*MBB, ++PHI.getIterator(), DebugLoc(),
+ TII->get(X86::PHI), RowReg);
+ Register ColReg = MRI->createVirtualRegister(&X86::GR16RegClass);
+ MachineInstrBuilder ColPHI = BuildMI(*MBB, ++PHI.getIterator(), DebugLoc(),
+ TII->get(X86::PHI), ColReg);
+ // Record the mapping of phi node and its row/column information.
+ VisitedPHIs[&PHI] = {RowReg, ColReg, StackAddrReg};
+
+ for (unsigned I = 1, E = PHI.getNumOperands(); I != E; I += 2) {
+ // Get the 2 incoming value of tile register and MBB.
+ Register InTileReg = PHI.getOperand(I).getReg();
+    // Mark it as live out, so that it will be spilled when we visit the
+    // incoming MBB. Otherwise, since the PHI will be deleted, the spill
+    // would be missed when visiting the incoming MBB.
+ MayLiveAcrossBlocks.set(Register::virtReg2Index(InTileReg));
+ MachineBasicBlock *InMBB = PHI.getOperand(I + 1).getMBB();
+
+ MachineInstr *TileDefMI = MRI->getVRegDef(InTileReg);
+ MachineBasicBlock::iterator InsertPos;
+ if (TileDefMI->isPHI()) {
+ InsertPos = TileDefMI->getParent()->getFirstNonPHI();
+ if (VisitedPHIs.count(TileDefMI)) { // circular phi reference
+ // def t1
+ // / \
+ // def t2 t3 = phi(t1, t4) <--
+ // \ / |
+ // t4 = phi(t2, t3)-------------
+ //
+ // For each (row, column and stack address) append phi incoming value.
+ // Create r3 = phi(r1, r4)
+ // Create r4 = phi(r2, r3)
+ Register InRowReg = VisitedPHIs[TileDefMI].Row;
+ Register InColReg = VisitedPHIs[TileDefMI].Col;
+ Register InStackAddrReg = VisitedPHIs[TileDefMI].StackAddr;
+ RowPHI.addReg(InRowReg).addMBB(InMBB);
+ ColPHI.addReg(InColReg).addMBB(InMBB);
+ AddrPHI.addReg(InStackAddrReg).addMBB(InMBB);
+ continue;
+ } else {
+ // Recursively convert PHI to tileload
+ convertPHI(TileDefMI->getParent(), *TileDefMI);
+        // The PHI node is converted to a tileload instruction. Get the stack
+        // address from the tileload operands.
+ MachineInstr *TileLoad = MRI->getVRegDef(InTileReg);
+ assert(TileLoad->getOpcode() == X86::PTILELOADDV);
+ Register InRowReg = TileLoad->getOperand(1).getReg();
+ Register InColReg = TileLoad->getOperand(2).getReg();
+ Register InStackAddrReg = TileLoad->getOperand(3).getReg();
+ RowPHI.addReg(InRowReg).addMBB(InMBB);
+ ColPHI.addReg(InColReg).addMBB(InMBB);
+ AddrPHI.addReg(InStackAddrReg).addMBB(InMBB);
+ }
+ } else {
+ InsertPos = TileDefMI->getIterator();
+
+ // Fill the incoming operand of row/column phi instruction.
+ ShapeT Shape = getShape(MRI, InTileReg);
+ Shape.getRow()->setIsKill(false);
+ Shape.getCol()->setIsKill(false);
+ RowPHI.addReg(Shape.getRow()->getReg()).addMBB(InMBB);
+ ColPHI.addReg(Shape.getCol()->getReg()).addMBB(InMBB);
+
+      // The incoming tile register lives out of its def BB, so it will be
+      // spilled. Create an MI to get the spill stack slot address for it.
+ int FI = getStackSpaceFor(InTileReg);
+ Register InStackAddrReg =
+ MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
+ addOffset(BuildMI(*TileDefMI->getParent(), InsertPos, DebugLoc(),
+ TII->get(X86::LEA64r), InStackAddrReg)
+ .addFrameIndex(FI),
+ 0);
+ AddrPHI.addReg(InStackAddrReg).addMBB(InMBB);
+ }
+ }
+
+ MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI();
+ Register StrideReg = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
+ BuildMI(*MBB, InsertPos, DebugLoc(), TII->get(X86::MOV64ri), StrideReg)
+ .addImm(64);
+ Register TileReg = PHI.getOperand(0).getReg();
+ MachineInstr *NewMI = addDirectMem(
+ BuildMI(*MBB, InsertPos, DebugLoc(), TII->get(X86::PTILELOADDV), TileReg)
+ .addReg(RowReg)
+ .addReg(ColReg),
+ StackAddrReg);
+ MachineOperand &MO = NewMI->getOperand(5);
+ MO.setReg(StrideReg);
+ MO.setIsKill(true);
+ PHI.eraseFromParent();
+ VisitedPHIs.erase(&PHI);
+}
+
+static bool isTileRegDef(MachineRegisterInfo *MRI, MachineInstr &MI) {
+ MachineOperand &MO = MI.getOperand(0);
+ if (MO.isReg() && MO.getReg().isVirtual() &&
+ MRI->getRegClass(MO.getReg())->getID() == X86::TILERegClassID)
+ return true;
+ return false;
+}
+
+void X86FastPreTileConfig::canonicalizePHIs(MachineBasicBlock &MBB) {
+ SmallVector<MachineInstr *, 8> PHIs;
+
+ for (MachineInstr &MI : MBB) {
+ if (!MI.isPHI())
+ break;
+ if (!isTileRegDef(MRI, MI))
+ continue;
+ PHIs.push_back(&MI);
+ }
+  // Canonicalize the PHI nodes first. One tile PHI may depend on a previous
+  // PHI node. For the case below, we need to convert %t4.
+ //
+ // BB0:
+ // %t3 = phi (t1 BB1, t2 BB0)
+ // %t4 = phi (t5 BB1, t3 BB0)
+ // -->
+ // %t3 = phi (t1 BB1, t2 BB0)
+ // %t4 = phi (t5 BB1, t2 BB0)
+ //
+ while (!PHIs.empty()) {
+ MachineInstr *PHI = PHIs.pop_back_val();
+
+    // Find the operand that is incoming from the same MBB and whose def
+    // is also a PHI node.
+ MachineOperand *InMO = nullptr;
+ MachineInstr *DefMI = nullptr;
+ for (unsigned I = 1, E = PHI->getNumOperands(); I != E; I += 2) {
+ Register InTileReg = PHI->getOperand(I).getReg();
+ MachineBasicBlock *InMBB = PHI->getOperand(I + 1).getMBB();
+ DefMI = MRI->getVRegDef(InTileReg);
+ if (InMBB != &MBB || !DefMI->isPHI())
+ continue;
+
+ InMO = &PHI->getOperand(I);
+ break;
+ }
+    // If no such operand is found, do nothing.
+ if (!InMO)
+ continue;
+
+ // Current phi node depends on previous phi node. Break the
+ // dependency.
+ Register DefTileReg;
+ for (unsigned I = 1, E = DefMI->getNumOperands(); I != E; I += 2) {
+ MachineBasicBlock *InMBB = PHI->getOperand(I + 1).getMBB();
+ if (InMBB != &MBB)
+ continue;
+ DefTileReg = DefMI->getOperand(I).getReg();
+ InMO->setReg(DefTileReg);
+ break;
+ }
+ }
+}
+
+void X86FastPreTileConfig::convertPHIs(MachineBasicBlock &MBB) {
+ SmallVector<MachineInstr *, 8> PHIs;
+ for (MachineInstr &MI : MBB) {
+ if (!MI.isPHI())
+ break;
+ if (!isTileRegDef(MRI, MI))
+ continue;
+ PHIs.push_back(&MI);
+ }
+ while (!PHIs.empty()) {
+ MachineInstr *MI = PHIs.pop_back_val();
+ VisitedPHIs.clear();
+ convertPHI(&MBB, *MI);
+ }
+}
+
+// PreTileConfig should configure the tile registers based on basic
+// block.
+bool X86FastPreTileConfig::configBasicBlock(MachineBasicBlock &MBB) {
+ this->MBB = &MBB;
+ bool Change = false;
+ MachineInstr *LastShapeMI = nullptr;
+ MachineInstr *LastTileCfg = nullptr;
+ bool HasUnconfigTile = false;
+
+ auto Config = [&](MachineInstr &Before) {
+ if (CfgSS == -1)
+ CfgSS = MFI->CreateStackObject(ST->getTileConfigSize(),
+ ST->getTileConfigAlignment(), false);
+ LastTileCfg = addFrameReference(
+ BuildMI(MBB, Before, DebugLoc(), TII->get(X86::LDTILECFG)), CfgSS);
+ LastShapeMI = nullptr;
+ Change = true;
+ };
+ auto HasTileOperand = [](MachineRegisterInfo *MRI, MachineInstr &MI) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (Reg.isVirtual() &&
+ MRI->getRegClass(Reg)->getID() == X86::TILERegClassID)
+ return true;
+ }
+ return false;
+ };
+ for (MachineInstr &MI : reverse(MBB)) {
+    // We have transformed the PHI nodes before configuring the BB.
+ if (MI.isPHI())
+ break;
+    // Don't collect the shape of a used tile; the tile should be defined
+    // before the tile use. A spill and reload would happen if there is only
+    // a tile use after ldtilecfg, so the shape can be collected from the
+    // reload. Take the code below for example; %t would be reloaded before
+    // the tilestore:
+ // call
+ // ....
+ // tilestore %r, %c, %t
+ // -->
+ // call
+ // ldtilecfg
+ // %t = tileload %r, %c
+ // tilestore %r, %c, %t
+ if (HasTileOperand(MRI, MI))
+ HasUnconfigTile = true;
+    // According to the AMX ABI, all the tile registers, including the config
+    // register, are volatile. The caller needs to save/restore the config
+    // register.
+ if (MI.isCall() && HasUnconfigTile) {
+ MachineBasicBlock::iterator I;
+ if (LastShapeMI && dominates(MBB, MI, LastShapeMI))
+ I = ++LastShapeMI->getIterator();
+ else
+ I = ++MI.getIterator();
+ Config(*I);
+ HasUnconfigTile = false;
+ continue;
+ }
+ if (!isTileDef(MRI, MI))
+ continue;
+ //
+ //---------------------------------------------------------------------
+    // Don't handle the COPY instruction. If the src and dst of the COPY can
+    // be in the same config, as in the case below, we just check t0's shape.
+ // def row0
+ // def col0
+ // ldtilecfg
+    // t0 = tilezero(row0, col0)
+ // t1 = copy t0
+ // ...
+    // If the src and dst of the COPY can NOT be in the same config, as in the
+    // case below, a reload would be generated before the copy instruction.
+ // def row0
+ // def col0
+    // t0 = tilezero(row0, col0)
+ // spill t0
+ // ...
+ // def row1
+ // def col1
+ // ldtilecfg
+ // t1 = tilezero(row1, col1)
+ // reload t0
+ // t1 = copy t0
+ //---------------------------------------------------------------------
+ //
+    // If MI dominates the last shape def instruction, we need to insert
+    // ldtilecfg after LastShapeMI now. The config doesn't include the
+    // current MI.
+ // def row0
+ // def col0
+ // tilezero(row0, col0) <- MI
+ // def row1
+ // def col1
+ // ldtilecfg <- insert
+ // tilezero(row1, col1)
+ if (LastShapeMI && dominates(MBB, MI, LastShapeMI))
+ Config(*(++LastShapeMI->getIterator()));
+ MachineOperand *RowMO = &MI.getOperand(1);
+ MachineOperand *ColMO = &MI.getOperand(2);
+ MachineInstr *RowMI = MRI->getVRegDef(RowMO->getReg());
+ MachineInstr *ColMI = MRI->getVRegDef(ColMO->getReg());
+ // If the shape is defined in current MBB, check the domination.
+ // FIXME how about loop?
+ if (RowMI->getParent() == &MBB) {
+ if (!LastShapeMI)
+ LastShapeMI = RowMI;
+ else if (dominates(MBB, LastShapeMI, RowMI))
+ LastShapeMI = RowMI;
+ }
+ if (ColMI->getParent() == &MBB) {
+ if (!LastShapeMI)
+ LastShapeMI = ColMI;
+ else if (dominates(MBB, LastShapeMI, ColMI))
+ LastShapeMI = ColMI;
+ }
+    // If the tile register has a user outside the current tile config, spill
+    // it and reload it before the user.
+ Register TileReg = MI.getOperand(0).getReg();
+ if (mayLiveOut(TileReg, LastTileCfg))
+ spill(++MI.getIterator(), TileReg, false);
+ for (MachineInstr &UseMI : MRI->use_instructions(TileReg)) {
+ if (UseMI.getParent() == &MBB) {
+        // Only reload if the use crosses the ldtilecfg.
+ if (!LastTileCfg || !dominates(MBB, LastTileCfg, UseMI))
+ continue;
+        // Reload before UseMI.
+ reload(UseMI.getIterator(), TileReg, RowMO, ColMO);
+ } else {
+ // Don't reload for phi instruction, we handle phi reload separately.
+ // TODO: merge the reload for the same user MBB.
+ if (!UseMI.isPHI())
+ reload(UseMI.getIterator(), TileReg, RowMO, ColMO);
+ }
+ }
+ }
+
+ // Configure tile registers at the head of the MBB
+ if (HasUnconfigTile) {
+ MachineInstr *Before;
+ if (LastShapeMI == nullptr || LastShapeMI->isPHI())
+ Before = &*MBB.getFirstNonPHI();
+ else
+ Before = &*(++LastShapeMI->getIterator());
+
+ Config(*Before);
+ }
+
+ return Change;
+}
+
+bool X86FastPreTileConfig::runOnMachineFunction(MachineFunction &MFunc) {
+ MF = &MFunc;
+ MRI = &MFunc.getRegInfo();
+ ST = &MFunc.getSubtarget<X86Subtarget>();
+ TII = ST->getInstrInfo();
+ X86FI = MFunc.getInfo<X86MachineFunctionInfo>();
+ MFI = &MFunc.getFrameInfo();
+ TRI = ST->getRegisterInfo();
+ CfgSS = -1;
+
+ unsigned NumVirtRegs = MRI->getNumVirtRegs();
+ StackSlotForVirtReg.resize(NumVirtRegs);
+ MayLiveAcrossBlocks.clear();
+  // We will create registers during config. The *3 is to make sure
+  // the virtual register numbers don't exceed the size of
+  // the bit vector.
+ MayLiveAcrossBlocks.resize(NumVirtRegs * 3);
+ bool Change = false;
+ assert(MRI->isSSA());
+
+ // Canonicalize the phi node first.
+ for (MachineBasicBlock &MBB : MFunc)
+ canonicalizePHIs(MBB);
+
+ // Loop over all of the basic blocks in reverse post order and insert
+  // ldtilecfg for tile registers. The reverse post order is to facilitate
+  // PHI node conversion.
+ ReversePostOrderTraversal<MachineFunction *> RPOT(MF);
+ for (MachineBasicBlock *MBB : RPOT) {
+ convertPHIs(*MBB);
+ Change |= configBasicBlock(*MBB);
+ }
+
+ if (Change)
+ InitializeTileConfigStackSpace();
+
+ StackSlotForVirtReg.clear();
+ return Change;
+}
+
+FunctionPass *llvm::createX86FastPreTileConfigPass() {
+ return new X86FastPreTileConfig();
+}
diff --git a/llvm/lib/Target/X86/X86FastTileConfig.cpp b/llvm/lib/Target/X86/X86FastTileConfig.cpp
index 5ff848243e433..2949bd048ee00 100644
--- a/llvm/lib/Target/X86/X86FastTileConfig.cpp
+++ b/llvm/lib/Target/X86/X86FastTileConfig.cpp
@@ -40,40 +40,25 @@ namespace {
class X86FastTileConfig : public MachineFunctionPass {
// context
MachineFunction *MF = nullptr;
- const X86Subtarget *ST = nullptr;
- const TargetRegisterInfo *TRI = nullptr;
const TargetInstrInfo *TII = nullptr;
MachineRegisterInfo *MRI = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
X86MachineFunctionInfo *X86FI = nullptr;
- MachineInstr *getTileConfigPoint();
- void tileConfig();
+ bool configBasicBlock(MachineBasicBlock &MBB);
public:
X86FastTileConfig() : MachineFunctionPass(ID) {}
- bool fastTileConfig();
- bool isTileLoad(MachineInstr &MI);
- bool isTileStore(MachineInstr &MI);
- bool isAMXInstr(MachineInstr &MI);
-
- MachineInstr *getKeyAMXInstr(MachineInstr *MI);
- void getTileShapesCfg(MachineInstr *MI,
- SmallVector<MachineOperand *> &ShapedTiles);
- void getShapeCfgInstrs(MachineInstr *MI,
- std::map<unsigned, MachineInstr *> &RowCfgs,
- std::map<unsigned, MachineInstr *> &ColCfgs);
-
/// Return the pass name.
StringRef getPassName() const override {
return "Fast Tile Register Configure";
}
- void materializeTileCfg(MachineInstr *MI);
-
- void rewriteTileCfg(SmallVector<MachineOperand *> &ShapedTiles,
- std::map<unsigned, MachineInstr *> &RowCfgs,
- std::map<unsigned, MachineInstr *> &ColCfgs);
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
/// Perform register allocation.
bool runOnMachineFunction(MachineFunction &MFunc) override;
@@ -95,210 +80,107 @@ INITIALIZE_PASS_BEGIN(X86FastTileConfig, DEBUG_TYPE,
INITIALIZE_PASS_END(X86FastTileConfig, DEBUG_TYPE,
"Fast Tile Register Configure", false, false)
-static bool isTilePhysReg(MachineOperand &Op) {
- if (!Op.isReg())
+static bool isTileDef(MachineRegisterInfo *MRI, MachineInstr &MI) {
+ // There is no phi instruction after register allocation.
+ assert(MI.isPHI() == false);
+ // The instruction must have 3 operands: tile def, row, col.
+  // It should be an AMX pseudo instruction that has shape operands.
+ if (MI.isDebugInstr() || MI.isCopy() || MI.getNumOperands() < 3 ||
+ !MI.isPseudo())
return false;
+ MachineOperand &MO = MI.getOperand(0);
+
+ if (MO.isReg()) {
+ Register Reg = MO.getReg();
+ // FIXME it may be used after Greedy RA and the physical
+ // register is not rewritten yet.
+ if (Reg.isVirtual() &&
+ MRI->getRegClass(Reg)->getID() == X86::TILERegClassID)
+ return true;
+ if (Reg >= X86::TMM0 && Reg <= X86::TMM7)
+ return true;
+ }
- Register Reg = Op.getReg();
- if (Reg >= X86::TMM0 && Reg <= X86::TMM7)
- return true;
return false;
}
-static unsigned getTilePhysRegIdx(MachineOperand *Op) {
- assert(isTilePhysReg(*Op) && "Tile Operand is invalid");
- return Op->getReg() - X86::TMM0;
-}
-
-static inline void adjustRowCfg(unsigned TIdx, MachineInstr *MI) {
- unsigned Offset = 48 + TIdx;
- MI->getOperand(3).ChangeToImmediate(Offset);
-}
-
-static inline void adjustColCfg(unsigned TIdx, MachineInstr *MI) {
- unsigned Offset = 16 + TIdx * 2;
- MI->getOperand(3).ChangeToImmediate(Offset);
-}
-
-bool X86FastTileConfig::isTileLoad(MachineInstr &MI) {
- return MI.getOpcode() == X86::PTILELOADDV ||
- MI.getOpcode() == X86::PTILELOADDT1V;
-}
-bool X86FastTileConfig::isTileStore(MachineInstr &MI) {
- return MI.getOpcode() == X86::PTILESTOREDV;
-}
-bool X86FastTileConfig::isAMXInstr(MachineInstr &MI) {
- // TODO: May need to handle some special nontile amx instrucion.
- if (MI.getOpcode() == X86::PLDTILECFGV || MI.isDebugInstr())
- return false;
-
- return llvm::any_of(MI.operands(), isTilePhysReg);
-}
-
-MachineInstr *X86FastTileConfig::getKeyAMXInstr(MachineInstr *MI) {
- auto Cfg = MachineBasicBlock::iterator(MI);
- MachineBasicBlock *MBB = MI->getParent();
- MachineInstr *KeyMI = nullptr;
- int KeyAMXNum = 0;
-
- for (auto II = Cfg; II != MBB->end(); II++) {
- if (isTileLoad(*II)) {
- KeyMI = &*II;
+// Rewrite the shape information for each ldtilecfg in the basic block after
+// register allocation.
+bool X86FastTileConfig::configBasicBlock(MachineBasicBlock &MBB) {
+ bool Change = false;
+ SmallVector<std::pair<unsigned, ShapeT>, 6> ShapeInfos;
+ for (MachineInstr &MI : reverse(MBB)) {
+ if (!isTileDef(MRI, MI) && MI.getOpcode() != X86::LDTILECFG)
continue;
+ // AMX instructions that define tile register.
+ if (MI.getOpcode() != X86::LDTILECFG) {
+ MachineOperand &Row = MI.getOperand(1);
+ MachineOperand &Col = MI.getOperand(2);
+ unsigned TMMIdx = MI.getOperand(0).getReg() - X86::TMM0;
+ ShapeInfos.push_back({TMMIdx, ShapeT(&Row, &Col)});
+ } else { // LDTILECFG
+ // Rewrite the shape information to memory. Stack slot should have
+ // been initialized to zero in pre config.
+ int SS = MI.getOperand(0).getIndex(); // tile config stack slot.
+ for (auto &ShapeInfo : ShapeInfos) {
+ DebugLoc DL;
+ unsigned TMMIdx = ShapeInfo.first;
+ Register RowReg = ShapeInfo.second.getRow()->getReg();
+ Register ColReg = ShapeInfo.second.getCol()->getReg();
+ // Here is the data format for the tile config.
+ // 0 palette
+ // 1 start_row
+ // 2-15 reserved, must be zero
+ // 16-17 tile0.colsb Tile 0 bytes per row.
+ // 18-19 tile1.colsb Tile 1 bytes per row.
+ // 20-21 tile2.colsb Tile 2 bytes per row.
+ // ... (sequence continues)
+ // 30-31 tile7.colsb Tile 7 bytes per row.
+ // 32-47 reserved, must be zero
+ // 48 tile0.rows Tile 0 rows.
+ // 49 tile1.rows Tile 1 rows.
+ // 50 tile2.rows Tile 2 rows.
+ // ... (sequence continues)
+ // 55 tile7.rows Tile 7 rows.
+ // 56-63 reserved, must be zero
+ int RowOffset = 48 + TMMIdx;
+ int ColOffset = 16 + TMMIdx * 2;
+
+ Register SubRowReg = TRI->getSubReg(RowReg, X86::sub_8bit);
+ BuildMI(MBB, MI, DL, TII->get(X86::IMPLICIT_DEF), SubRowReg);
+ MachineInstrBuilder StoreRow =
+ BuildMI(MBB, MI, DL, TII->get(X86::MOV8mr));
+ addFrameReference(StoreRow, SS, RowOffset).addReg(SubRowReg);
+
+ MachineInstrBuilder StoreCol =
+ BuildMI(MBB, MI, DL, TII->get(X86::MOV16mr));
+ addFrameReference(StoreCol, SS, ColOffset).addReg(ColReg);
+ }
+ ShapeInfos.clear();
+ Change = true;
}
-
- if (isTileStore(*II)) {
- assert(KeyMI && "Key AMX Should be found before!");
- break;
- }
-
- if (isAMXInstr(*II)) {
- assert((KeyAMXNum == 0) && "Too many Key AMX instruction!");
- (void) KeyAMXNum;
- KeyAMXNum++;
- KeyMI = &*II;
- }
- }
- assert(KeyMI && "There must be an AMX instruction.");
- return KeyMI;
-}
-
-// Orderly get the tiles in key amx instruction, uses before defs.
-void X86FastTileConfig::getTileShapesCfg(
- MachineInstr *CfgMI, SmallVector<MachineOperand *> &ShapedTiles) {
- MachineInstr *KeyMI = getKeyAMXInstr(CfgMI);
-
- SmallVector<MachineOperand *> DefTiles;
- for (MachineOperand &MO : KeyMI->operands()) {
- if (!isTilePhysReg(MO))
- continue;
- if (MO.isDef())
- DefTiles.push_back(&MO);
- else
- ShapedTiles.push_back(&MO);
- }
- ShapedTiles.append(DefTiles);
-}
-
-// We pre-config the shapes at position named with "amx.tmm.N.shape.row* and
-// amx.shape.N.col*" at pass "Pre AMX Tile Config".
-// The 'N' implies the order of tiles in key amx intrinsic.
-void X86FastTileConfig::getShapeCfgInstrs(
- MachineInstr *MI, std::map<unsigned, MachineInstr *> &RowCfgs,
- std::map<unsigned, MachineInstr *> &ColCfgs) {
- auto Cfg = MachineBasicBlock::iterator(MI);
- MachineBasicBlock *MBB = MI->getParent();
-
- for (auto II = Cfg; II != MBB->begin(); II--) {
- if (isAMXInstr(*II) || II->isTerminator() || II->isCall())
- break;
- if (!II->mayStore() || !II->hasOneMemOperand())
- continue;
- const Value *MemPtr = II->memoperands()[0]->getValue();
- if (!MemPtr)
- continue;
-
- StringRef Name = MemPtr->getName();
- if (!Name.startswith("amx.tmm."))
- continue;
-
- // Get the 'N'th tile shape config in key amx instruction.
- auto N = Name.find(".shape");
- StringRef STileIdx = Name.slice(8, N);
- unsigned Idx;
- STileIdx.getAsInteger(10, Idx);
-
- // And related them with their store instructions.
- if (Name.contains("row"))
- RowCfgs[Idx] = &*II;
- else if (Name.contains("col"))
- ColCfgs[Idx] = &*II;
- else
- llvm_unreachable("Invalid tile shape info!");
}
- assert((RowCfgs.size() == ColCfgs.size()) &&
- "The number of tile row and col must be equal!");
-}
-
-// Here is the data format for the tile config.
-// 0 palette = 1 now.
-// 1 start_row = 0 now.
-// 2-15 reserved, must be zero
-// 16-17 tile0.colsb Tile 0 bytes per row.
-// 18-19 tile1.colsb Tile 1 bytes per row.
-// 20-21 tile2.colsb Tile 2 bytes per row.
-// ... (sequence continues)
-// 30-31 tile7.colsb Tile 7 bytes per row.
-// 32-47 reserved, must be zero
-// 48 tile0.rows Tile 0 rows.
-// 49 tile1.rows Tile 1 rows.
-// 50 tile2.rows Tile 2 rows.
-// ... (sequence continues)
-// 55 tile7.rows Tile 7 rows.
-// 56-63 reserved, must be zero
-void X86FastTileConfig::rewriteTileCfg(
- SmallVector<MachineOperand *> &ShapedTiles,
- std::map<unsigned, MachineInstr *> &RowCfgs,
- std::map<unsigned, MachineInstr *> &ColCfgs) {
- assert((RowCfgs.size() == ShapedTiles.size()) &&
- "The number of tile shapes not equal with the number of tiles!");
- // Orderly get the tiles and adjust the shape config.
- for (unsigned I = 0, E = ShapedTiles.size(); I < E; I++) {
- MachineOperand *MO = ShapedTiles[I];
- unsigned TmmIdx = getTilePhysRegIdx(MO);
- if (I == TmmIdx)
- continue;
- adjustRowCfg(TmmIdx, RowCfgs[I]);
- adjustColCfg(TmmIdx, ColCfgs[I]);
- }
-}
-
-// We have already preconfig the shapes before fast register allocation at
-// X86PreAMXConfig::preWriteTileCfg(). Now, we have done fast register
-// allocation, the shapes pre-written before may not rightly corresponding
-// to the correct tmm registers, so we need adjust them.
-void X86FastTileConfig::materializeTileCfg(MachineInstr *CfgMI) {
- SmallVector<MachineOperand *> ShapedTiles;
- std::map<unsigned, MachineInstr *> RowCfgs;
- std::map<unsigned, MachineInstr *> ColCfgs;
-
- // Orderly keep the tile uses and def in ShapedTiles;
- getTileShapesCfg(CfgMI, ShapedTiles);
- assert(ShapedTiles.size() && "Not find shapes config!");
-
- getShapeCfgInstrs(CfgMI, RowCfgs, ColCfgs);
-
- rewriteTileCfg(ShapedTiles, RowCfgs, ColCfgs);
-}
-
-bool X86FastTileConfig::fastTileConfig() {
- bool Changed = false;
-
- for (MachineBasicBlock &MBB : *MF) {
- SmallVector<MachineInstr *, 2> CFGs;
- for (MachineInstr &MI : MBB)
- if (MI.getOpcode() == X86::PLDTILECFGV)
- CFGs.push_back(&MI);
- for (auto *MI : CFGs)
- materializeTileCfg(MI);
- if (!CFGs.empty())
- Changed = true;
- }
- if (Changed)
+ if (Change)
X86FI->setHasVirtualTileReg(true);
- return Changed;
+
+ return Change;
}
bool X86FastTileConfig::runOnMachineFunction(MachineFunction &MFunc) {
MF = &MFunc;
MRI = &MFunc.getRegInfo();
- ST = &MFunc.getSubtarget<X86Subtarget>();
+ const TargetSubtargetInfo *ST = &MFunc.getSubtarget<X86Subtarget>();
TRI = ST->getRegisterInfo();
TII = MFunc.getSubtarget().getInstrInfo();
X86FI = MFunc.getInfo<X86MachineFunctionInfo>();
+ bool Change = false;
+
+  // Loop over all of the basic blocks and rewrite the tile config shape info.
+ for (MachineBasicBlock &MBB : MFunc)
+ Change |= configBasicBlock(MBB);
- return fastTileConfig();
+ return Change;
}
FunctionPass *llvm::createX86FastTileConfigPass() {
diff --git a/llvm/lib/Target/X86/X86InstrAMX.td b/llvm/lib/Target/X86/X86InstrAMX.td
index 368b05ee8db4b..df8c096204e05 100644
--- a/llvm/lib/Target/X86/X86InstrAMX.td
+++ b/llvm/lib/Target/X86/X86InstrAMX.td
@@ -48,22 +48,23 @@ let Predicates = [HasAMXTILE, In64BitMode] in {
VEX, T8XD;
// Pseduo instruction for RA.
- let mayLoad = 1 in
+ let isPseudo = true, mayLoad = 1 in
def PLDTILECFGV : PseudoI<(outs), (ins opaquemem:$src),
[(int_x86_ldtilecfg_internal addr:$src)]>;
- let mayLoad = 1 in
+ let isPseudo = true, mayLoad = 1 in
def PTILELOADDV : PseudoI<(outs TILE:$dst), (ins GR16:$src1,
GR16:$src2,
opaquemem:$src3), []>;
- let mayLoad = 1 in
+ let isPseudo = true, mayLoad = 1 in
def PTILELOADDT1V : PseudoI<(outs TILE:$dst), (ins GR16:$src1,
GR16:$src2,
opaquemem:$src3), []>;
- let mayStore = 1 in
+ let isPseudo = true, mayStore = 1 in
def PTILESTOREDV : PseudoI<(outs), (ins GR16:$src1,
GR16:$src2, opaquemem:$src3,
TILE:$src4), []>;
- let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in
+ let isPseudo = true, isReMaterializable = 1, isAsCheapAsAMove = 1,
+ canFoldAsLoad = 1 in
def PTILEZEROV : PseudoI<(outs TILE:$dst), (ins GR16:$src1, GR16:$src2),
[(set TILE:$dst, (int_x86_tilezero_internal
GR16:$src1, GR16:$src2))]>;
@@ -106,7 +107,7 @@ let Predicates = [HasAMXINT8, In64BitMode] in {
}
// Pseduo instruction for RA.
- let Constraints = "$src4 = $dst" in {
+ let isPseudo = true, Constraints = "$src4 = $dst" in {
def PTDPBSSDV : PseudoI<(outs TILE:$dst), (ins GR16:$src1,
GR16:$src2, GR16:$src3, TILE:$src4,
TILE:$src5, TILE:$src6),
@@ -165,7 +166,7 @@ let Predicates = [HasAMXBF16, In64BitMode] in {
[]>, VEX_4V, T8XS;
// Pseduo instruction for RA.
- let Constraints = "$src4 = $dst" in
+ let isPseudo = true, Constraints = "$src4 = $dst" in
def PTDPBF16PSV : PseudoI<(outs TILE: $dst), (ins GR16:$src1,
GR16:$src2, GR16:$src3, TILE:$src4,
TILE:$src5, TILE:$src6),
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 04778d31169d0..7834162e1d9c5 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -78,6 +78,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86Target() {
initializeX86CallFrameOptimizationPass(PR);
initializeX86CmovConverterPassPass(PR);
initializeX86TileConfigPass(PR);
+ initializeX86FastPreTileConfigPass(PR);
initializeX86FastTileConfigPass(PR);
initializeX86LowerTileCopyPass(PR);
initializeX86ExpandPseudoPass(PR);
@@ -420,9 +421,6 @@ void X86PassConfig::addIRPasses() {
addPass(createX86LowerAMXIntrinsicsPass());
addPass(createX86LowerAMXTypePass());
- if (TM->getOptLevel() == CodeGenOpt::None)
- addPass(createX86PreAMXConfigPass());
-
TargetPassConfig::addIRPasses();
if (TM->getOptLevel() != CodeGenOpt::None) {
@@ -511,9 +509,10 @@ void X86PassConfig::addPreRegAlloc() {
addPass(createX86FlagsCopyLoweringPass());
addPass(createX86DynAllocaExpander());
- if (getOptLevel() != CodeGenOpt::None) {
+ if (getOptLevel() != CodeGenOpt::None)
addPass(createX86PreTileConfigPass());
- }
+ else
+ addPass(createX86FastPreTileConfigPass());
}
void X86PassConfig::addMachineSSAOptimization() {
diff --git a/llvm/test/CodeGen/X86/AMX/amx-across-func.ll b/llvm/test/CodeGen/X86/AMX/amx-across-func.ll
index 7c4024cb1a379..df4102e3b61d8 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-across-func.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-across-func.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -verify-machineinstrs -enable-ipra | FileCheck -check-prefix=IPRA %s
+; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -verify-machineinstrs | FileCheck -check-prefix=O0 %s
@buf = dso_local global [3072 x i8] zeroinitializer, align 64
@@ -12,6 +13,10 @@ define internal void @foo() {
; IPRA-LABEL: foo:
; IPRA: # %bb.0: # %entry
; IPRA-NEXT: retq
+;
+; O0-LABEL: foo:
+; O0: # %bb.0: # %entry
+; O0-NEXT: retq
entry:
ret void
}
@@ -93,6 +98,112 @@ define dso_local void @test_api(i16 signext %0, i16 signext %1) nounwind {
; IPRA-NEXT: tilerelease
; IPRA-NEXT: vzeroupper
; IPRA-NEXT: retq
+;
+; O0-LABEL: test_api:
+; O0: # %bb.0:
+; O0-NEXT: pushq %rbp
+; O0-NEXT: movq %rsp, %rbp
+; O0-NEXT: andq $-1024, %rsp # imm = 0xFC00
+; O0-NEXT: subq $8192, %rsp # imm = 0x2000
+; O0-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; O0-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
+; O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %si, %cx
+; O0-NEXT: movw %cx, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; O0-NEXT: movw %di, %ax
+; O0-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: movl $buf, %r8d
+; O0-NEXT: movl $32, %r9d
+; O0-NEXT: movw $8, %si
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %si, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%r8,%r9), %tmm0
+; O0-NEXT: movl $64, %r8d
+; O0-NEXT: movw $8, %si
+; O0-NEXT: tilestored %tmm0, (%rdi,%r8)
+; O0-NEXT: movl $32, %edi
+; O0-NEXT: movl $buf+1024, %esi
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rsi,%rdi), %tmm0
+; O0-NEXT: movl $64, %esi
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rdx,%rsi)
+; O0-NEXT: vzeroupper
+; O0-NEXT: callq foo
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; O0-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
+; O0-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: movl $32, %r10d
+; O0-NEXT: movl $buf+2048, %edi
+; O0-NEXT: tileloadd (%rdi,%r10), %tmm0
+; O0-NEXT: movl $64, %edi
+; O0-NEXT: tilestored %tmm0, (%rsi,%rdi)
+; O0-NEXT: movl $64, %r10d
+; O0-NEXT: movw $8, %di
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%r8,%r10), %tmm0
+; O0-NEXT: movabsq $64, %r8
+; O0-NEXT: tilestored %tmm0, 1024(%rsp,%r8) # 1024-byte Folded Spill
+; O0-NEXT: movl $64, %r10d
+; O0-NEXT: movw $8, %r8w
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; O0-NEXT: # implicit-def: $r8b
+; O0-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%r9,%r10), %tmm2
+; O0-NEXT: movl $64, %r8d
+; O0-NEXT: tileloadd (%rsi,%r8), %tmm0
+; O0-NEXT: movw $8, %si
+; O0-NEXT: movabsq $64, %r8
+; O0-NEXT: tileloadd 1024(%rsp,%r8), %tmm1 # 1024-byte Folded Reload
+; O0-NEXT: tdpbssd %tmm2, %tmm1, %tmm0
+; O0-NEXT: movl $64, %esi
+; O0-NEXT: tilestored %tmm0, (%rdx,%rsi)
+; O0-NEXT: movl $64, %esi
+; O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
+; O0-NEXT: movl $32, %esi
+; O0-NEXT: movl $buf+2048, %edx
+; O0-NEXT: tilestored %tmm0, (%rdx,%rsi)
+; O0-NEXT: movq %rbp, %rsp
+; O0-NEXT: popq %rbp
+; O0-NEXT: tilerelease
+; O0-NEXT: retq
%3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
%4 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 1024), i64 32)
call void @foo()
@@ -226,6 +337,116 @@ define dso_local i32 @test_loop(i32 %0) nounwind {
; IPRA-NEXT: tilerelease
; IPRA-NEXT: vzeroupper
; IPRA-NEXT: retq
+;
+; O0-LABEL: test_loop:
+; O0: # %bb.0:
+; O0-NEXT: pushq %rbp
+; O0-NEXT: movq %rsp, %rbp
+; O0-NEXT: andq $-1024, %rsp # imm = 0xFC00
+; O0-NEXT: subq $4096, %rsp # imm = 0x1000
+; O0-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; O0-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
+; O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
+; O0-NEXT: movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: vzeroupper
+; O0-NEXT: callq foo
+; O0-NEXT: # %bb.1:
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload
+; O0-NEXT: xorl %eax, %eax
+; O0-NEXT: cmpl $0, %ecx
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: jg .LBB2_4
+; O0-NEXT: jmp .LBB2_3
+; O0-NEXT: .LBB2_2:
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: cmpl $3, %eax
+; O0-NEXT: je .LBB2_5
+; O0-NEXT: jmp .LBB2_4
+; O0-NEXT: .LBB2_3: # =>This Inner Loop Header: Depth=1
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: movl $buf, %edx
+; O0-NEXT: movl $32, %esi
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
+; O0-NEXT: movl $64, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rcx,%rdx)
+; O0-NEXT: callq foo
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; O0-NEXT: movl $64, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rcx,%rdx), %tmm0
+; O0-NEXT: movl $32, %edx
+; O0-NEXT: movl $buf+2048, %ecx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rcx,%rdx)
+; O0-NEXT: callq foo
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: addl $1, %eax
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: cmpl $0, %eax
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: je .LBB2_2
+; O0-NEXT: jmp .LBB2_3
+; O0-NEXT: .LBB2_4:
+; O0-NEXT: callq foo
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; O0-NEXT: movl $32, %esi
+; O0-NEXT: movl $buf+1024, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
+; O0-NEXT: movl $64, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rcx,%rdx)
+; O0-NEXT: movl $64, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rcx,%rdx), %tmm0
+; O0-NEXT: movl $32, %edx
+; O0-NEXT: movl $buf+1024, %ecx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rcx,%rdx)
+; O0-NEXT: jmp .LBB2_7
+; O0-NEXT: .LBB2_5:
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: cmpl $7, %eax
+; O0-NEXT: jne .LBB2_7
+; O0-NEXT: # %bb.6:
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: addl $1, %eax
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: jmp .LBB2_8
+; O0-NEXT: .LBB2_7:
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: subl $1, %eax
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: .LBB2_8:
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: movq %rbp, %rsp
+; O0-NEXT: popq %rbp
+; O0-NEXT: tilerelease
+; O0-NEXT: retq
call void @foo()
br label %2
2:
@@ -338,6 +559,65 @@ define dso_local void @test_loop2(i32 %0) nounwind {
; IPRA-NEXT: tilerelease
; IPRA-NEXT: vzeroupper
; IPRA-NEXT: retq
+;
+; O0-LABEL: test_loop2:
+; O0: # %bb.0:
+; O0-NEXT: pushq %rbp
+; O0-NEXT: movq %rsp, %rbp
+; O0-NEXT: andq $-1024, %rsp # imm = 0xFC00
+; O0-NEXT: subq $3072, %rsp # imm = 0xC00
+; O0-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; O0-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
+; O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
+; O0-NEXT: movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; O0-NEXT: xorl %eax, %eax
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: vzeroupper
+; O0-NEXT: callq foo
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: cmpl $0, %eax
+; O0-NEXT: jle .LBB3_3
+; O0-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; O0-NEXT: movl $buf, %edx
+; O0-NEXT: movl $32, %esi
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
+; O0-NEXT: movl $64, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rcx,%rdx)
+; O0-NEXT: callq foo
+; O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; O0-NEXT: movl $64, %edx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: # implicit-def: $al
+; O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; O0-NEXT: tileloadd (%rcx,%rdx), %tmm0
+; O0-NEXT: movl $32, %edx
+; O0-NEXT: movl $buf+2048, %ecx
+; O0-NEXT: movw $8, %ax
+; O0-NEXT: tilestored %tmm0, (%rcx,%rdx)
+; O0-NEXT: callq foo
+; O0-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
+; O0-NEXT: addl $1, %eax
+; O0-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; O0-NEXT: jmp .LBB3_1
+; O0-NEXT: .LBB3_3:
+; O0-NEXT: movq %rbp, %rsp
+; O0-NEXT: popq %rbp
+; O0-NEXT: tilerelease
+; O0-NEXT: retq
br label %2
2:
%3 = phi i32 [ 0, %1 ], [ %7, %5 ]
diff --git a/llvm/test/CodeGen/X86/AMX/amx-configO0toO0.ll b/llvm/test/CodeGen/X86/AMX/amx-configO0toO0.ll
index 0bc849db31a8b..8767c86270fc1 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-configO0toO0.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-configO0toO0.ll
@@ -12,6 +12,9 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-1024, %rsp # imm = 0xFC00
; AVX512-NEXT: subq $25600, %rsp # imm = 0x6400
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %dx, %ax
; AVX512-NEXT: movw %si, %cx
; AVX512-NEXT: movl %edi, {{[0-9]+}}(%rsp)
@@ -20,6 +23,7 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
; AVX512-NEXT: xorl %esi, %esi
; AVX512-NEXT: movl $1088, %edx # imm = 0x440
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq memset at PLT
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp)
@@ -44,10 +48,12 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: # %bb.1: # %if.then
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq $buf, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movabsq $buf, %rax
+; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $32, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw (%rax), %si
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw 2(%rax), %dx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -57,15 +63,12 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %dil
-; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: addq $64, %rdx
@@ -73,37 +76,40 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq $buf, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movabsq $buf, %rax
+; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $32, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: movw (%rax), %di
+; AVX512-NEXT: movw (%rax), %si
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw 2(%rax), %dx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %si, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %dx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %r8b
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
-; AVX512-NEXT: tileloadd (%rdx,%rdi), %tmm0
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: addq $64, %rdx
+; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq $buf, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movabsq $buf, %rax
+; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $32, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw (%rax), %si
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw 2(%rax), %dx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -113,15 +119,12 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %r8b
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg (%rdi)
; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: addq $64, %rdx
@@ -131,10 +134,12 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: .LBB0_2: # %if.else
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq $buf2, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movabsq $buf2, %rax
+; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $32, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw (%rax), %si
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw 2(%rax), %dx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -144,15 +149,12 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %dil
-; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: addq $64, %rdx
@@ -160,37 +162,40 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq $buf2, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movabsq $buf2, %rax
+; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $32, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: movw (%rax), %di
+; AVX512-NEXT: movw (%rax), %si
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw 2(%rax), %dx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %si, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %dx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %r8b
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
-; AVX512-NEXT: tileloadd (%rdx,%rdi), %tmm0
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: addq $64, %rdx
+; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq $buf2, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movabsq $buf2, %rax
+; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $32, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw (%rax), %si
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movw 2(%rax), %dx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -200,15 +205,12 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %r8b
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg (%rdi)
; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: addq $64, %rdx
@@ -219,7 +221,6 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: movl $1088, %edx # imm = 0x440
; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq memcpy@PLT
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
@@ -516,23 +517,21 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
-; AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
-; AVX512-NEXT: movw %r10w, %di
-; AVX512-NEXT: shrl $2, %r10d
-; AVX512-NEXT: movw %r10w, %r9w
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovaps %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %r8b
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %r8d
+; AVX512-NEXT: movw %r8w, %di
+; AVX512-NEXT: shrl $2, %r8d
+; AVX512-NEXT: movw %r8w, %r9w
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %di, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: # kill: def $r10b killed $r10b killed $r10d
-; AVX512-NEXT: movb %r10b, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $r9b
+; AVX512-NEXT: movb %r9b, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movl $64, %r8d
@@ -599,9 +598,9 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %si # 2-byte Reload
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %dx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: def $rdi killed $rax
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm0
; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm1
; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm2
; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm3
@@ -617,12 +616,10 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm13
; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm14
; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm15
-; AVX512-NEXT: vmovdqa64 {{[0-9]+}}(%rsp), %zmm16
; AVX512-NEXT: movw %si, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %dx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovdqa64 %zmm16, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovdqa64 %zmm15, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovdqa64 %zmm14, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovdqa64 %zmm13, {{[0-9]+}}(%rsp)
@@ -638,18 +635,16 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) #
; AVX512-NEXT: vmovdqa64 %zmm3, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovdqa64 %zmm2, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovdqa64 %zmm1, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %ax
; AVX512-NEXT: movw {{[0-9]+}}(%rsp), %cx
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
-; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %r8
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %r9b
-; AVX512-NEXT: movb %r9b, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg (%r8)
; AVX512-NEXT: movl $64, %r8d
; AVX512-NEXT: tileloadd (%rdi,%r8), %tmm0
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
diff --git a/llvm/test/CodeGen/X86/AMX/amx-configO2toO0.ll b/llvm/test/CodeGen/X86/AMX/amx-configO2toO0.ll
index 38c01f2f46cce..4aedf0a9788e5 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-configO2toO0.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-configO2toO0.ll
@@ -10,7 +10,10 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) n
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-1024, %rsp # imm = 0xFC00
-; AVX512-NEXT: subq $6144, %rsp # imm = 0x1800
+; AVX512-NEXT: subq $8192, %rsp # imm = 0x2000
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %dx, %ax
; AVX512-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; AVX512-NEXT: movw %si, %ax
@@ -30,34 +33,32 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) n
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %sil
-; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw $8, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; AVX512-NEXT: movl $buf, %r9d
; AVX512-NEXT: movl $32, %r10d
; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %si, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
-; AVX512-NEXT: movl $64, %r8d
-; AVX512-NEXT: tilestored %tmm0, (%r11,%r8)
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $8, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movl $64, %r9d
+; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: tilestored %tmm0, (%r8,%r9)
+; AVX512-NEXT: movl $buf, %r8d
+; AVX512-NEXT: movl $32, %r9d
+; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $sil
+; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
-; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
+; AVX512-NEXT: tileloadd (%r8,%r9), %tmm0
+; AVX512-NEXT: movl $64, %r8d
+; AVX512-NEXT: movw $8, %si
; AVX512-NEXT: tilestored %tmm0, (%rdi,%r8)
-; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %dil
-; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg (%rsi)
; AVX512-NEXT: movl $buf, %esi
; AVX512-NEXT: movl $32, %edi
; AVX512-NEXT: tileloadd (%rsi,%rdi), %tmm0
@@ -69,34 +70,32 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) n
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %sil
-; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw $8, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; AVX512-NEXT: movl $buf2, %r9d
; AVX512-NEXT: movl $32, %r10d
; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %si, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
-; AVX512-NEXT: movl $64, %r8d
-; AVX512-NEXT: tilestored %tmm0, (%r11,%r8)
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $8, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movl $64, %r9d
+; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: tilestored %tmm0, (%r8,%r9)
+; AVX512-NEXT: movl $buf2, %r8d
+; AVX512-NEXT: movl $32, %r9d
+; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $sil
+; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
-; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
+; AVX512-NEXT: tileloadd (%r8,%r9), %tmm0
+; AVX512-NEXT: movl $64, %r8d
+; AVX512-NEXT: movw $8, %si
; AVX512-NEXT: tilestored %tmm0, (%rdi,%r8)
-; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %dil
-; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg (%rsi)
; AVX512-NEXT: movl $buf2, %esi
; AVX512-NEXT: movl $32, %edi
; AVX512-NEXT: tileloadd (%rsi,%rdi), %tmm0
@@ -106,36 +105,45 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) n
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %sil
-; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: movl $64, %r10d
+; AVX512-NEXT: movw $8, %di
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX512-NEXT: tileloadd (%r8,%r10), %tmm0
+; AVX512-NEXT: movabsq $64, %r8
+; AVX512-NEXT: tilestored %tmm0, 1024(%rsp,%r8) # 1024-byte Folded Spill
+; AVX512-NEXT: movl $64, %r10d
+; AVX512-NEXT: movw $8, %r8w
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw $8, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $8, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %di, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: # implicit-def: $r8b
+; AVX512-NEXT: movb %r8b, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movl $64, %esi
-; AVX512-NEXT: movw $8, %di
-; AVX512-NEXT: tileloadd (%r10,%rsi), %tmm1
-; AVX512-NEXT: tileloadd (%r9,%rsi), %tmm2
-; AVX512-NEXT: tileloadd (%r8,%rsi), %tmm0
+; AVX512-NEXT: tileloadd (%r9,%r10), %tmm2
+; AVX512-NEXT: movl $64, %r8d
+; AVX512-NEXT: tileloadd (%rsi,%r8), %tmm0
+; AVX512-NEXT: movw $8, %si
+; AVX512-NEXT: movabsq $64, %r8
+; AVX512-NEXT: tileloadd 1024(%rsp,%r8), %tmm1 # 1024-byte Folded Reload
; AVX512-NEXT: tdpbssd %tmm2, %tmm1, %tmm0
+; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
-; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movb %al, %dil
-; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: ldtilecfg (%rsi)
; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movl $buf, %edx
diff --git a/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi.mir b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi.mir
new file mode 100644
index 0000000000000..2a300199f61b4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi.mir
@@ -0,0 +1,177 @@
+# RUN: llc -mtriple=x86_64-- -run-pass=fastpretileconfig -o - %s | FileCheck %s
+#
+# This case tests a tile PHI whose incoming value is itself a PHI (nested
+# access), and whose def block has not been visited yet.
+#
+# BB.5
+# %6 = phi(%3, b%10) <-----
+# | | |
+# | | |
+# BB.6 BB.7 |
+# \ / |
+# \ / |
+# \ / |
+# BB.8 -------------
+# %10 = phi(%8, %9)
+#
+# #define STRIDE 32
+# void foo(int cond, char *buf) {
+# __tile1024i a = {16, 64};
+# __tile1024i b = {16, 64};
+# __tile1024i c = {16, 64};
+#
+# if (cond) {
+# __tile_zero(&c);
+# } else {
+# __tile_loadd(&c, buf, STRIDE);
+# }
+# __tile_zero(&a);
+# __tile_zero(&b);
+# for(int i = 0; i < 10; i++) {
+# __tile_dpbssd(&c, a, b);
+# if (cond) {
+# __tile_zero(&c);
+# } else {
+# __tile_loadd(&c, buf, STRIDE);
+# }
+# }
+# __tile_stored(buf, STRIDE, c);
+# }
+---
+name: foo
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr8 }
+ - { id: 1, class: tile }
+ - { id: 2, class: tile }
+ - { id: 3, class: tile }
+ - { id: 4, class: tile }
+ - { id: 5, class: tile }
+ - { id: 6, class: gr32 }
+ - { id: 7, class: tile }
+ - { id: 8, class: tile }
+ - { id: 9, class: tile }
+ - { id: 10, class: tile }
+ - { id: 11, class: gr32 }
+ - { id: 12, class: gr32 }
+ - { id: 13, class: gr32 }
+ - { id: 14, class: gr64 }
+ - { id: 15, class: gr64 }
+ - { id: 16, class: gr8 }
+ - { id: 17, class: gr16 }
+ - { id: 18, class: gr16 }
+ - { id: 19, class: gr64_nosp }
+ - { id: 20, class: gr16 }
+ - { id: 21, class: gr16 }
+ - { id: 22, class: gr32 }
+ - { id: 23, class: gr16 }
+ - { id: 24, class: gr16 }
+ - { id: 25, class: gr16 }
+ - { id: 26, class: gr16 }
+ - { id: 27, class: gr16 }
+ - { id: 28, class: gr16 }
+ - { id: 29, class: tile }
+ - { id: 30, class: gr16 }
+ - { id: 31, class: gr16 }
+ - { id: 32, class: gr64_nosp }
+ - { id: 33, class: gr16 }
+ - { id: 34, class: gr16 }
+ - { id: 35, class: gr32 }
+ - { id: 36, class: gr64_nosp }
+ - { id: 37, class: gr16 }
+ - { id: 38, class: gr16 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%12' }
+ - { reg: '$rsi', virtual-reg: '%14' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $edi, $rsi
+
+ %14:gr64 = COPY $rsi
+ %12:gr32 = COPY $edi
+ %13:gr32 = COPY killed %12
+ %15:gr64 = COPY killed %14
+ CMP32ri8 %13, 0, implicit-def $eflags
+ %16:gr8 = SETCCr 4, implicit $eflags
+ TEST8ri %16, 1, implicit-def $eflags
+ JCC_1 %bb.2, 5, implicit $eflags
+
+ bb.1:
+ %17:gr16 = MOV16ri 64
+ %18:gr16 = MOV16ri 16
+ %1:tile = PTILEZEROV killed %18, killed %17
+ JMP_1 %bb.3
+
+ bb.2:
+ %19:gr64_nosp = MOV32ri64 32
+ %20:gr16 = MOV16ri 64
+ %21:gr16 = MOV16ri 16
+ %2:tile = PTILELOADDV killed %21, killed %20, %15, 1, killed %19, 0, $noreg
+
+ bb.3:
+ ; CHECK: %43:gr16 = PHI %17, %bb.1, %20, %bb.2
+ ; CHECK-NEXT: %42:gr16 = PHI %18, %bb.1, %21, %bb.2
+ ; CHECK-NEXT: %41:gr64_nosp = PHI %44, %bb.1, %45, %bb.2
+ ; CHECK-NEXT: LDTILECFG
+
+ %3:tile = PHI %1, %bb.1, %2, %bb.2
+ %25:gr16 = MOV16ri 64
+ %26:gr16 = MOV16ri 16
+ %4:tile = PTILEZEROV killed %26, killed %25
+ %23:gr16 = MOV16ri 64
+ %24:gr16 = MOV16ri 16
+ %5:tile = PTILEZEROV killed %24, killed %23
+ %22:gr32 = MOV32r0 implicit-def $eflags
+ JMP_1 %bb.5
+
+ bb.4:
+ %36:gr64_nosp = MOV32ri64 32
+ %37:gr16 = MOV16ri 64
+ %38:gr16 = MOV16ri 16
+ PTILESTOREDV killed %38, killed %37, %15, 1, killed %36, 0, $noreg, %10
+ RET64
+
+ bb.5:
+ ; CHECK: %6:gr32 = PHI %22, %bb.3, %35, %bb.8
+ ; CHECK-NEXT: %56:gr16 = PHI %43, %bb.3, %60, %bb.8
+ ; CHECK-NEXT: %55:gr16 = PHI %42, %bb.3, %59, %bb.8
+ ; CHECK-NEXT: %54:gr64_nosp = PHI %57, %bb.3, %58, %bb.8
+ ; CHECK-NEXT: LDTILECFG
+
+ %6:gr32 = PHI %22, %bb.3, %35, %bb.8
+ %7:tile = PHI %3, %bb.3, %10, %bb.8
+ %27:gr16 = MOV16ri 64
+ %28:gr16 = MOV16ri 16
+ %29:tile = PTDPBSSDV killed %28, %27, %27, %7, %4, %5
+ TEST8ri %16, 1, implicit-def $eflags
+ JCC_1 %bb.7, 5, implicit $eflags
+
+ bb.6:
+ %30:gr16 = MOV16ri 64
+ %31:gr16 = MOV16ri 16
+ %8:tile = PTILEZEROV killed %31, killed %30
+ JMP_1 %bb.8
+
+ bb.7:
+ %32:gr64_nosp = MOV32ri64 32
+ %33:gr16 = MOV16ri 64
+ %34:gr16 = MOV16ri 16
+ %9:tile = PTILELOADDV killed %34, killed %33, %15, 1, killed %32, 0, $noreg
+
+ bb.8:
+ ; CHECK: %60:gr16 = PHI %30, %bb.6, %33, %bb.7
+ ; CHECK-NEXT: %59:gr16 = PHI %31, %bb.6, %34, %bb.7
+ ; CHECK-NEXT: %58:gr64_nosp = PHI %61, %bb.6, %62, %bb.7
+ ; CHECK-NEXT: LDTILECFG
+
+ %10:tile = PHI %8, %bb.6, %9, %bb.7
+ %35:gr32 = ADD32ri8 %6, 1, implicit-def $eflags
+ CMP32ri8 %35, 10, implicit-def $eflags
+ JCC_1 %bb.4, 4, implicit $eflags
+ JMP_1 %bb.5
+
+...
diff --git a/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi2.mir b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi2.mir
new file mode 100644
index 0000000000000..4cea456e7e917
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi2.mir
@@ -0,0 +1,130 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=x86_64-- -run-pass=fastpretileconfig -o - %s | FileCheck %s
+#
+# bb.0
+# def %0
+# / \
+# bb.1 bb.2 <---------
+# def %1 %2=phi(%0, %3) |
+# \ / |
+# bb.3 -------------------
+# %3=phi(%1, %2)
+#
+# This case tests tile PHIs that depend on each other, where their def
+# blocks have not been visited yet.
+---
+name: foo
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 1, class: tile }
+ - { id: 2, class: tile }
+ - { id: 3, class: tile }
+ - { id: 4, class: tile }
+ - { id: 12, class: gr32 }
+ - { id: 13, class: gr32 }
+ - { id: 16, class: gr8 }
+ - { id: 17, class: gr16 }
+ - { id: 18, class: gr16 }
+ - { id: 19, class: gr64_nosp }
+ - { id: 22, class: gr32 }
+ - { id: 23, class: gr16 }
+ - { id: 24, class: gr16 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%12' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: foo
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $edi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_SET0_:%[0-9]+]]:vr128 = V_SET0
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 0, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1, align 4)
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 16, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 16, align 4)
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 32, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 32, align 4)
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 48, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 48, align 4)
+ ; CHECK-NEXT: MOV8mi %stack.1, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.1, align 4)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY killed [[COPY]]
+ ; CHECK-NEXT: %r0:gr16 = MOV16ri 64
+ ; CHECK-NEXT: %c0:gr16 = MOV16ri 16
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_nosp = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: %t0:tile = PTILEZEROV %r0, %c0
+ ; CHECK-NEXT: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.0, 1, killed [[MOV64ri]], 0, $noreg, %t0 :: (store (s8192) into %stack.0)
+ ; CHECK-NEXT: CMP32ri8 [[COPY1]], 0, implicit-def $eflags
+ ; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: TEST8ri [[SETCCr]], 1, implicit-def $eflags
+ ; CHECK-NEXT: JCC_1 %bb.2, 5, implicit $eflags
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 64
+ ; CHECK-NEXT: [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 16
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[LEA64r1:%[0-9]+]]:gr64_nosp = LEA64r %stack.2, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV [[MOV16ri1]], [[MOV16ri]]
+ ; CHECK-NEXT: [[MOV64ri1:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.2, 1, killed [[MOV64ri1]], 0, $noreg, [[PTILEZEROV]] :: (store (s8192) into %stack.2)
+ ; CHECK-NEXT: JMP_1 %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr16 = PHI %c0, %bb.0, %24, %bb.3
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gr16 = PHI %r0, %bb.0, %23, %bb.3
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:gr64_nosp = PHI [[LEA64r]], %bb.0, %22, %bb.3
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[MOV64ri2:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[PHI1]], [[PHI]], [[PHI2]], 1, killed [[MOV64ri2]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri3:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.3, 1, killed [[MOV64ri3]], 0, $noreg, [[PTILELOADDV]] :: (store (s8192) into %stack.3)
+ ; CHECK-NEXT: [[MOV32ri64_:%[0-9]+]]:gr64_nosp = MOV32ri64 32
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:gr16 = PHI [[MOV16ri]], %bb.1, [[PHI]], %bb.2
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:gr16 = PHI [[MOV16ri1]], %bb.1, [[PHI1]], %bb.2
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:gr64_nosp = PHI [[LEA64r1]], %bb.1, [[PHI2]], %bb.2
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[MOV64ri4:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[PHI4]], [[PHI3]], [[PHI5]], 1, killed [[MOV64ri4]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri5:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.4, 1, killed [[MOV64ri5]], 0, $noreg, [[PTILELOADDV1]] :: (store (s8192) into %stack.4)
+ ; CHECK-NEXT: [[MOV16ri2:%[0-9]+]]:gr16 = MOV16ri 64
+ ; CHECK-NEXT: [[MOV16ri3:%[0-9]+]]:gr16 = MOV16ri 16
+ ; CHECK-NEXT: JMP_1 %bb.2
+ bb.0.entry:
+ liveins: $edi
+
+ %12:gr32 = COPY $edi
+ %13:gr32 = COPY killed %12
+ %r0:gr16 = MOV16ri 64
+ %c0:gr16 = MOV16ri 16
+ %t0:tile = PTILEZEROV killed %r0, killed %c0
+ CMP32ri8 %13, 0, implicit-def $eflags
+ %16:gr8 = SETCCr 4, implicit $eflags
+ TEST8ri %16, 1, implicit-def $eflags
+ JCC_1 %bb.2, 5, implicit $eflags
+
+ bb.1:
+ %17:gr16 = MOV16ri 64
+ %18:gr16 = MOV16ri 16
+ %1:tile = PTILEZEROV killed %18, killed %17
+ JMP_1 %bb.3
+
+ bb.2:
+ %2:tile = PHI %t0, %bb.0, %3, %bb.3
+ %19:gr64_nosp = MOV32ri64 32
+
+ bb.3:
+ %3:tile = PHI %1, %bb.1, %2, %bb.2
+ %23:gr16 = MOV16ri 64
+ %24:gr16 = MOV16ri 16
+ JMP_1 %bb.2
diff --git a/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi4.mir b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi4.mir
new file mode 100644
index 0000000000000..14ff3b2996d62
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-phi4.mir
@@ -0,0 +1,144 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=x86_64-- -run-pass=fastpretileconfig -o - %s | FileCheck %s
+#
+# bb.0
+# def %0
+# / \
+# bb.1 |-->bb.2 <-----------
+# | %2=phi(%0, %3, %4) |
+# def %1 -- %5=phi(%0, %3, %2) |
+# \ / |
+# bb.3 ---------------------
+# def %3
+#
+# This case tests tile PHIs that depend on each other, where their def
+# blocks have not been visited yet.
+---
+name: foo
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 1, class: tile }
+ - { id: 2, class: tile }
+ - { id: 3, class: tile }
+ - { id: 4, class: tile }
+ - { id: 12, class: gr32 }
+ - { id: 13, class: gr32 }
+ - { id: 16, class: gr8 }
+ - { id: 17, class: gr16 }
+ - { id: 18, class: gr16 }
+ - { id: 19, class: gr64_nosp }
+ - { id: 22, class: gr32 }
+ - { id: 23, class: gr16 }
+ - { id: 24, class: gr16 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%12' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: foo
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $edi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_SET0_:%[0-9]+]]:vr128 = V_SET0
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 0, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1, align 4)
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 16, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 16, align 4)
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 32, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 32, align 4)
+ ; CHECK-NEXT: MOVUPSmr %stack.1, 1, $noreg, 48, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 48, align 4)
+ ; CHECK-NEXT: MOV8mi %stack.1, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.1, align 4)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY killed [[COPY]]
+ ; CHECK-NEXT: %r0:gr16 = MOV16ri 64
+ ; CHECK-NEXT: %c0:gr16 = MOV16ri 16
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_nosp = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[LEA64r1:%[0-9]+]]:gr64_nosp = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: %t0:tile = PTILEZEROV %r0, %c0
+ ; CHECK-NEXT: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.0, 1, killed [[MOV64ri]], 0, $noreg, %t0 :: (store (s8192) into %stack.0)
+ ; CHECK-NEXT: CMP32ri8 [[COPY1]], 0, implicit-def $eflags
+ ; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: TEST8ri [[SETCCr]], 1, implicit-def $eflags
+ ; CHECK-NEXT: JCC_1 %bb.2, 5, implicit $eflags
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 64
+ ; CHECK-NEXT: [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 16
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV killed [[MOV16ri1]], killed [[MOV16ri]]
+ ; CHECK-NEXT: JMP_1 %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr16 = PHI %c0, %bb.0, %11, %bb.3, %17, %bb.2
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gr16 = PHI %r0, %bb.0, %12, %bb.3, %18, %bb.2
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:gr64_nosp = PHI [[LEA64r1]], %bb.0, %31, %bb.3, %32, %bb.2
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:gr16 = PHI %c0, %bb.0, %11, %bb.3, %17, %bb.2
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:gr16 = PHI %r0, %bb.0, %12, %bb.3, %18, %bb.2
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:gr64_nosp = PHI [[LEA64r]], %bb.0, %24, %bb.3, %25, %bb.2
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[MOV64ri1:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[PHI1]], [[PHI]], [[PHI2]], 1, killed [[MOV64ri1]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri2:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[PHI4]], [[PHI3]], [[PHI5]], 1, killed [[MOV64ri2]], 0, $noreg
+ ; CHECK-NEXT: [[MOV16ri2:%[0-9]+]]:gr16 = MOV16ri 64
+ ; CHECK-NEXT: [[MOV16ri3:%[0-9]+]]:gr16 = MOV16ri 16
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[LEA64r2:%[0-9]+]]:gr64_nosp = LEA64r %stack.3, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[LEA64r3:%[0-9]+]]:gr64_nosp = LEA64r %stack.3, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[PTILEZEROV1:%[0-9]+]]:tile = PTILEZEROV [[MOV16ri3]], [[MOV16ri2]]
+ ; CHECK-NEXT: [[MOV64ri3:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.3, 1, killed [[MOV64ri3]], 0, $noreg, [[PTILEZEROV1]] :: (store (s8192) into %stack.3)
+ ; CHECK-NEXT: TEST8ri [[SETCCr]], 1, implicit-def $eflags
+ ; CHECK-NEXT: JCC_1 %bb.2, 5, implicit $eflags
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOV16ri4:%[0-9]+]]:gr16 = MOV16ri 64
+ ; CHECK-NEXT: [[MOV16ri5:%[0-9]+]]:gr16 = MOV16ri 16
+ ; CHECK-NEXT: LDTILECFG %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.1, align 4)
+ ; CHECK-NEXT: [[LEA64r4:%[0-9]+]]:gr64_nosp = LEA64r %stack.2, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[LEA64r5:%[0-9]+]]:gr64_nosp = LEA64r %stack.2, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[PTILEZEROV2:%[0-9]+]]:tile = PTILEZEROV [[MOV16ri5]], [[MOV16ri4]]
+ ; CHECK-NEXT: [[MOV64ri4:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.2, 1, killed [[MOV64ri4]], 0, $noreg, [[PTILEZEROV2]] :: (store (s8192) into %stack.2)
+ ; CHECK-NEXT: JMP_1 %bb.2
+ bb.0.entry:
+ liveins: $edi
+
+ %12:gr32 = COPY $edi
+ %13:gr32 = COPY killed %12
+ %r0:gr16 = MOV16ri 64
+ %c0:gr16 = MOV16ri 16
+ %t0:tile = PTILEZEROV %r0, %c0
+ CMP32ri8 %13, 0, implicit-def $eflags
+ %16:gr8 = SETCCr 4, implicit $eflags
+ TEST8ri %16, 1, implicit-def $eflags
+ JCC_1 %bb.2, 5, implicit $eflags
+
+ bb.1:
+ %17:gr16 = MOV16ri 64
+ %18:gr16 = MOV16ri 16
+ %1:tile = PTILEZEROV killed %18, killed %17
+ JMP_1 %bb.3
+
+ bb.2:
+ %2:tile = PHI %t0, %bb.0, %3, %bb.3, %4, %bb.2
+ %5:tile = PHI %t0, %bb.0, %3, %bb.3, %2, %bb.2
+ %25:gr16 = MOV16ri 64
+ %26:gr16 = MOV16ri 16
+ %4:tile = PTILEZEROV killed %26, killed %25
+ TEST8ri %16, 1, implicit-def $eflags
+ JCC_1 %bb.2, 5, implicit $eflags
+
+ bb.3:
+ %23:gr16 = MOV16ri 64
+ %24:gr16 = MOV16ri 16
+ %3:tile = PTILEZEROV killed %24, killed %23
+ JMP_1 %bb.2
diff --git a/llvm/test/CodeGen/X86/AMX/amx-fastconfig-spill.mir b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-spill.mir
new file mode 100644
index 0000000000000..c797ce764c2c9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastconfig-spill.mir
@@ -0,0 +1,154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=x86_64-- -mattr=+amx-int8,avx512f -run-pass=fastpretileconfig -o - %s | FileCheck %s
+
+# Test spill/reload across basic blocks.
+
+---
+name: foo
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr16 }
+ - { id: 1, class: gr16 }
+ - { id: 2, class: tile }
+ - { id: 3, class: gr64_nosp }
+ - { id: 4, class: gr64 }
+ - { id: 5, class: tile }
+ - { id: 6, class: tile }
+ - { id: 7, class: tile }
+ - { id: 8, class: gr32 }
+ - { id: 9, class: vr512 }
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, size: 1024, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: foo
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[AVX512_512_SET0_:%[0-9]+]]:vr512 = AVX512_512_SET0
+ ; CHECK-NEXT: VMOVUPSZmr %stack.4, 1, $noreg, 0, $noreg, [[AVX512_512_SET0_]] :: (store (s512) into %stack.4, align 4)
+ ; CHECK-NEXT: MOV8mi %stack.4, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.4, align 4)
+ ; CHECK-NEXT: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 32
+ ; CHECK-NEXT: [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 8
+ ; CHECK-NEXT: LDTILECFG %stack.4, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.4, align 4)
+ ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV [[MOV16ri1]], [[MOV16ri]]
+ ; CHECK-NEXT: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.3, 1, killed [[MOV64ri]], 0, $noreg, [[PTILEZEROV]] :: (store (s8192) into %stack.3)
+ ; CHECK-NEXT: [[MOV32ri64_:%[0-9]+]]:gr64_nosp = MOV32ri64 32
+ ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri1:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.2, 1, killed [[MOV64ri1]], 0, $noreg, [[PTILELOADDV]] :: (store (s8192) into %stack.2)
+ ; CHECK-NEXT: %row:gr16 = MOV16ri 32
+ ; CHECK-NEXT: %col:gr16 = MOV16ri 8
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: LDTILECFG %stack.4, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.4, align 4)
+ ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV %row, %col, [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri2:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV2:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], %stack.2, 1, killed [[MOV64ri2]], 0, $noreg :: (load (s8192) from %stack.2)
+ ; CHECK-NEXT: [[MOV64ri3:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV3:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], %stack.3, 1, killed [[MOV64ri3]], 0, $noreg :: (load (s8192) from %stack.3)
+ ; CHECK-NEXT: [[PTDPBSSDV:%[0-9]+]]:tile = PTDPBSSDV [[MOV16ri1]], [[MOV16ri]], [[MOV16ri]], killed [[PTILELOADDV1]], killed [[PTILELOADDV3]], killed [[PTILELOADDV2]]
+ ; CHECK-NEXT: PTILESTOREDV killed [[MOV16ri1]], killed [[MOV16ri]], killed [[LEA64r]], 1, killed [[MOV32ri64_]], 0, $noreg, killed [[PTDPBSSDV]]
+ ; CHECK-NEXT: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: $eax = COPY killed [[MOV32r0_]]
+ ; CHECK-NEXT: RET 0, killed $eax
+ bb.0.entry:
+ %0:gr16 = MOV16ri 32
+ %1:gr16 = MOV16ri 8
+ %2:tile = PTILEZEROV %1, %0
+ %3:gr64_nosp = MOV32ri64 32
+ %4:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ %5:tile = PTILELOADDV %1, %0, %4, 1, %3, 0, $noreg
+ %row:gr16 = MOV16ri 32
+ %col:gr16 = MOV16ri 8
+ JMP_1 %bb.1
+ bb.1:
+ %6:tile = PTILELOADDV %row, %col, %4, 1, %3, 0, $noreg
+ %7:tile = PTDPBSSDV %1, %0, %0, killed %6, killed %2, killed %5
+ PTILESTOREDV killed %1, killed %0, killed %4, 1, killed %3, 0, $noreg, killed %7
+ %8:gr32 = MOV32r0 implicit-def dead $eflags
+ $eax = COPY killed %8
+ RET 0, killed $eax
+
+...
+
+# Test tile copy fold
+---
+name: copy
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr16 }
+ - { id: 1, class: gr16 }
+ - { id: 2, class: tile }
+ - { id: 3, class: gr64_nosp }
+ - { id: 4, class: gr64 }
+ - { id: 5, class: tile }
+ - { id: 6, class: tile }
+ - { id: 7, class: tile }
+ - { id: 8, class: gr32 }
+ - { id: 9, class: vr512 }
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, size: 1024, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: copy
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[AVX512_512_SET0_:%[0-9]+]]:vr512 = AVX512_512_SET0
+ ; CHECK-NEXT: VMOVUPSZmr %stack.4, 1, $noreg, 0, $noreg, [[AVX512_512_SET0_]] :: (store (s512) into %stack.4, align 4)
+ ; CHECK-NEXT: MOV8mi %stack.4, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.4, align 4)
+ ; CHECK-NEXT: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 32
+ ; CHECK-NEXT: [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 8
+ ; CHECK-NEXT: LDTILECFG %stack.4, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.4, align 4)
+ ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV [[MOV16ri1]], [[MOV16ri]]
+ ; CHECK-NEXT: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.3, 1, killed [[MOV64ri]], 0, $noreg, [[PTILEZEROV]] :: (store (s8192) into %stack.3)
+ ; CHECK-NEXT: [[MOV32ri64_:%[0-9]+]]:gr64_nosp = MOV32ri64 32
+ ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri1:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: TILESTORED %stack.2, 1, killed [[MOV64ri1]], 0, $noreg, [[PTILELOADDV]] :: (store (s8192) into %stack.2)
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: LDTILECFG %stack.4, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.4, align 4)
+ ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[MOV64ri2:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: %t:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], %stack.2, 1, killed [[MOV64ri2]], 0, $noreg :: (load (s8192) from %stack.2)
+ ; CHECK-NEXT: [[MOV64ri3:%[0-9]+]]:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: [[PTILELOADDV2:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], %stack.3, 1, killed [[MOV64ri3]], 0, $noreg :: (load (s8192) from %stack.3)
+ ; CHECK-NEXT: [[PTDPBSSDV:%[0-9]+]]:tile = PTDPBSSDV [[MOV16ri1]], [[MOV16ri]], [[MOV16ri]], killed [[PTILELOADDV1]], killed [[PTILELOADDV2]], killed %t
+ ; CHECK-NEXT: PTILESTOREDV killed [[MOV16ri1]], killed [[MOV16ri]], killed [[LEA64r]], 1, killed [[MOV32ri64_]], 0, $noreg, killed [[PTDPBSSDV]]
+ ; CHECK-NEXT: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: $eax = COPY killed [[MOV32r0_]]
+ ; CHECK-NEXT: RET 0, killed $eax
+ bb.0.entry:
+ %0:gr16 = MOV16ri 32
+ %1:gr16 = MOV16ri 8
+ %2:tile = PTILEZEROV %1, %0
+ %3:gr64_nosp = MOV32ri64 32
+ %4:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ %5:tile = PTILELOADDV %1, %0, %4, 1, %3, 0, $noreg
+ JMP_1 %bb.1
+ bb.1:
+ %6:tile = PTILELOADDV %1, %0, %4, 1, %3, 0, $noreg
+ %t:tile = COPY %5
+ %7:tile = PTDPBSSDV %1, %0, %0, killed %6, killed %2, killed %t
+ PTILESTOREDV killed %1, killed %0, killed %4, 1, killed %3, 0, $noreg, killed %7
+ %8:gr32 = MOV32r0 implicit-def dead $eflags
+ $eax = COPY killed %8
+ RET 0, killed $eax
+
+...
diff --git a/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir b/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
new file mode 100644
index 0000000000000..ff0fdbe3aaf71
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
@@ -0,0 +1,146 @@
+# RUN: llc -mtriple=x86_64-- -run-pass=fastpretileconfig -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-unknown"
+
+ @buf = dso_local global [1024 x i8] zeroinitializer, align 16
+ @buf2 = dso_local global [1024 x i8] zeroinitializer, align 16
+
+ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) local_unnamed_addr #0 {
+ entry:
+ %tobool.not = icmp eq i32 %cond, 0
+ br i1 %tobool.not, label %if.else, label %if.then
+
+ if.then: ; preds = %entry
+ %0 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
+ %1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
+ %2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
+ br label %if.end
+
+ if.else: ; preds = %entry
+ %3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
+ %4 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
+ %5 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
+ br label %if.end
+
+ if.end: ; preds = %if.else, %if.then
+ %a.sroa.1094.0.in = phi x86_amx [ %3, %if.else ], [ %0, %if.then ]
+ %b.sroa.1069.0.in = phi x86_amx [ %4, %if.else ], [ %1, %if.then ]
+ %c.sroa.1044.0.in = phi x86_amx [ %5, %if.else ], [ %2, %if.then ]
+ %6 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %row, i16 %col, i16 8, x86_amx %c.sroa.1044.0.in, x86_amx %a.sroa.1094.0.in, x86_amx %b.sroa.1069.0.in)
+ tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx %6)
+ ret void
+ }
+
+ declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64) #1
+ declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) #1
+ declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx) #1
+
+ attributes #0 = { "target-features"="+amx-int8,+avx512f" }
+ attributes #1 = { nounwind "target-features"="+amx-int8,+avx512f" }
+
+...
+---
+name: test_api
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: tile }
+ - { id: 1, class: tile }
+ - { id: 2, class: tile }
+ - { id: 3, class: tile }
+ - { id: 4, class: tile }
+ - { id: 5, class: tile }
+ - { id: 6, class: tile }
+ - { id: 7, class: tile }
+ - { id: 8, class: tile }
+ - { id: 9, class: gr32 }
+ - { id: 10, class: gr32 }
+ - { id: 11, class: gr32 }
+ - { id: 12, class: gr16 }
+ - { id: 13, class: gr16 }
+ - { id: 14, class: gr64 }
+ - { id: 15, class: gr64_nosp }
+ - { id: 16, class: gr16 }
+ - { id: 17, class: gr64 }
+ - { id: 18, class: gr64_nosp }
+ - { id: 19, class: gr16 }
+ - { id: 20, class: gr16 }
+ - { id: 21, class: tile }
+ - { id: 22, class: gr64 }
+ - { id: 23, class: gr64_nosp }
+liveins:
+ - { reg: '$edi', virtual-reg: '%9' }
+ - { reg: '$esi', virtual-reg: '%10' }
+ - { reg: '$edx', virtual-reg: '%11' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ successors: %bb.2(0x30000000), %bb.1(0x50000000)
+ liveins: $edi, $esi, $edx
+
+ ; CHECK: {{%.*}}:vr512 = AVX512_512_SET0
+ ; CHECK-NEXT: VMOVUPSZmr %stack.3, 1, $noreg, 0, $noreg, {{%.*}}
+
+ %11:gr32 = COPY killed $edx
+ %10:gr32 = COPY killed $esi
+ %9:gr32 = COPY killed $edi
+ %13:gr16 = COPY killed %11.sub_16bit
+ %12:gr16 = COPY killed %10.sub_16bit
+ TEST32rr killed %9, %9, implicit-def $eflags
+ JCC_1 %bb.2, 4, implicit killed $eflags
+ JMP_1 %bb.1
+
+ bb.1.if.then:
+ %14:gr64 = MOV32ri64 @buf
+ %15:gr64_nosp = MOV32ri64 32
+ %16:gr16 = MOV16ri 8
+ ; CHECK: LDTILECFG
+ %0:tile = PTILELOADDV %12, %16, %14, 1, %15, 0, $noreg
+ %1:tile = PTILELOADDV killed %16, %13, %14, 1, %15, 0, $noreg
+ %2:tile = PTILELOADDV %12, %13, killed %14, 1, killed %15, 0, $noreg
+ JMP_1 %bb.3
+
+ bb.2.if.else:
+ %17:gr64 = MOV32ri64 @buf2
+ %18:gr64_nosp = MOV32ri64 32
+ %19:gr16 = MOV16ri 8
+ ; CHECK: LDTILECFG
+ %3:tile = PTILELOADDV %12, %19, %17, 1, %18, 0, $noreg
+ %4:tile = PTILELOADDV killed %19, %13, %17, 1, %18, 0, $noreg
+ %5:tile = PTILELOADDV %12, %13, killed %17, 1, killed %18, 0, $noreg
+
+ bb.3.if.end:
+
+ ; CHECK: bb.3.if.end
+ ; CHECK-NEXT: %44:gr16 = PHI %16, %bb.1, %19, %bb.2
+ ; CHECK-NEXT: %43:gr16 = PHI %12, %bb.1, %12, %bb.2
+ ; CHECK-NEXT: %42:gr64_nosp = PHI %45, %bb.1, %46, %bb.2
+ ; CHECK-NEXT: %38:gr16 = PHI %13, %bb.1, %13, %bb.2
+ ; CHECK-NEXT: %37:gr16 = PHI %16, %bb.1, %19, %bb.2
+ ; CHECK-NEXT: %36:gr64_nosp = PHI %39, %bb.1, %40, %bb.2
+ ; CHECK-NEXT: %32:gr16 = PHI %13, %bb.1, %13, %bb.2
+ ; CHECK-NEXT: %31:gr16 = PHI %12, %bb.1, %12, %bb.2
+ ; CHECK-NEXT: %30:gr64_nosp = PHI %33, %bb.1, %34, %bb.2
+ ; CHECK-NEXT: LDTILECFG
+ ; CHECK-NEXT: %47:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: %6:tile = PTILELOADDV %43, %44, %42, 1, killed %47, 0, $noreg
+ ; CHECK-NEXT: %41:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: %7:tile = PTILELOADDV %37, %38, %36, 1, killed %41, 0, $noreg
+ ; CHECK-NEXT: %35:gr64_nosp = MOV64ri 64
+ ; CHECK-NEXT: %8:tile = PTILELOADDV %31, %32, %30, 1, killed %35, 0, $noreg
+
+ %6:tile = PHI %0, %bb.1, %3, %bb.2
+ %7:tile = PHI %1, %bb.1, %4, %bb.2
+ %8:tile = PHI %2, %bb.1, %5, %bb.2
+ %20:gr16 = MOV16ri 8
+ %21:tile = PTDPBSSDV %12, %13, killed %20, killed %8, killed %6, killed %7
+ %22:gr64 = MOV32ri64 @buf
+ %23:gr64_nosp = MOV32ri64 32
+ PTILESTOREDV killed %12, killed %13, killed %22, 1, killed %23, 0, $noreg, killed %21
+ RET 0
+
+...
diff --git a/llvm/test/CodeGen/X86/AMX/amx-fastpreconfig.mir b/llvm/test/CodeGen/X86/AMX/amx-fastpreconfig.mir
new file mode 100644
index 0000000000000..53877b5da84a3
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastpreconfig.mir
@@ -0,0 +1,61 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=x86_64-- -mattr=+amx-int8,avx512f -run-pass=fastpretileconfig -o - %s | FileCheck %s
+
+# Test the case where a TILELOADD is mixed in with pseudo AMX instructions.
+...
+---
+name: main
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64_nosp }
+ - { id: 1, class: gr64 }
+ - { id: 2, class: gr16 }
+ - { id: 3, class: gr16 }
+ - { id: 4, class: tile }
+ - { id: 5, class: tile }
+ - { id: 6, class: tile }
+ - { id: 7, class: tile }
+ - { id: 8, class: gr32 }
+ - { id: 9, class: vr512 }
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, size: 1024, alignment: 16 }
+ - { id: 1, size: 64, alignment: 4 }
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: main
+ ; CHECK: [[AVX512_512_SET0_:%[0-9]+]]:vr512 = AVX512_512_SET0
+ ; CHECK-NEXT: VMOVUPSZmr %stack.2, 1, $noreg, 0, $noreg, [[AVX512_512_SET0_]] :: (store (s512) into %stack.2, align 4)
+ ; CHECK-NEXT: MOV8mi %stack.2, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.2, align 4)
+ ; CHECK-NEXT: [[MOV32ri64_:%[0-9]+]]:gr64_nosp = MOV32ri64 32
+ ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ ; CHECK-NEXT: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 32
+ ; CHECK-NEXT: [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 8
+ ; CHECK-NEXT: LDTILECFG %stack.2, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load store (s512) on %stack.2, align 4)
+ ; CHECK-NEXT: $tmm0 = TILELOADD [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[PTILELOADDV2:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[MOV16ri]], [[LEA64r]], 1, [[MOV32ri64_]], 0, $noreg
+ ; CHECK-NEXT: [[PTDPBSSDV:%[0-9]+]]:tile = PTDPBSSDV [[MOV16ri1]], [[MOV16ri]], [[MOV16ri]], killed [[PTILELOADDV2]], killed [[PTILELOADDV]], killed [[PTILELOADDV1]]
+ ; CHECK-NEXT: PTILESTOREDV killed [[MOV16ri1]], killed [[MOV16ri]], killed [[LEA64r]], 1, killed [[MOV32ri64_]], 0, $noreg, killed [[PTDPBSSDV]]
+ ; CHECK-NEXT: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: $eax = COPY killed [[MOV32r0_]]
+ ; CHECK-NEXT: RET 0, killed $eax
+ %0:gr64_nosp = MOV32ri64 32
+ %1:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
+ %2:gr16 = MOV16ri 32
+ %3:gr16 = MOV16ri 8
+ $tmm0 = TILELOADD %1, 1, %0, 0, $noreg
+ %4:tile = PTILELOADDV %3, %2, %1, 1, %0, 0, $noreg
+ %5:tile = PTILELOADDV %3, %2, %1, 1, %0, 0, $noreg
+ %6:tile = PTILELOADDV %3, %2, %1, 1, %0, 0, $noreg
+ %7:tile = PTDPBSSDV %3, %2, %2, killed %6, killed %4, killed %5
+ PTILESTOREDV killed %3, killed %2, killed %1, 1, killed %0, 0, $noreg, killed %7
+ %8:gr32 = MOV32r0 implicit-def dead $eflags
+ $eax = COPY killed %8
+ RET 0, killed $eax
+
+...
diff --git a/llvm/test/CodeGen/X86/AMX/amx-zero-config.ll b/llvm/test/CodeGen/X86/AMX/amx-zero-config.ll
index a76a1add0676a..7e0fd38523996 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-zero-config.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-zero-config.ll
@@ -66,30 +66,29 @@ define void @foo(i8 *%buf) nounwind {
; AVX512-O0-NEXT: pushq %rbp
; AVX512-O0-NEXT: movq %rsp, %rbp
; AVX512-O0-NEXT: andq $-1024, %rsp # imm = 0xFC00
-; AVX512-O0-NEXT: subq $2048, %rsp # imm = 0x800
-; AVX512-O0-NEXT: movq %rsp, %rdx
+; AVX512-O0-NEXT: subq $3072, %rsp # imm = 0xC00
; AVX512-O0-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-O0-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
+; AVX512-O0-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
; AVX512-O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: movb $8, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: movw $32, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX512-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
; AVX512-O0-NEXT: movw $32, %cx
; AVX512-O0-NEXT: movw $8, %ax
+; AVX512-O0-NEXT: # implicit-def: $al
+; AVX512-O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-O0-NEXT: tilezero %tmm0
; AVX512-O0-NEXT: movl $64, %esi
+; AVX512-O0-NEXT: movw $32, %cx
+; AVX512-O0-NEXT: movw $8, %ax
; AVX512-O0-NEXT: tilestored %tmm0, (%rdx,%rsi)
-; AVX512-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rax
-; AVX512-O0-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: movw $8, %cx
-; AVX512-O0-NEXT: # kill: def $cl killed $cl killed $cx
-; AVX512-O0-NEXT: movb %cl, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: movw $32, {{[0-9]+}}(%rsp)
-; AVX512-O0-NEXT: ldtilecfg (%rax)
; AVX512-O0-NEXT: movl $64, %esi
; AVX512-O0-NEXT: movw $32, %cx
; AVX512-O0-NEXT: movw $8, %ax
+; AVX512-O0-NEXT: # implicit-def: $al
+; AVX512-O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX512-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-O0-NEXT: movl $1024, %edx # imm = 0x400
; AVX512-O0-NEXT: movw $32, %cx
@@ -106,32 +105,30 @@ define void @foo(i8 *%buf) nounwind {
; AVX2-O0-NEXT: pushq %rbp
; AVX2-O0-NEXT: movq %rsp, %rbp
; AVX2-O0-NEXT: andq $-1024, %rsp # imm = 0xFC00
-; AVX2-O0-NEXT: subq $2048, %rsp # imm = 0x800
-; AVX2-O0-NEXT: movq %rsp, %rdx
+; AVX2-O0-NEXT: subq $3072, %rsp # imm = 0xC00
; AVX2-O0-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX2-O0-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-O0-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: movb $8, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: movw $32, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; AVX2-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
; AVX2-O0-NEXT: movw $32, %cx
; AVX2-O0-NEXT: movw $8, %ax
+; AVX2-O0-NEXT: # implicit-def: $al
+; AVX2-O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX2-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-O0-NEXT: tilezero %tmm0
; AVX2-O0-NEXT: movl $64, %esi
+; AVX2-O0-NEXT: movw $32, %cx
+; AVX2-O0-NEXT: movw $8, %ax
; AVX2-O0-NEXT: tilestored %tmm0, (%rdx,%rsi)
-; AVX2-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rax
-; AVX2-O0-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: movw $8, %cx
-; AVX2-O0-NEXT: # kill: def $cl killed $cl killed $cx
-; AVX2-O0-NEXT: movb %cl, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: movw $32, {{[0-9]+}}(%rsp)
-; AVX2-O0-NEXT: ldtilecfg (%rax)
; AVX2-O0-NEXT: movl $64, %esi
; AVX2-O0-NEXT: movw $32, %cx
; AVX2-O0-NEXT: movw $8, %ax
+; AVX2-O0-NEXT: # implicit-def: $al
+; AVX2-O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX2-O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; AVX2-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX2-O0-NEXT: movl $1024, %edx # imm = 0x400
; AVX2-O0-NEXT: movw $32, %cx
@@ -148,36 +145,32 @@ define void @foo(i8 *%buf) nounwind {
; SSE2-O0-NEXT: pushq %rbp
; SSE2-O0-NEXT: movq %rsp, %rbp
; SSE2-O0-NEXT: andq $-1024, %rsp # imm = 0xFC00
-; SSE2-O0-NEXT: subq $2048, %rsp # imm = 0x800
-; SSE2-O0-NEXT: movq %rsp, %rdx
+; SSE2-O0-NEXT: subq $3072, %rsp # imm = 0xC00
; SSE2-O0-NEXT: xorps %xmm0, %xmm0
; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movb $8, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movw $32, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
+; SSE2-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
; SSE2-O0-NEXT: movw $32, %cx
; SSE2-O0-NEXT: movw $8, %ax
+; SSE2-O0-NEXT: # implicit-def: $al
+; SSE2-O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; SSE2-O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; SSE2-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-O0-NEXT: tilezero %tmm0
; SSE2-O0-NEXT: movl $64, %esi
+; SSE2-O0-NEXT: movw $32, %cx
+; SSE2-O0-NEXT: movw $8, %ax
; SSE2-O0-NEXT: tilestored %tmm0, (%rdx,%rsi)
-; SSE2-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rax
-; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movb $1, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movw $8, %cx
-; SSE2-O0-NEXT: # kill: def $cl killed $cl killed $cx
-; SSE2-O0-NEXT: movb %cl, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: movw $32, {{[0-9]+}}(%rsp)
-; SSE2-O0-NEXT: ldtilecfg (%rax)
; SSE2-O0-NEXT: movl $64, %esi
; SSE2-O0-NEXT: movw $32, %cx
; SSE2-O0-NEXT: movw $8, %ax
+; SSE2-O0-NEXT: # implicit-def: $al
+; SSE2-O0-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; SSE2-O0-NEXT: movw %cx, {{[0-9]+}}(%rsp)
+; SSE2-O0-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-O0-NEXT: tileloadd (%rdx,%rsi), %tmm0
; SSE2-O0-NEXT: movl $1024, %edx # imm = 0x400
; SSE2-O0-NEXT: movw $32, %cx
diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll
index 54eecb113540f..43d0c0839940e 100644
--- a/llvm/test/CodeGen/X86/O0-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -20,7 +20,6 @@
; CHECK-NEXT: Expand Atomic instructions
; CHECK-NEXT: Lower AMX intrinsics
; CHECK-NEXT: Lower AMX type for load/store
-; CHECK-NEXT: Pre AMX Tile Config
; CHECK-NEXT: Module Verifier
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
@@ -42,6 +41,7 @@
; CHECK-NEXT: MachineDominator Tree Construction
; CHECK-NEXT: X86 EFLAGS copy lowering
; CHECK-NEXT: X86 DynAlloca Expander
+; CHECK-NEXT: Fast Tile Register Preconfigure
; CHECK-NEXT: Eliminate PHI nodes for register allocation
; CHECK-NEXT: Two-Address instruction pass
; CHECK-NEXT: Fast Register Allocator