[llvm] reapply "[TargetInstrInfo] enable foldMemoryOperand for InlineAsm (#70743)" (PR #72910)
Nick Desaulniers via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 20 11:59:51 PST 2023
https://github.com/nickdesaulniers created https://github.com/llvm/llvm-project/pull/72910
This reverts commit 42204c94ba9fcb0b4b1335e648ce140a3eef8a9d.
It was accidentally backed out.
From f07bf059f40280a7cfc0a019d2b841d460352d7c Mon Sep 17 00:00:00 2001
From: Nick Desaulniers <ndesaulniers at google.com>
Date: Mon, 20 Nov 2023 11:55:38 -0800
Subject: [PATCH] reapply "[TargetInstrInfo] enable foldMemoryOperand for
InlineAsm (#70743)"
This reverts commit 42204c94ba9fcb0b4b1335e648ce140a3eef8a9d.
It was accidentally backed out.
---
llvm/include/llvm/CodeGen/TargetInstrInfo.h | 10 ++++
llvm/lib/CodeGen/TargetInstrInfo.cpp | 62 +++++++++++++++++++++
2 files changed, 72 insertions(+)
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index fe130d282ded15e..de065849eaa6ebc 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -2188,6 +2188,16 @@ class TargetInstrInfo : public MCInstrInfo {
// Get the call frame size just before MI.
unsigned getCallFrameSizeAt(MachineInstr &MI) const;
+ /// Fills in the necessary MachineOperands to refer to a frame index.
+ /// The best way to understand this is to print `asm(""::"m"(x));` after
+ /// finalize-isel. Example:
+ /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
+ /// we would add placeholders for: ^ ^ ^ ^
+ virtual void
+ getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops) const {
+ llvm_unreachable("unknown number of operands necessary");
+ }
+
private:
mutable std::unique_ptr<MIRFormatter> Formatter;
unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 3013a768bc4d566..5ede36505b5b00c 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -565,6 +565,64 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
return NewMI;
}
+static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
+ const TargetInstrInfo &TII) {
+ MachineOperand &MO = MI->getOperand(OpNo);
+ const VirtRegInfo &RI = AnalyzeVirtRegInBundle(*MI, MO.getReg());
+
+ // If the machine operand is tied, untie it first.
+ if (MO.isTied()) {
+ unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
+ MI->untieRegOperand(OpNo);
+ // Intentional recursion!
+ foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
+ }
+
+ // Change the operand from a register to a frame index.
+ MO.ChangeToFrameIndex(FI, MO.getTargetFlags());
+
+ SmallVector<MachineOperand, 4> NewOps;
+ TII.getFrameIndexOperands(NewOps);
+ assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
+ MI->insert(MI->operands_begin() + OpNo + 1, NewOps);
+
+ // Change the previous operand to a MemKind InlineAsm::Flag. The second param
+ // is the per-target number of operands that represent the memory operand
+ // excluding this one (MD). This includes MO.
+ InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size() + 1);
+ F.setMemConstraint(InlineAsm::ConstraintCode::m);
+ MachineOperand &MD = MI->getOperand(OpNo - 1);
+ MD.setImm(F);
+
+ // Update mayload/maystore metadata.
+ MachineOperand &ExtraMO = MI->getOperand(InlineAsm::MIOp_ExtraInfo);
+ if (RI.Reads)
+ ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
+ if (RI.Writes)
+ ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
+}
+
+// Returns nullptr if not possible to fold.
+static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
+ ArrayRef<unsigned> Ops, int FI,
+ const TargetInstrInfo &TII) {
+ assert(MI.isInlineAsm() && "wrong opcode");
+ if (Ops.size() > 1)
+ return nullptr;
+ unsigned Op = Ops[0];
+ assert(Op && "should never be first operand");
+ assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
+
+ if (!MI.mayFoldInlineAsmRegOp(Op))
+ return nullptr;
+
+ MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
+
+ foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
+
+ return &NewMI;
+}
+
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops, int FI,
LiveIntervals *LIS,
@@ -612,6 +670,8 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
MBB->insert(MI, NewMI);
+ } else if (MI.isInlineAsm()) {
+ NewMI = foldInlineAsmMemOperand(MI, Ops, FI, *this);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
@@ -683,6 +743,8 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
if (NewMI)
NewMI = &*MBB.insert(MI, NewMI);
+ } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
+ NewMI = foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
More information about the llvm-commits
mailing list