[llvm] 778a484 - [InlineAsm] Steal a bit to denote a register is foldable (#70738)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 3 09:37:12 PDT 2023
Author: Nick Desaulniers
Date: 2023-11-03T09:37:07-07:00
New Revision: 778a48468b5fce8deafb40be0704cb69b052a50a
URL: https://github.com/llvm/llvm-project/commit/778a48468b5fce8deafb40be0704cb69b052a50a
DIFF: https://github.com/llvm/llvm-project/commit/778a48468b5fce8deafb40be0704cb69b052a50a.diff
LOG: [InlineAsm] Steal a bit to denote a register is foldable (#70738)
When using the inline asm constraint string "rm" (or "g"), we generally
would like the compiler to choose "r", but it is permitted to choose "m"
if there's register pressure. This is distinct from "r" in which the
register is not permitted to be spilled to the stack.
The decision of which to use must be made at some point. Currently, the
instruction selection frameworks (ISELs) make the choice, and the
register allocators had better be able to handle the result.
Steal a bit from Storage when using register operands to disambiguate
between the two cases. Add helpers/getters/setters, and print in MIR
when such a register is foldable.
The getter will later be used by the register allocation frameworks (and
asserted by the ISELs) while the setters will be used by the instruction
selection frameworks.
Link: https://github.com/llvm/llvm-project/issues/20571
Added:
Modified:
llvm/include/llvm/CodeGen/MachineInstr.h
llvm/include/llvm/IR/InlineAsm.h
llvm/lib/CodeGen/MachineInstr.cpp
llvm/lib/CodeGen/TargetInstrInfo.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 4877f43e8578d1c..bd72ac23fc9c08e 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1364,6 +1364,10 @@ class MachineInstr
return getOpcode() == TargetOpcode::INLINEASM ||
getOpcode() == TargetOpcode::INLINEASM_BR;
}
+ /// Returns true if the register operand can be folded with a load or store
+ /// into a frame index. Does so by checking the InlineAsm::Flag immediate
+ /// operand at OpId - 1.
+ bool mayFoldInlineAsmRegOp(unsigned OpId) const;
bool isStackAligningInlineAsm() const;
InlineAsm::AsmDialect getInlineAsmDialect() const;
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 969ad42816a7e52..e5f506e5694daf2 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -291,18 +291,23 @@ class InlineAsm final : public Value {
// Bits 30-16 - A ConstraintCode:: value indicating the original
// constraint code. (MemConstraintCode)
// Else:
- // Bits 30-16 - The register class ID to use for the operand. (RegClass)
+ // Bits 29-16 - The register class ID to use for the operand. (RegClass)
+ // Bit 30 - If the register is permitted to be spilled.
+ // (RegMayBeFolded)
+ // Defaults to false for "r"; may be set for constraints like
+ // "rm" (or "g").
//
- // As such, MatchedOperandNo, MemConstraintCode, and RegClass are views of
- // the same slice of bits, but are mutually exclusive depending on the
- // fields IsMatched then KindField.
+ // As such, MatchedOperandNo, MemConstraintCode, and
+ // (RegClass+RegMayBeFolded) are views of the same slice of bits, but are
+ // mutually exclusive depending on the fields IsMatched then KindField.
class Flag {
uint32_t Storage;
using KindField = Bitfield::Element<Kind, 0, 3, Kind::Func>;
using NumOperands = Bitfield::Element<unsigned, 3, 13>;
using MatchedOperandNo = Bitfield::Element<unsigned, 16, 15>;
using MemConstraintCode = Bitfield::Element<ConstraintCode, 16, 15, ConstraintCode::Max>;
- using RegClass = Bitfield::Element<unsigned, 16, 15>;
+ using RegClass = Bitfield::Element<unsigned, 16, 14>;
+ using RegMayBeFolded = Bitfield::Element<bool, 30, 1>;
using IsMatched = Bitfield::Element<bool, 31, 1>;
@@ -413,6 +418,26 @@ class InlineAsm final : public Value {
"Flag is not a memory or function constraint!");
Bitfield::set<MemConstraintCode>(Storage, ConstraintCode::Unknown);
}
+
+ /// Set a bit to denote that while this operand is some kind of register
+ /// (use, def, ...), a memory flag did appear in the original constraint
+ /// list. This is set by the instruction selection framework, and consumed
+ /// by the register allocator. While the register allocator is generally
+ /// responsible for spilling registers, we need to be able to distinguish
+ /// between registers that the register allocator has permission to fold
+ /// ("rm") vs ones it does not ("r"). This is because the inline asm may use
+ /// instructions which don't support memory addressing modes for that
+ /// operand.
+ void setRegMayBeFolded(bool B) {
+ assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
+ "Must be reg");
+ Bitfield::set<RegMayBeFolded>(Storage, B);
+ }
+ bool getRegMayBeFolded() const {
+ assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
+ "Must be reg");
+ return Bitfield::get<RegMayBeFolded>(Storage);
+ }
};
static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 048563cc2bcc4e4..9e7b4df2576feee 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1792,6 +1792,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (F.isUseOperandTiedToDef(TiedTo))
OS << " tiedto:$" << TiedTo;
+ if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
+ F.isRegUseKind()) &&
+ F.getRegMayBeFolded()) {
+ OS << " foldable";
+ }
+
OS << ']';
// Compute the index of the next operand descriptor.
@@ -2526,3 +2532,20 @@ void MachineInstr::insert(mop_iterator InsertBefore,
tieOperands(Tie1, Tie2);
}
}
+
+bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
+ assert(OpId && "expected non-zero operand id");
+ assert(isInlineAsm() && "should only be used on inline asm");
+
+ if (!getOperand(OpId).isReg())
+ return false;
+
+ const MachineOperand &MD = getOperand(OpId - 1);
+ if (!MD.isImm())
+ return false;
+
+ InlineAsm::Flag F(MD.getImm());
+ if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
+ return F.getRegMayBeFolded();
+ return false;
+}
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index fe7efb73a2dce83..3013a768bc4d566 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1639,6 +1639,10 @@ std::string TargetInstrInfo::createMIROperandComment(
if (F.isUseOperandTiedToDef(TiedTo))
OS << " tiedto:$" << TiedTo;
+ if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
+ F.getRegMayBeFolded())
+ OS << " foldable";
+
return OS.str();
}
More information about the llvm-commits
mailing list