[llvm] [InlineAsm] Steal a bit to denote a register is spillable (PR #70738)

Nick Desaulniers via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 31 14:42:34 PDT 2023


https://github.com/nickdesaulniers updated https://github.com/llvm/llvm-project/pull/70738

From 9a607c2323703c3936a84681299460c3779e62b9 Mon Sep 17 00:00:00 2001
From: Nick Desaulniers <ndesaulniers at google.com>
Date: Mon, 30 Oct 2023 15:19:27 -0700
Subject: [PATCH 1/2] [InlineAsm] Steal a bit to denote a register is spillable

When using the inline asm constraint string "rm" (or "g"), we generally
would like the compiler to choose "r", but it is permitted to choose "m"
if there's register pressure. This is distinct from "r", for which the
register is not permitted to be spilled to the stack.
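
For reference, a minimal user-level example of such a constraint (the
function and asm body are hypothetical, shown only to illustrate "rm"):

  void foo(int x) {
    // "rm" lets the compiler pick either a register or a stack slot for
    // x; "r" alone would forbid the memory form.
    asm volatile("# %0" : : "rm"(x));
  }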

The decision of which to use must be made at some point.  Currently, the
instruction selection frameworks (ISELs) make the choice, and the register
allocators must be able to handle the result.

Steal a bit from Storage when using register operands to disambiguate
between the two cases.  Add helpers/getters/setters, and print in MIR
when such a register is spillable.

The getter will later be used by the register allocation frameworks (and
asserted on by the ISELs), while the setter will be used by the instruction
selection frameworks.
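
A minimal sketch of the intended flow, assuming the PATCH 1/2 names below
(PATCH 2/2 renames them) and InlineAsm::Flag's (Kind, NumOps) constructor:

  // ISEL side: the constraint list contained both 'r' and 'm', so mark
  // the register-use operand as one the register allocator may spill.
  InlineAsm::Flag F(InlineAsm::Kind::RegUse, /*NumOps=*/1);
  F.setRegMayBeSpilled(true);

  // RA side: only spill such an operand when the constraint permits it.
  if (F.isRegUseKind() && F.getRegMayBeSpilled()) {
    // ... the operand may be rewritten as a stack-slot access ...
  }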
---
 llvm/include/llvm/CodeGen/MachineInstr.h |  3 ++
 llvm/include/llvm/IR/InlineAsm.h         | 35 ++++++++++++++++++++----
 llvm/lib/CodeGen/MachineInstr.cpp        | 23 ++++++++++++++++
 llvm/lib/CodeGen/TargetInstrInfo.cpp     |  4 +++
 4 files changed, 60 insertions(+), 5 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 4877f43e8578d1c..93e8ff389d65673 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1364,6 +1364,9 @@ class MachineInstr
     return getOpcode() == TargetOpcode::INLINEASM ||
            getOpcode() == TargetOpcode::INLINEASM_BR;
   }
+  /// Returns true if the memory operand can be folded. Does so by checking the
+  /// InlineAsm::Flag immediate operand at OpId - 1.
+  bool mayFoldInlineAsmMemOp(unsigned OpId) const;
 
   bool isStackAligningInlineAsm() const;
   InlineAsm::AsmDialect getInlineAsmDialect() const;
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 969ad42816a7e52..2d395a53608b0b7 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -291,18 +291,23 @@ class InlineAsm final : public Value {
   //     Bits 30-16 - A ConstraintCode:: value indicating the original
   //                  constraint code. (MemConstraintCode)
   //   Else:
-  //     Bits 30-16 - The register class ID to use for the operand. (RegClass)
+  //     Bits 29-16 - The register class ID to use for the operand. (RegClass)
+  //     Bit  30    - If the register is permitted to be spilled.
+  //                  (RegMayBeSpilled)
+  //                  Defaults to false ("r"); may be set for constraints
+  //                  like "rm" (or "g").
   //
-  //   As such, MatchedOperandNo, MemConstraintCode, and RegClass are views of
-  //   the same slice of bits, but are mutually exclusive depending on the
-  //   fields IsMatched then KindField.
+  //   As such, MatchedOperandNo, MemConstraintCode, and
+  //   (RegClass+RegMayBeSpilled) are views of the same slice of bits, but are
+  //   mutually exclusive depending on the fields IsMatched then KindField.
   class Flag {
     uint32_t Storage;
     using KindField = Bitfield::Element<Kind, 0, 3, Kind::Func>;
     using NumOperands = Bitfield::Element<unsigned, 3, 13>;
     using MatchedOperandNo = Bitfield::Element<unsigned, 16, 15>;
     using MemConstraintCode = Bitfield::Element<ConstraintCode, 16, 15, ConstraintCode::Max>;
-    using RegClass = Bitfield::Element<unsigned, 16, 15>;
+    using RegClass = Bitfield::Element<unsigned, 16, 14>;
+    using RegMayBeSpilled = Bitfield::Element<bool, 30, 1>;
     using IsMatched = Bitfield::Element<bool, 31, 1>;
 
 
@@ -413,6 +418,26 @@ class InlineAsm final : public Value {
              "Flag is not a memory or function constraint!");
       Bitfield::set<MemConstraintCode>(Storage, ConstraintCode::Unknown);
     }
+
+    /// Set a bit to denote that while this operand is some kind of register
+    /// (use, def, ...), a memory flag did appear in the original constraint
+    /// list.  This is set by the instruction selection framework, and consumed
+    /// by the register allocator. While the register allocator is generally
+    /// responsible for spilling registers, we need to be able to distinguish
+    /// between registers that the register allocator has permission to spill
+    /// ("rm") vs ones it does not ("r"). This is because the inline asm may use
+    /// instructions which don't support memory addressing modes for that
+    /// operand.
+    void setRegMayBeSpilled(bool B) {
+      assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
+             "Must be reg");
+      Bitfield::set<RegMayBeSpilled>(Storage, B);
+    }
+    bool getRegMayBeSpilled() const {
+      assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
+             "Must be reg");
+      return Bitfield::get<RegMayBeSpilled>(Storage);
+    }
   };
 
   static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 048563cc2bcc4e4..92c789e85a205b4 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1792,6 +1792,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
       if (F.isUseOperandTiedToDef(TiedTo))
         OS << " tiedto:$" << TiedTo;
 
+      if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
+           F.isRegUseKind()) &&
+          F.getRegMayBeSpilled()) {
+        OS << " spillable";
+      }
+
       OS << ']';
 
       // Compute the index of the next operand descriptor.
@@ -2526,3 +2532,20 @@ void MachineInstr::insert(mop_iterator InsertBefore,
     tieOperands(Tie1, Tie2);
   }
 }
+
+bool MachineInstr::mayFoldInlineAsmMemOp(unsigned OpId) const {
+  assert(OpId && "expected non-zero operand id");
+  assert(isInlineAsm() && "should only be used on inline asm");
+
+  if (!getOperand(OpId).isReg())
+    return false;
+
+  const MachineOperand &MD = getOperand(OpId - 1);
+  if (!MD.isImm())
+    return false;
+
+  InlineAsm::Flag F(MD.getImm());
+  if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
+    return F.getRegMayBeSpilled();
+  return false;
+}
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index fe7efb73a2dce83..bcf9105ea64ca96 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1639,6 +1639,10 @@ std::string TargetInstrInfo::createMIROperandComment(
   if (F.isUseOperandTiedToDef(TiedTo))
     OS << " tiedto:$" << TiedTo;
 
+  if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
+      F.getRegMayBeSpilled())
+    OS << " spillable";
+
   return OS.str();
 }
 

From 14d69cf1ee4abca98a175ff5ab0cd4be2ee21151 Mon Sep 17 00:00:00 2001
From: Nick Desaulniers <ndesaulniers at google.com>
Date: Tue, 31 Oct 2023 13:03:36 -0700
Subject: [PATCH 2/2] s/spillable/foldable/

---
 llvm/include/llvm/CodeGen/MachineInstr.h |  7 ++++---
 llvm/include/llvm/IR/InlineAsm.h         | 12 ++++++------
 llvm/lib/CodeGen/MachineInstr.cpp        |  8 ++++----
 llvm/lib/CodeGen/TargetInstrInfo.cpp     |  4 ++--
 4 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 93e8ff389d65673..bd72ac23fc9c08e 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1364,9 +1364,10 @@ class MachineInstr
     return getOpcode() == TargetOpcode::INLINEASM ||
            getOpcode() == TargetOpcode::INLINEASM_BR;
   }
-  /// Returns true if the memory operand can be folded. Does so by checking the
-  /// InlineAsm::Flag immediate operand at OpId - 1.
-  bool mayFoldInlineAsmMemOp(unsigned OpId) const;
+  /// Returns true if the register operand can be folded with a load or store
+  /// into a frame index. Does so by checking the InlineAsm::Flag immediate
+  /// operand at OpId - 1.
+  bool mayFoldInlineAsmRegOp(unsigned OpId) const;
 
   bool isStackAligningInlineAsm() const;
   InlineAsm::AsmDialect getInlineAsmDialect() const;
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 2d395a53608b0b7..05f1c081b26c48f 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -293,7 +293,7 @@ class InlineAsm final : public Value {
   //   Else:
   //     Bits 29-16 - The register class ID to use for the operand. (RegClass)
   //     Bit  30    - If the register is permitted to be spilled.
-  //                  (RegMayBeSpilled)
+  //                  (RegMayBeFolded)
   //                  Defaults to false ("r"); may be set for constraints
   //                  like "rm" (or "g").
   //
@@ -307,7 +307,7 @@ class InlineAsm final : public Value {
     using MatchedOperandNo = Bitfield::Element<unsigned, 16, 15>;
     using MemConstraintCode = Bitfield::Element<ConstraintCode, 16, 15, ConstraintCode::Max>;
     using RegClass = Bitfield::Element<unsigned, 16, 14>;
-    using RegMayBeSpilled = Bitfield::Element<bool, 30, 1>;
+    using RegMayBeFolded = Bitfield::Element<bool, 30, 1>;
     using IsMatched = Bitfield::Element<bool, 31, 1>;
 
 
@@ -428,15 +428,15 @@ class InlineAsm final : public Value {
     /// ("rm") vs ones it does not ("r"). This is because the inline asm may use
     /// instructions which don't support memory addressing modes for that
     /// operand.
-    void setRegMayBeSpilled(bool B) {
+    void setRegMayBeFolded(bool B) {
       assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
              "Must be reg");
-      Bitfield::set<RegMayBeSpilled>(Storage, B);
+      Bitfield::set<RegMayBeFolded>(Storage, B);
     }
-    bool getRegMayBeSpilled() const {
+    bool getRegMayBeFolded() const {
       assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
              "Must be reg");
-      return Bitfield::get<RegMayBeSpilled>(Storage);
+      return Bitfield::get<RegMayBeFolded>(Storage);
     }
   };
 
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 92c789e85a205b4..9e7b4df2576feee 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1794,8 +1794,8 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
 
       if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
            F.isRegUseKind()) &&
-          F.getRegMayBeSpilled()) {
-        OS << " spillable";
+          F.getRegMayBeFolded()) {
+        OS << " foldable";
       }
 
       OS << ']';
@@ -2533,7 +2533,7 @@ void MachineInstr::insert(mop_iterator InsertBefore,
   }
 }
 
-bool MachineInstr::mayFoldInlineAsmMemOp(unsigned OpId) const {
+bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
   assert(OpId && "expected non-zero operand id");
   assert(isInlineAsm() && "should only be used on inline asm");
 
@@ -2546,6 +2546,6 @@ bool MachineInstr::mayFoldInlineAsmMemOp(unsigned OpId) const {
 
   InlineAsm::Flag F(MD.getImm());
   if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
-    return F.getRegMayBeSpilled();
+    return F.getRegMayBeFolded();
   return false;
 }
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index bcf9105ea64ca96..3013a768bc4d566 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1640,8 +1640,8 @@ std::string TargetInstrInfo::createMIROperandComment(
     OS << " tiedto:$" << TiedTo;
 
   if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
-      F.getRegMayBeSpilled())
-    OS << " spillable";
+      F.getRegMayBeFolded())
+    OS << " foldable";
 
   return OS.str();
 }
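
With both patches applied, a register-allocation-side consumer could query
the new bit through the MachineInstr helper; a minimal sketch (MI and OpId
are assumed to come from the caller's operand walk):

  // Before spilling a virtual register used by an INLINEASM, check
  // whether the original constraint ("rm"/"g") permits folding a stack
  // slot directly into the asm operand.
  if (MI.isInlineAsm() && MI.mayFoldInlineAsmRegOp(OpId)) {
    // OK to rewrite the register operand as a frame-index memory operand.
  }

In MIR dumps, the operand descriptor then carries a trailing token, roughly
like the following (register class and operand numbering are illustrative,
not taken from a real test):

  INLINEASM &"# $0" [attdialect], $0:[regdef:gr32 foldable], def %0:gr32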


