[llvm] r193811 - Add support for stack map generation in the X86 backend.

Sean Silva silvas at purdue.edu
Thu Oct 31 15:41:38 PDT 2013


+  const MCSection *StackMapSection =
+    OutContext.getMachOSection("__LLVM_STACKMAPS", "__llvm_stackmaps", 0,
+                               SectionKind::getMetadata());

Did a path forward for non-Darwin targets ever come up in the discussions?
Is this "hardcoded" value amenable to replacement with something more
target-neutral without excessive hacking (e.g. without having to thread
some state through multiple layers of objects)? (I'm not familiar with the
layering of the APIs at play here, or with how "far away" the necessary
platform knowledge is.)

-- Sean Silva


On Thu, Oct 31, 2013 at 6:11 PM, Andrew Trick <atrick at apple.com> wrote:

> Author: atrick
> Date: Thu Oct 31 17:11:56 2013
> New Revision: 193811
>
> URL: http://llvm.org/viewvc/llvm-project?rev=193811&view=rev
> Log:
> Add support for stack map generation in the X86 backend.
>
> Originally implemented by Lang Hames.
>
> Added:
>     llvm/trunk/include/llvm/CodeGen/StackMaps.h
>     llvm/trunk/lib/CodeGen/StackMaps.cpp
>     llvm/trunk/test/CodeGen/X86/patchpoint.ll
>     llvm/trunk/test/CodeGen/X86/stackmap.ll
> Modified:
>     llvm/trunk/lib/CodeGen/CMakeLists.txt
>     llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
>     llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp
>     llvm/trunk/lib/Target/X86/X86AsmPrinter.h
>     llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
>     llvm/trunk/lib/Target/X86/X86MCInstLower.cpp
>
> Added: llvm/trunk/include/llvm/CodeGen/StackMaps.h
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/StackMaps.h?rev=193811&view=auto
>
> ==============================================================================
> --- llvm/trunk/include/llvm/CodeGen/StackMaps.h (added)
> +++ llvm/trunk/include/llvm/CodeGen/StackMaps.h Thu Oct 31 17:11:56 2013
> @@ -0,0 +1,107 @@
> +//===------------------- StackMaps.h - StackMaps ----------------*- C++ -*-===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
> +//===----------------------------------------------------------------------===//
> +
> +#ifndef LLVM_STACKMAPS
> +#define LLVM_STACKMAPS
> +
> +#include "llvm/ADT/SmallVector.h"
> +#include "llvm/CodeGen/MachineInstr.h"
> +#include <map>
> +#include <vector>
> +
> +namespace llvm {
> +
> +class AsmPrinter;
> +class MCExpr;
> +
> +class StackMaps {
> +public:
> +  struct Location {
> +    enum LocationType { Unprocessed, Register, Direct, Indirect, Constant,
> +                        ConstantIndex };
> +    LocationType LocType;
> +    unsigned Reg;
> +    int64_t Offset;
> +    Location() : LocType(Unprocessed), Reg(0), Offset(0) {}
> +    Location(LocationType LocType, unsigned Reg, int64_t Offset)
> +      : LocType(LocType), Reg(Reg), Offset(Offset) {}
> +  };
> +
> +  // Typedef a function pointer for functions that parse sequences of operands
> +  // and return a Location, plus a new "next" operand iterator.
> +  typedef std::pair<Location, MachineInstr::const_mop_iterator>
> +    (*OperandParser)(MachineInstr::const_mop_iterator,
> +                     MachineInstr::const_mop_iterator);
> +
> +  // OpTypes are used to encode information about the following logical
> +  // operand (which may consist of several MachineOperands) for the
> +  // OpParser.
> +  typedef enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp } OpType;
> +
> +  StackMaps(AsmPrinter &AP, OperandParser OpParser)
> +    : AP(AP), OpParser(OpParser) {}
> +
> +  /// This should be called by the MC lowering code _immediately_ before
> +  /// lowering the MI to an MCInst. It records where the operands for the
> +  /// instruction are stored, and outputs a label to record the offset of
> +  /// the call from the start of the text section.
> +  void recordStackMap(const MachineInstr &MI, uint32_t ID,
> +                      MachineInstr::const_mop_iterator MOI,
> +                      MachineInstr::const_mop_iterator MOE);
> +
> +  /// If there is any stack map data, create a stack map section and serialize
> +  /// the map info into it. This clears the stack map data structures
> +  /// afterwards.
> +  void serializeToStackMapSection();
> +
> +private:
> +
> +  typedef SmallVector<Location, 8> LocationVec;
> +
> +  struct CallsiteInfo {
> +    const MCExpr *CSOffsetExpr;
> +    unsigned ID;
> +    LocationVec Locations;
> +    CallsiteInfo() : CSOffsetExpr(0), ID(0) {}
> +    CallsiteInfo(const MCExpr *CSOffsetExpr, unsigned ID,
> +                 LocationVec Locations)
> +      : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(Locations) {}
> +  };
> +
> +  typedef std::vector<CallsiteInfo> CallsiteInfoList;
> +
> +  struct ConstantPool {
> +  private:
> +    typedef std::map<int64_t, size_t> ConstantsMap;
> +    std::vector<int64_t> ConstantsList;
> +    ConstantsMap ConstantIndexes;
> +
> +  public:
> +    size_t getNumConstants() const { return ConstantsList.size(); }
> +    int64_t getConstant(size_t Idx) const { return ConstantsList[Idx]; }
> +    size_t getConstantIndex(int64_t ConstVal) {
> +      size_t NextIdx = ConstantsList.size();
> +      ConstantsMap::const_iterator I =
> +        ConstantIndexes.insert(ConstantIndexes.end(),
> +                               std::make_pair(ConstVal, NextIdx));
> +      if (I->second == NextIdx)
> +        ConstantsList.push_back(ConstVal);
> +      return I->second;
> +    }
> +  };
> +
> +  AsmPrinter &AP;
> +  OperandParser OpParser;
> +  CallsiteInfoList CSInfos;
> +  ConstantPool ConstPool;
> +};
> +
> +}
> +
> +#endif // LLVM_STACKMAPS
>
> Modified: llvm/trunk/lib/CodeGen/CMakeLists.txt
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CMakeLists.txt?rev=193811&r1=193810&r2=193811&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/CMakeLists.txt (original)
> +++ llvm/trunk/lib/CodeGen/CMakeLists.txt Thu Oct 31 17:11:56 2013
> @@ -97,6 +97,7 @@ add_llvm_library(LLVMCodeGen
>    StackColoring.cpp
>    StackProtector.cpp
>    StackSlotColoring.cpp
> +  StackMaps.cpp
>    TailDuplication.cpp
>    TargetFrameLoweringImpl.cpp
>    TargetInstrInfo.cpp
>
> Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=193811&r1=193810&r2=193811&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
> +++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Thu Oct 31 17:11:56 2013
> @@ -33,6 +33,7 @@
>  #include "llvm/CodeGen/MachineModuleInfo.h"
>  #include "llvm/CodeGen/MachineRegisterInfo.h"
>  #include "llvm/CodeGen/SelectionDAG.h"
> +#include "llvm/CodeGen/StackMaps.h"
>  #include "llvm/DebugInfo.h"
>  #include "llvm/IR/CallingConv.h"
>  #include "llvm/IR/Constants.h"
> @@ -6879,6 +6880,8 @@ void SelectionDAGBuilder::visitPatchpoin
>      SDValue OpVal = getValue(CI.getArgOperand(i));
>      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
>        Ops.push_back(
> +        DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
> +      Ops.push_back(
>          DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
>      } else
>        Ops.push_back(OpVal);
>
> Added: llvm/trunk/lib/CodeGen/StackMaps.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/StackMaps.cpp?rev=193811&view=auto
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/StackMaps.cpp (added)
> +++ llvm/trunk/lib/CodeGen/StackMaps.cpp Thu Oct 31 17:11:56 2013
> @@ -0,0 +1,213 @@
> +//===---------------------------- StackMaps.cpp ---------------------------===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
> +//===----------------------------------------------------------------------===//
> +
> +#define DEBUG_TYPE "stackmaps"
> +
> +#include "llvm/CodeGen/StackMaps.h"
> +
> +#include "llvm/CodeGen/AsmPrinter.h"
> +#include "llvm/CodeGen/MachineInstr.h"
> +#include "llvm/MC/MCContext.h"
> +#include "llvm/MC/MCExpr.h"
> +#include "llvm/MC/MCSectionMachO.h"
> +#include "llvm/MC/MCStreamer.h"
> +#include "llvm/Support/Debug.h"
> +#include "llvm/Support/raw_ostream.h"
> +#include "llvm/Target/TargetOpcodes.h"
> +#include "llvm/Target/TargetMachine.h"
> +#include "llvm/Target/TargetRegisterInfo.h"
> +
> +#include <iterator>
> +
> +using namespace llvm;
> +
> +void StackMaps::recordStackMap(const MachineInstr &MI, uint32_t ID,
> +                               MachineInstr::const_mop_iterator MOI,
> +                               MachineInstr::const_mop_iterator MOE) {
> +
> +  MCContext &OutContext = AP.OutStreamer.getContext();
> +  MCSymbol *MILabel = OutContext.CreateTempSymbol();
> +  AP.OutStreamer.EmitLabel(MILabel);
> +
> +  LocationVec CallsiteLocs;
> +
> +  while (MOI != MOE) {
> +    std::pair<Location, MachineInstr::const_mop_iterator> ParseResult =
> +      OpParser(MOI, MOE);
> +
> +    Location &Loc = ParseResult.first;
> +
> +    // Move large constants into the constant pool.
> +    if (Loc.LocType == Location::Constant && (Loc.Offset & ~0xFFFFFFFFULL)) {
> +      Loc.LocType = Location::ConstantIndex;
> +      Loc.Offset = ConstPool.getConstantIndex(Loc.Offset);
> +    }
> +
> +    CallsiteLocs.push_back(Loc);
> +    MOI = ParseResult.second;
> +  }
> +
> +  const MCExpr *CSOffsetExpr = MCBinaryExpr::CreateSub(
> +    MCSymbolRefExpr::Create(MILabel, OutContext),
> +    MCSymbolRefExpr::Create(AP.CurrentFnSym, OutContext),
> +    OutContext);
> +
> +  CSInfos.push_back(CallsiteInfo(CSOffsetExpr, ID, CallsiteLocs));
> +}
> +
> +/// serializeToStackMapSection conceptually populates the following fields:
> +///
> +/// uint32 : Reserved (header)
> +/// uint32 : NumConstants
> +/// int64  : Constants[NumConstants]
> +/// uint32 : NumRecords
> +/// StkMapRecord[NumRecords] {
> +///   uint32 : PatchPoint ID
> +///   uint32 : Instruction Offset
> +///   uint16 : Reserved (record flags)
> +///   uint16 : NumLocations
> +///   Location[NumLocations] {
> +///     uint8  : Register | Direct | Indirect | Constant | ConstantIndex
> +///     uint8  : Reserved (location flags)
> +///     uint16 : Dwarf RegNum
> +///     int32  : Offset
> +///   }
> +/// }
> +///
> +/// Location Encoding, Type, Value:
> +///   0x1, Register, Reg                 (value in register)
> +///   0x2, Direct, Reg + Offset          (frame index)
> +///   0x3, Indirect, [Reg + Offset]      (spilled value)
> +///   0x4, Constant, Offset              (small constant)
> +///   0x5, ConstIndex, Constants[Offset] (large constant)
> +///
> +void StackMaps::serializeToStackMapSection() {
> +  // Bail out if there's no stack map data.
> +  if (CSInfos.empty())
> +    return;
> +
> +  MCContext &OutContext = AP.OutStreamer.getContext();
> +  const TargetRegisterInfo *TRI = AP.TM.getRegisterInfo();
> +
> +  // Create the section.
> +  const MCSection *StackMapSection =
> +    OutContext.getMachOSection("__LLVM_STACKMAPS", "__llvm_stackmaps", 0,
> +                               SectionKind::getMetadata());
> +  AP.OutStreamer.SwitchSection(StackMapSection);
> +
> +  // Emit a dummy symbol to force section inclusion.
> +  AP.OutStreamer.EmitLabel(
> +    OutContext.GetOrCreateSymbol(Twine("__LLVM_StackMaps")));
> +
> +  // Serialize data.
> +  const char *WSMP = "Stack Maps: ";
> +  const MCRegisterInfo &MCRI = *OutContext.getRegisterInfo();
> +
> +  DEBUG(dbgs() << "********** Stack Map Output **********\n");
> +
> +  // Header.
> +  AP.OutStreamer.EmitIntValue(0, 4);
> +
> +  // Num constants.
> +  AP.OutStreamer.EmitIntValue(ConstPool.getNumConstants(), 4);
> +
> +  // Constant pool entries.
> +  for (unsigned i = 0; i < ConstPool.getNumConstants(); ++i)
> +    AP.OutStreamer.EmitIntValue(ConstPool.getConstant(i), 8);
> +
> +  DEBUG(dbgs() << WSMP << "#callsites = " << CSInfos.size() << "\n");
> +  AP.OutStreamer.EmitIntValue(CSInfos.size(), 4);
> +
> +  for (CallsiteInfoList::const_iterator CSII = CSInfos.begin(),
> +                                        CSIE = CSInfos.end();
> +       CSII != CSIE; ++CSII) {
> +
> +    unsigned CallsiteID = CSII->ID;
> +    const LocationVec &CSLocs = CSII->Locations;
> +
> +    DEBUG(dbgs() << WSMP << "callsite " << CallsiteID << "\n");
> +
> +    // Verify stack map entry. It's better to communicate a problem to the
> +    // runtime than crash in case of in-process compilation. Currently, we do
> +    // simple overflow checks, but we may eventually communicate other
> +    // compilation errors this way.
> +    if (CSLocs.size() > UINT16_MAX) {
> +      AP.OutStreamer.EmitIntValue(UINT32_MAX, 4); // Invalid ID.
> +      AP.OutStreamer.EmitValue(CSII->CSOffsetExpr, 4);
> +      AP.OutStreamer.EmitIntValue(0, 2); // Reserved.
> +      AP.OutStreamer.EmitIntValue(0, 2); // 0 locations.
> +      continue;
> +    }
> +
> +    AP.OutStreamer.EmitIntValue(CallsiteID, 4);
> +    AP.OutStreamer.EmitValue(CSII->CSOffsetExpr, 4);
> +
> +    // Reserved for flags.
> +    AP.OutStreamer.EmitIntValue(0, 2);
> +
> +    DEBUG(dbgs() << WSMP << "  has " << CSLocs.size() << " locations\n");
> +
> +    AP.OutStreamer.EmitIntValue(CSLocs.size(), 2);
> +
> +    unsigned operIdx = 0;
> +    for (LocationVec::const_iterator LocI = CSLocs.begin(), LocE = CSLocs.end();
> +         LocI != LocE; ++LocI, ++operIdx) {
> +      const Location &Loc = *LocI;
> +      DEBUG(
> +        dbgs() << WSMP << "  Loc " << operIdx << ": ";
> +        switch (Loc.LocType) {
> +        case Location::Unprocessed:
> +          dbgs() << "<Unprocessed operand>";
> +          break;
> +        case Location::Register:
> +          dbgs() << "Register " << MCRI.getName(Loc.Reg);
> +          break;
> +        case Location::Direct:
> +          dbgs() << "Direct " << MCRI.getName(Loc.Reg);
> +          if (Loc.Offset)
> +            dbgs() << " + " << Loc.Offset;
> +          break;
> +        case Location::Indirect:
> +          dbgs() << "Indirect " << MCRI.getName(Loc.Reg)
> +                 << " + " << Loc.Offset;
> +          break;
> +        case Location::Constant:
> +          dbgs() << "Constant " << Loc.Offset;
> +          break;
> +        case Location::ConstantIndex:
> +          dbgs() << "Constant Index " << Loc.Offset;
> +          break;
> +        }
> +        dbgs() << "\n";
> +      );
> +
> +      unsigned RegNo = 0;
> +      if(Loc.Reg) {
> +        RegNo = MCRI.getDwarfRegNum(Loc.Reg, false);
> +        for (MCSuperRegIterator SR(Loc.Reg, TRI);
> +             SR.isValid() && (int)RegNo < 0; ++SR) {
> +          RegNo = TRI->getDwarfRegNum(*SR, false);
> +        }
> +      }
> +      else {
> +        assert((Loc.LocType != Location::Register
> +                && Loc.LocType != Location::Register) &&
> +               "Missing location register");
> +      }
> +      AP.OutStreamer.EmitIntValue(Loc.LocType, 1);
> +      AP.OutStreamer.EmitIntValue(0, 1); // Reserved location flags.
> +      AP.OutStreamer.EmitIntValue(RegNo, 2);
> +      AP.OutStreamer.EmitIntValue(Loc.Offset, 4);
> +    }
> +  }
> +
> +  AP.OutStreamer.AddBlankLine();
> +
> +  CSInfos.clear();
> +}
>
> Modified: llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp?rev=193811&r1=193810&r2=193811&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp Thu Oct 31 17:11:56 2013
> @@ -626,6 +626,8 @@ void X86AsmPrinter::EmitEndOfAsmFile(Mod
>        OutStreamer.AddBlankLine();
>      }
>
> +    SM.serializeToStackMapSection();
> +
>      // Funny Darwin hack: This flag tells the linker that no global symbols
>      // contain code that falls through to other global symbols (e.g. the obvious
>      // implementation of multiple entry points).  If this doesn't occur, the
>
> Modified: llvm/trunk/lib/Target/X86/X86AsmPrinter.h
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86AsmPrinter.h?rev=193811&r1=193810&r2=193811&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86AsmPrinter.h (original)
> +++ llvm/trunk/lib/Target/X86/X86AsmPrinter.h Thu Oct 31 17:11:56 2013
> @@ -16,6 +16,7 @@
>  #include "llvm/CodeGen/AsmPrinter.h"
>  #include "llvm/CodeGen/MachineModuleInfo.h"
>  #include "llvm/CodeGen/ValueTypes.h"
> +#include "llvm/CodeGen/StackMaps.h"
>  #include "llvm/Support/Compiler.h"
>
>  namespace llvm {
> @@ -24,9 +25,20 @@ class MCStreamer;
>
>  class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
>    const X86Subtarget *Subtarget;
> +  StackMaps SM;
> +
> +  // Parses operands of PATCHPOINT and STACKMAP to produce stack map Location
> +  // structures. Returns a result location and an iterator to the operand
> +  // immediately following the operands consumed.
> +  //
> +  // This method is implemented in X86MCInstLower.cpp.
> +  static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
> +    stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
> +                          MachineInstr::const_mop_iterator MOE);
> +
>   public:
>    explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
> -    : AsmPrinter(TM, Streamer) {
> +    : AsmPrinter(TM, Streamer), SM(*this, stackmapOperandParser) {
>      Subtarget = &TM.getSubtarget<X86Subtarget>();
>    }
>
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=193811&r1=193810&r2=193811&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Thu Oct 31 17:11:56 2013
> @@ -24,6 +24,7 @@
>  #include "llvm/CodeGen/MachineFrameInfo.h"
>  #include "llvm/CodeGen/MachineInstrBuilder.h"
>  #include "llvm/CodeGen/MachineRegisterInfo.h"
> +#include "llvm/CodeGen/StackMaps.h"
>  #include "llvm/IR/DerivedTypes.h"
>  #include "llvm/IR/LLVMContext.h"
>  #include "llvm/MC/MCAsmInfo.h"
> @@ -4192,10 +4193,44 @@ breakPartialRegDependency(MachineBasicBl
>    MI->addRegisterKilled(Reg, TRI, true);
>  }
>
> -MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
> -                                                  MachineInstr *MI,
> -                                           const SmallVectorImpl<unsigned> &Ops,
> -                                                  int FrameIndex) const {
> +static MachineInstr* foldPatchpoint(MachineFunction &MF,
> +                                    MachineInstr *MI,
> +                                    const SmallVectorImpl<unsigned> &Ops,
> +                                    int FrameIndex,
> +                                    const TargetInstrInfo &TII) {
> +  MachineInstr *NewMI =
> +    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
> +  MachineInstrBuilder MIB(MF, NewMI);
> +
> +  bool isPatchPoint = MI->getOpcode() == TargetOpcode::PATCHPOINT;
> +  unsigned StartIdx = isPatchPoint ? MI->getOperand(3).getImm() + 4 : 2;
> +
> +  // No need to fold the meta data and function arguments
> +  for (unsigned i = 0; i < StartIdx; ++i)
> +    MIB.addOperand(MI->getOperand(i));
> +
> +  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
> +    MachineOperand &MO = MI->getOperand(i);
> +    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
> +      MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
> +      MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
> +      addOffset(MIB, 0);
> +    }
> +    else
> +      MIB.addOperand(MO);
> +  }
> +  return NewMI;
> +}
> +
> +MachineInstr*
> +X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
> +                                    const SmallVectorImpl<unsigned> &Ops,
> +                                    int FrameIndex) const {
> +  // Special case stack map and patch point intrinsics.
> +  if (MI->getOpcode() == TargetOpcode::STACKMAP
> +      || MI->getOpcode() == TargetOpcode::PATCHPOINT) {
> +    return foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
> +  }
>    // Check switch flag
>    if (NoFusing) return NULL;
>
>
> Modified: llvm/trunk/lib/Target/X86/X86MCInstLower.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86MCInstLower.cpp?rev=193811&r1=193810&r2=193811&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86MCInstLower.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86MCInstLower.cpp Thu Oct 31 17:11:56 2013
> @@ -17,6 +17,7 @@
>  #include "X86COFFMachineModuleInfo.h"
>  #include "llvm/ADT/SmallString.h"
>  #include "llvm/CodeGen/MachineModuleInfoImpls.h"
> +#include "llvm/CodeGen/StackMaps.h"
>  #include "llvm/IR/Type.h"
>  #include "llvm/MC/MCAsmInfo.h"
>  #include "llvm/MC/MCContext.h"
> @@ -686,6 +687,123 @@ static void LowerTlsAddr(MCStreamer &Out
>      .addExpr(tlsRef));
>  }
>
> +static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
> +parseMemoryOperand(StackMaps::Location::LocationType LocTy,
> +                   MachineInstr::const_mop_iterator MOI,
> +                   MachineInstr::const_mop_iterator MOE) {
> +
> +  typedef StackMaps::Location Location;
> +
> +  assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
> +
> +  const MachineOperand &Base = *MOI;
> +  const MachineOperand &Scale = *(++MOI);
> +  const MachineOperand &Index = *(++MOI);
> +  const MachineOperand &Disp = *(++MOI);
> +  const MachineOperand &ZeroReg = *(++MOI);
> +
> +  // Sanity check for supported operand format.
> +  assert(Base.isReg() &&
> +         Scale.isImm() && Scale.getImm() == 1 &&
> +         Index.isReg() && Index.getReg() == 0 &&
> +         Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
> +         "Unsupported x86 memory operand sequence.");
> +
> +  return std::make_pair(
> +           Location(LocTy, Base.getReg(), Disp.getImm()), ++MOI);
> +}
> +
> +std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
> +X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
> +                                     MachineInstr::const_mop_iterator MOE) {
> +
> +  typedef StackMaps::Location Location;
> +
> +  const MachineOperand &MOP = *MOI;
> +  assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
> +         "Register mask and implicit operands should not be processed.");
> +
> +  if (MOP.isImm()) {
> +    switch (MOP.getImm()) {
> +    default: llvm_unreachable("Unrecognized operand type.");
> +    case StackMaps::DirectMemRefOp:
> +      return parseMemoryOperand(StackMaps::Location::Direct,
> +                                llvm::next(MOI), MOE);
> +    case StackMaps::IndirectMemRefOp:
> +      return parseMemoryOperand(StackMaps::Location::Indirect,
> +                                llvm::next(MOI), MOE);
> +    case StackMaps::ConstantOp: {
> +      ++MOI;
> +      assert(MOI->isImm() && "Expected constant operand.");
> +      int64_t Imm = MOI->getImm();
> +      return std::make_pair(Location(Location::Constant, 0, Imm), ++MOI);
> +    }
> +    }
> +  }
> +
> +  // Otherwise this is a reg operand.
> +  assert(MOP.isReg() && "Expected register operand here.");
> +  assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
> +         "Virtreg operands should have been rewritten before now.");
> +  return std::make_pair(Location(Location::Register, MOP.getReg(), 0), ++MOI);
> +}
> +
> +static MachineInstr::const_mop_iterator
> +getStackMapEndMOP(MachineInstr::const_mop_iterator MOI,
> +                  MachineInstr::const_mop_iterator MOE) {
> +  for (; MOI != MOE; ++MOI)
> +    if (MOI->isRegMask() || (MOI->isReg() && MOI->isImplicit()))
> +      break;
> +
> +  return MOI;
> +}
> +
> +static void LowerSTACKMAP(MCStreamer &OutStreamer,
> +                          X86MCInstLower &MCInstLowering,
> +                          StackMaps &SM,
> +                          const MachineInstr &MI)
> +{
> +  int64_t ID = MI.getOperand(0).getImm();
> +  unsigned NumNOPBytes = MI.getOperand(1).getImm();
> +
> +  assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
> +  SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), 2),
> +                    getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
> +  // Emit padding.
> +  for (unsigned i = 0; i < NumNOPBytes; ++i)
> +    OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
> +}
> +
> +static void LowerPATCHPOINT(MCStreamer &OutStreamer,
> +                            X86MCInstLower &MCInstLowering,
> +                            StackMaps &SM,
> +                            const MachineInstr &MI)
> +{
> +  int64_t ID = MI.getOperand(0).getImm();
> +  assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
> +
> +  // Get the number of arguments participating in the call. This number was
> +  // adjusted during call lowering by subtracting stack args.
> +  int64_t StackMapIdx = MI.getOperand(3).getImm() + 4;
> +  assert(StackMapIdx <= MI.getNumOperands() && "Patchpoint dropped args.");
> +
> +  SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), StackMapIdx),
> +                     getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
> +
> +  // Emit call. We need to know how many bytes we encoded here.
> +  unsigned EncodedBytes = 2;
> +  OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
> +                              .addReg(MI.getOperand(2).getReg()));
> +
> +  // Emit padding.
> +  unsigned NumNOPBytes = MI.getOperand(1).getImm();
> +  assert(NumNOPBytes >= EncodedBytes &&
> +         "Patchpoint can't request size less than the length of a call.");
> +
> +  for (unsigned i = EncodedBytes; i < NumNOPBytes; ++i)
> +    OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
> +}
> +
>  void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
>    X86MCInstLower MCInstLowering(*MF, *this);
>    switch (MI->getOpcode()) {
> @@ -775,6 +893,12 @@ void X86AsmPrinter::EmitInstruction(cons
>        .addExpr(DotExpr));
>      return;
>    }
> +
> +  case TargetOpcode::STACKMAP:
> +    return LowerSTACKMAP(OutStreamer, MCInstLowering, SM, *MI);
> +
> +  case TargetOpcode::PATCHPOINT:
> +    return LowerPATCHPOINT(OutStreamer, MCInstLowering, SM, *MI);
>    }
>
>    MCInst TmpInst;
>
> Added: llvm/trunk/test/CodeGen/X86/patchpoint.ll
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/patchpoint.ll?rev=193811&view=auto
>
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/patchpoint.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/patchpoint.ll Thu Oct 31 17:11:56 2013
> @@ -0,0 +1,48 @@
> +; RUN: llc < %s -march=x86-64 | FileCheck %s
> +
> +; Trivial patchpoint codegen
> +;
> +; FIXME: We should verify that the call target is materialize after
> +; the label immediately before the call.
> +; <rdar://15187295> [JS] llvm.webkit.patchpoint call target should be
> +; materialized in nop slide.
> +define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
> +entry:
> +; CHECK-LABEL: _trivial_patchpoint_codegen:
> +; CHECK:      Ltmp
> +; CHECK:      callq *%rax
> +; CHECK-NEXT: nop
> +; CHECK:      movq %rax, %[[REG:r.+]]
> +; CHECK:      callq *%rax
> +; CHECK-NEXT: nop
> +; CHECK:      movq %[[REG]], %rax
> +; CHECK:      ret
> +  %resolveCall2 = inttoptr i64 -559038736 to i8*
> +  %result = tail call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 2, i32 12, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
> +  %resolveCall3 = inttoptr i64 -559038737 to i8*
> +  tail call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 3, i32 12, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
> +  ret i64 %result
> +}
> +
> +; Caller frame metadata with stackmaps. This should not be optimized
> +; as a leaf function.
> +;
> +; CHECK-LABEL: _caller_meta_leaf
> +; CHECK: subq $24, %rsp
> +; CHECK: Ltmp
> +; CHECK: addq $24, %rsp
> +; CHECK: ret
> +define void @caller_meta_leaf() {
> +entry:
> +  %metadata = alloca i64, i32 3, align 8
> +  store i64 11, i64* %metadata
> +  store i64 12, i64* %metadata
> +  store i64 13, i64* %metadata
> +  call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 4, i32 0, i64* %metadata)
> +  ret void
> +}
> +
> +
> +declare void @llvm.experimental.stackmap(i32, i32, ...)
> +declare void @llvm.experimental.patchpoint.void(i32, i32, i8*, i32, ...)
> +declare i64 @llvm.experimental.patchpoint.i64(i32, i32, i8*, i32, ...)
>
> Added: llvm/trunk/test/CodeGen/X86/stackmap.ll
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stackmap.ll?rev=193811&view=auto
>
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stackmap.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/stackmap.ll Thu Oct 31 17:11:56 2013
> @@ -0,0 +1,205 @@
> +; RUN: llc < %s -march=x86-64 | FileCheck %s
> +;
> +; Note: Print verbose stackmaps using -debug-only=stackmaps.
> +
> +; CHECK-LABEL:  .section  __LLVM_STACKMAPS,__llvm_stackmaps
> +; CHECK-NEXT:  __LLVM_StackMaps:
> +; CHECK-NEXT:   .long   0
> +; Num LargeConstants
> +; CHECK-NEXT:   .long   1
> +; CHECK-NEXT:   .quad   4294967296
> +; Num Callsites
> +; CHECK-NEXT:   .long   8
> +
> +; Constant arguments
> +;
> +; CHECK-NEXT:   .long   1
> +; CHECK-NEXT:   .long   L{{.*}}-_constantargs
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .short  4
> +; SmallConstant
> +; CHECK-NEXT:   .byte   4
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .long   65535
> +; SmallConstant
> +; CHECK-NEXT:   .byte   4
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .long   65536
> +; SmallConstant
> +; CHECK-NEXT:   .byte   4
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .long   4294967295
> +; LargeConstant at index 0
> +; CHECK-NEXT:   .byte   5
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .long   0
> +
> +define void @constantargs() {
> +entry:
> +  %0 = inttoptr i64 12345 to i8*
> +  tail call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 1, i32 2, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
> +  ret void
> +}
> +
> +; Inline OSR Exit
> +;
> +; CHECK-NEXT:   .long   3
> +; CHECK-NEXT:   .long   L{{.*}}-_osrinline
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .short  2
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long   0
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long  0
> +define void @osrinline(i64 %a, i64 %b) {
> +entry:
> +  ; Runtime void->void call.
> +  call void inttoptr (i64 -559038737 to void ()*)()
> +  ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
> +  call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 3, i32 12, i64 %a, i64 %b)
> +  ret void
> +}
> +
> +; Cold OSR Exit
> +;
> +; 2 live variables in register.
> +;
> +; CHECK-NEXT:   .long  4
> +; CHECK-NEXT:   .long   L{{.*}}-_osrcold
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .short  2
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long   0
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long  0
> +define void @osrcold(i64 %a, i64 %b) {
> +entry:
> +  %test = icmp slt i64 %a, %b
> +  br i1 %test, label %ret, label %cold
> +cold:
> +  ; OSR patchpoint with 12-byte nop-slide and 2 live vars.
> +  %thunk = inttoptr i64 -559038737 to i8*
> +  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 4, i32 12, i8* %thunk, i32 0, i64 %a, i64 %b)
> +  unreachable
> +ret:
> +  ret void
> +}
> +
> +; Property Read
> +; CHECK-NEXT:  .long  5
> +; CHECK-NEXT:   .long   L{{.*}}-_propertyRead
> +; CHECK-NEXT:  .short  0
> +; CHECK-NEXT:  .short  0
> +;
> +; FIXME: There are currently no stackmap entries. After moving to
> +; AnyRegCC, we will have entries for the object and return value.
> +define i64 @propertyRead(i64* %obj) {
> +entry:
> +  %resolveRead = inttoptr i64 -559038737 to i8*
> +  %result = call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 5, i32 12, i8* %resolveRead, i32 1, i64* %obj)
> +  %add = add i64 %result, 3
> +  ret i64 %add
> +}
> +
> +; Property Write
> +; CHECK-NEXT:  .long  6
> +; CHECK-NEXT:   .long   L{{.*}}-_propertyWrite
> +; CHECK-NEXT:  .short  0
> +; CHECK-NEXT:  .short  0
> +;
> +; FIXME: There are currently no stackmap entries. After moving to
> +; AnyRegCC, we will have entries for the object and return value.
> +define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
> +entry:
> +  %resolveWrite = inttoptr i64 -559038737 to i8*
> +  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 6, i32 12, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
> +  ret void
> +}
> +
> +; Void JS Call
> +;
> +; 2 live variables in registers.
> +;
> +; CHECK-NEXT:   .long  7
> +; CHECK-NEXT:   .long   L{{.*}}-_jsVoidCall
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .short  2
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long   0
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long   0
> +define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
> +entry:
> +  %resolveCall = inttoptr i64 -559038737 to i8*
> +  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 7, i32 12, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
> +  ret void
> +}
> +
> +; i64 JS Call
> +;
> +; 2 live variables in registers.
> +;
> +; CHECK:        .long  8
> +; CHECK-NEXT:   .long   L{{.*}}-_jsIntCall
> +; CHECK-NEXT:   .short  0
> +; CHECK-NEXT:   .short  2
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long   0
> +; CHECK-NEXT:   .byte   1
> +; CHECK-NEXT:   .byte   0
> +; CHECK-NEXT:   .short  {{[0-9]+}}
> +; CHECK-NEXT:   .long   0
> +define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
> +entry:
> +  %resolveCall = inttoptr i64 -559038737 to i8*
> +  %result = call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 8, i32 12, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
> +  %add = add i64 %result, 3
> +  ret i64 %add
> +}
> +
> +; Spilled stack map values.
> +;
> +; Verify 17 stack map entries.
> +;
> +; CHECK:      .long 11
> +; CHECK-NEXT: .long L{{.*}}-_spilledValue
> +; CHECK-NEXT: .short 0
> +; CHECK-NEXT: .short 10
> +;
> +; Check that at least one is a spilled entry (Indirect).
> +; CHECK: .byte 3
> +; CHECK: .byte 0
> +define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
> +entry:
> +  %resolveCall = inttoptr i64 -559038737 to i8*
> +  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 11, i32 12, i8* %resolveCall, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9)
> +
> +; FIXME: The Spiller needs to be able to fold all rematted loads! This
> +; can be seen by adding %l15 to the stackmap.
> +; <rdar:/15202984> [JS] Ran out of registers during register allocation
> +;  %resolveCall = inttoptr i64 -559038737 to i8*
> +;  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 12, i32 12, i8* %resolveCall, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
> +  ret void
> +}
> +
> +declare void @llvm.experimental.stackmap(i32, i32, ...)
> +declare void @llvm.experimental.patchpoint.void(i32, i32, i8*, i32, ...)
> +declare i64 @llvm.experimental.patchpoint.i64(i32, i32, i8*, i32, ...)
>
>
>