[llvm] r327580 - [CodeGen] Use MIR syntax for MachineMemOperand printing

Francis Visoiu Mistrih via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 14 14:52:14 PDT 2018


Author: thegameg
Date: Wed Mar 14 14:52:13 2018
New Revision: 327580

URL: http://llvm.org/viewvc/llvm-project?rev=327580&view=rev
Log:
[CodeGen] Use MIR syntax for MachineMemOperand printing

Get rid of the "; mem:" suffix and use the syntax we already use in MIR, e.g. ":: (load 2)".

rdar://38163529

Differential Revision: https://reviews.llvm.org/D42377
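
For illustration, the debug print of a memory-accessing instruction changes as
follows (example taken from the arm64-misched-memdep-bug.ll update below):

  Before: %2:gpr32 = LDRWui %0:gpr64common, 1; mem:LD4[%ptr1_plus1]
  After:  %2:gpr32 = LDRWui %0:gpr64common, 1 :: (load 4 from %ir.ptr1_plus1)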

Modified:
    llvm/trunk/include/llvm/CodeGen/MachineMemOperand.h
    llvm/trunk/lib/CodeGen/MIRPrinter.cpp
    llvm/trunk/lib/CodeGen/MachineInstr.cpp
    llvm/trunk/lib/CodeGen/MachineOperand.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
    llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
    llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll
    llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
    llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll
    llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
    llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
    llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
    llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll
    llvm/trunk/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
    llvm/trunk/test/CodeGen/PowerPC/unal-vec-negarith.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
    llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll
    llvm/trunk/test/CodeGen/X86/stack-protector-weight.ll

Modified: llvm/trunk/include/llvm/CodeGen/MachineMemOperand.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineMemOperand.h?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineMemOperand.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineMemOperand.h Wed Mar 14 14:52:13 2018
@@ -295,6 +295,9 @@ public:
   /// @{
   void print(raw_ostream &OS) const;
   void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST,
+             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
+             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
   /// @}
 
   friend bool operator==(const MachineMemOperand &LHS,
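
As a hedged illustration of the new overload declared above, a caller could
drive it roughly like this (a sketch only: it assumes the usual LLVM headers
plus a MachineFunction MF and a MachineInstr MI in scope, and mirrors the real
callers added in MachineInstr.cpp and SelectionDAGDumper.cpp below):

  // Sketch: print every memory operand of MI with full MIR-style detail.
  SmallVector<StringRef, 0> SSNs;                       // sync scope name cache
  ModuleSlotTracker MST(MF.getFunction().getParent());
  MST.incorporateFunction(MF.getFunction());            // enable %ir.* local slots
  const LLVMContext &Ctx = MF.getFunction().getContext();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  for (const MachineMemOperand *MMO : MI.memoperands())
    MMO->print(errs(), MST, SSNs, Ctx, &MFI, TII);      // e.g. "(load 4 from %ir.x)"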

Modified: llvm/trunk/lib/CodeGen/MIRPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRPrinter.cpp?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRPrinter.cpp Wed Mar 14 14:52:13 2018
@@ -19,7 +19,6 @@
 #include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -157,14 +156,10 @@ public:
   void print(const MachineBasicBlock &MBB);
 
   void print(const MachineInstr &MI);
-  void printIRValueReference(const Value &V);
   void printStackObjectReference(int FrameIndex);
   void print(const MachineInstr &MI, unsigned OpIdx,
              const TargetRegisterInfo *TRI, bool ShouldPrintRegisterTies,
              LLT TypeToPrint, bool PrintDef = true);
-  void print(const LLVMContext &Context, const TargetInstrInfo &TII,
-             const MachineMemOperand &Op);
-  void printSyncScope(const LLVMContext &Context, SyncScope::ID SSID);
 };
 
 } // end namespace llvm
@@ -698,36 +693,17 @@ void MIPrinter::print(const MachineInstr
   if (!MI.memoperands_empty()) {
     OS << " :: ";
     const LLVMContext &Context = MF->getFunction().getContext();
+    const MachineFrameInfo &MFI = MF->getFrameInfo();
     bool NeedComma = false;
     for (const auto *Op : MI.memoperands()) {
       if (NeedComma)
         OS << ", ";
-      print(Context, *TII, *Op);
+      Op->print(OS, MST, SSNs, Context, &MFI, TII);
       NeedComma = true;
     }
   }
 }
 
-void MIPrinter::printIRValueReference(const Value &V) {
-  if (isa<GlobalValue>(V)) {
-    V.printAsOperand(OS, /*PrintType=*/false, MST);
-    return;
-  }
-  if (isa<Constant>(V)) {
-    // Machine memory operands can load/store to/from constant value pointers.
-    OS << '`';
-    V.printAsOperand(OS, /*PrintType=*/true, MST);
-    OS << '`';
-    return;
-  }
-  OS << "%ir.";
-  if (V.hasName()) {
-    printLLVMNameWithoutPrefix(OS, V.getName());
-    return;
-  }
-  MachineOperand::printIRSlotNumber(OS, MST.getLocalSlot(&V));
-}
-
 void MIPrinter::printStackObjectReference(int FrameIndex) {
   auto ObjectInfo = StackObjectOperandMapping.find(FrameIndex);
   assert(ObjectInfo != StackObjectOperandMapping.end() &&
@@ -786,134 +762,6 @@ void MIPrinter::print(const MachineInstr
     break;
   }
   }
-}
-
-static const char *getTargetMMOFlagName(const TargetInstrInfo &TII,
-                                        unsigned TMMOFlag) {
-  auto Flags = TII.getSerializableMachineMemOperandTargetFlags();
-  for (const auto &I : Flags) {
-    if (I.first == TMMOFlag) {
-      return I.second;
-    }
-  }
-  return nullptr;
-}
-
-void MIPrinter::print(const LLVMContext &Context, const TargetInstrInfo &TII,
-                      const MachineMemOperand &Op) {
-  OS << '(';
-  if (Op.isVolatile())
-    OS << "volatile ";
-  if (Op.isNonTemporal())
-    OS << "non-temporal ";
-  if (Op.isDereferenceable())
-    OS << "dereferenceable ";
-  if (Op.isInvariant())
-    OS << "invariant ";
-  if (Op.getFlags() & MachineMemOperand::MOTargetFlag1)
-    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag1)
-       << "\" ";
-  if (Op.getFlags() & MachineMemOperand::MOTargetFlag2)
-    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag2)
-       << "\" ";
-  if (Op.getFlags() & MachineMemOperand::MOTargetFlag3)
-    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag3)
-       << "\" ";
-
-  assert((Op.isLoad() || Op.isStore()) && "machine memory operand must be a load or store (or both)");
-  if (Op.isLoad())
-    OS << "load ";
-  if (Op.isStore())
-    OS << "store ";
-
-  printSyncScope(Context, Op.getSyncScopeID());
-
-  if (Op.getOrdering() != AtomicOrdering::NotAtomic)
-    OS << toIRString(Op.getOrdering()) << ' ';
-  if (Op.getFailureOrdering() != AtomicOrdering::NotAtomic)
-    OS << toIRString(Op.getFailureOrdering()) << ' ';
-
-  OS << Op.getSize();
-  if (const Value *Val = Op.getValue()) {
-    OS << ((Op.isLoad() && Op.isStore()) ? " on "
-                                         : Op.isLoad() ? " from " : " into ");
-    printIRValueReference(*Val);
-  } else if (const PseudoSourceValue *PVal = Op.getPseudoValue()) {
-    OS << ((Op.isLoad() && Op.isStore()) ? " on "
-                                         : Op.isLoad() ? " from " : " into ");
-    assert(PVal && "Expected a pseudo source value");
-    switch (PVal->kind()) {
-    case PseudoSourceValue::Stack:
-      OS << "stack";
-      break;
-    case PseudoSourceValue::GOT:
-      OS << "got";
-      break;
-    case PseudoSourceValue::JumpTable:
-      OS << "jump-table";
-      break;
-    case PseudoSourceValue::ConstantPool:
-      OS << "constant-pool";
-      break;
-    case PseudoSourceValue::FixedStack:
-      printStackObjectReference(
-          cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex());
-      break;
-    case PseudoSourceValue::GlobalValueCallEntry:
-      OS << "call-entry ";
-      cast<GlobalValuePseudoSourceValue>(PVal)->getValue()->printAsOperand(
-          OS, /*PrintType=*/false, MST);
-      break;
-    case PseudoSourceValue::ExternalSymbolCallEntry:
-      OS << "call-entry &";
-      printLLVMNameWithoutPrefix(
-          OS, cast<ExternalSymbolPseudoSourceValue>(PVal)->getSymbol());
-      break;
-    case PseudoSourceValue::TargetCustom:
-      llvm_unreachable("TargetCustom pseudo source values are not supported");
-      break;
-    }
-  }
-  MachineOperand::printOperandOffset(OS, Op.getOffset());
-  if (Op.getBaseAlignment() != Op.getSize())
-    OS << ", align " << Op.getBaseAlignment();
-  auto AAInfo = Op.getAAInfo();
-  if (AAInfo.TBAA) {
-    OS << ", !tbaa ";
-    AAInfo.TBAA->printAsOperand(OS, MST);
-  }
-  if (AAInfo.Scope) {
-    OS << ", !alias.scope ";
-    AAInfo.Scope->printAsOperand(OS, MST);
-  }
-  if (AAInfo.NoAlias) {
-    OS << ", !noalias ";
-    AAInfo.NoAlias->printAsOperand(OS, MST);
-  }
-  if (Op.getRanges()) {
-    OS << ", !range ";
-    Op.getRanges()->printAsOperand(OS, MST);
-  }
-  if (unsigned AS = Op.getAddrSpace())
-    OS << ", addrspace " << AS;
-  OS << ')';
-}
-
-void MIPrinter::printSyncScope(const LLVMContext &Context, SyncScope::ID SSID) {
-  switch (SSID) {
-  case SyncScope::System: {
-    break;
-  }
-  default: {
-    if (SSNs.empty())
-      Context.getSyncScopeNames(SSNs);
-
-    OS << "syncscope(\"";
-    PrintEscapedString(SSNs[SSID], OS);
-    OS << "\") ";
-    break;
-  }
-  }
 }
 
 void llvm::printMIR(raw_ostream &OS, const Module &M) {

Modified: llvm/trunk/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineInstr.cpp?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineInstr.cpp Wed Mar 14 14:52:13 2018
@@ -1441,25 +1441,33 @@ void MachineInstr::print(raw_ostream &OS
     }
   }
 
-  bool HaveSemi = false;
   if (!memoperands_empty()) {
-    if (!HaveSemi) {
-      OS << ";";
-      HaveSemi = true;
+    SmallVector<StringRef, 0> SSNs;
+    const LLVMContext *Context = nullptr;
+    std::unique_ptr<LLVMContext> CtxPtr;
+    const MachineFrameInfo *MFI = nullptr;
+    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
+      MFI = &MF->getFrameInfo();
+      Context = &MF->getFunction().getContext();
+    } else {
+      CtxPtr = llvm::make_unique<LLVMContext>();
+      Context = CtxPtr.get();
     }
 
-    OS << " mem:";
-    for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
-         i != e; ++i) {
-      (*i)->print(OS, MST);
-      if (std::next(i) != e)
-        OS << " ";
+    OS << " :: ";
+    bool NeedComma = false;
+    for (const MachineMemOperand *Op : memoperands()) {
+      if (NeedComma)
+        OS << ", ";
+      Op->print(OS, MST, SSNs, *Context, MFI, TII);
+      NeedComma = true;
     }
   }
 
   if (SkipDebugLoc)
     return;
 
+  bool HaveSemi = false;
   // Print debug location information.
   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
     if (!HaveSemi)

Modified: llvm/trunk/lib/CodeGen/MachineOperand.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineOperand.cpp?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineOperand.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineOperand.cpp Wed Mar 14 14:52:13 2018
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/CodeGen/MIRPrinter.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -440,6 +441,69 @@ static void printIRBlockReference(raw_os
     OS << "<unknown>";
 }
 
+static void printIRValueReference(raw_ostream &OS, const Value &V,
+                                  ModuleSlotTracker &MST) {
+  if (isa<GlobalValue>(V)) {
+    V.printAsOperand(OS, /*PrintType=*/false, MST);
+    return;
+  }
+  if (isa<Constant>(V)) {
+    // Machine memory operands can load/store to/from constant value pointers.
+    OS << '`';
+    V.printAsOperand(OS, /*PrintType=*/true, MST);
+    OS << '`';
+    return;
+  }
+  OS << "%ir.";
+  if (V.hasName()) {
+    printLLVMNameWithoutPrefix(OS, V.getName());
+    return;
+  }
+  MachineOperand::printIRSlotNumber(OS, MST.getLocalSlot(&V));
+}
+
+static void printSyncScope(raw_ostream &OS, const LLVMContext &Context,
+                           SyncScope::ID SSID,
+                           SmallVectorImpl<StringRef> &SSNs) {
+  switch (SSID) {
+  case SyncScope::System:
+    break;
+  default:
+    if (SSNs.empty())
+      Context.getSyncScopeNames(SSNs);
+
+    OS << "syncscope(\"";
+    PrintEscapedString(SSNs[SSID], OS);
+    OS << "\") ";
+    break;
+  }
+}
+
+static const char *getTargetMMOFlagName(const TargetInstrInfo &TII,
+                                        unsigned TMMOFlag) {
+  auto Flags = TII.getSerializableMachineMemOperandTargetFlags();
+  for (const auto &I : Flags) {
+    if (I.first == TMMOFlag) {
+      return I.second;
+    }
+  }
+  return nullptr;
+}
+
+static void printFrameIndex(raw_ostream& OS, int FrameIndex, bool IsFixed,
+                            const MachineFrameInfo *MFI) {
+  StringRef Name;
+  if (MFI) {
+    IsFixed = MFI->isFixedObjectIndex(FrameIndex);
+    if (const AllocaInst *Alloca = MFI->getObjectAllocation(FrameIndex))
+      if (Alloca->hasName())
+        Name = Alloca->getName();
+    if (IsFixed)
+      FrameIndex -= MFI->getObjectIndexBegin();
+  }
+  MachineOperand::printStackObjectReference(OS, FrameIndex, IsFixed, Name);
+}
+
 void MachineOperand::printSubRegIdx(raw_ostream &OS, uint64_t Index,
                                     const TargetRegisterInfo *TRI) {
   OS << "%subreg.";
@@ -716,17 +780,10 @@ void MachineOperand::print(raw_ostream &
   case MachineOperand::MO_FrameIndex: {
     int FrameIndex = getIndex();
     bool IsFixed = false;
-    StringRef Name;
-    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
-      const MachineFrameInfo &MFI = MF->getFrameInfo();
-      IsFixed = MFI.isFixedObjectIndex(FrameIndex);
-      if (const AllocaInst *Alloca = MFI.getObjectAllocation(FrameIndex))
-        if (Alloca->hasName())
-          Name = Alloca->getName();
-      if (IsFixed)
-        FrameIndex -= MFI.getObjectIndexBegin();
-    }
-    printStackObjectReference(OS, FrameIndex, IsFixed, Name);
+    const MachineFrameInfo *MFI = nullptr;
+    if (const MachineFunction *MF = getMFIfAvailable(*this))
+      MFI = &MF->getFrameInfo();
+    printFrameIndex(OS, FrameIndex, IsFixed, MFI);
     break;
   }
   case MachineOperand::MO_ConstantPoolIndex:
@@ -961,108 +1018,116 @@ void MachineMemOperand::print(raw_ostrea
   ModuleSlotTracker DummyMST(nullptr);
   print(OS, DummyMST);
 }
+
 void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST) const {
-  assert((isLoad() || isStore()) && "SV has to be a load, store or both.");
+  SmallVector<StringRef, 0> SSNs;
+  LLVMContext Ctx;
+  print(OS, MST, SSNs, Ctx, nullptr, nullptr);
+}
 
+void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
+                              SmallVectorImpl<StringRef> &SSNs,
+                              const LLVMContext &Context,
+                              const MachineFrameInfo *MFI,
+                              const TargetInstrInfo *TII) const {
+  OS << '(';
   if (isVolatile())
-    OS << "Volatile ";
+    OS << "volatile ";
+  if (isNonTemporal())
+    OS << "non-temporal ";
+  if (isDereferenceable())
+    OS << "dereferenceable ";
+  if (isInvariant())
+    OS << "invariant ";
+  if (getFlags() & MachineMemOperand::MOTargetFlag1)
+    OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag1)
+       << "\" ";
+  if (getFlags() & MachineMemOperand::MOTargetFlag2)
+    OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag2)
+       << "\" ";
+  if (getFlags() & MachineMemOperand::MOTargetFlag3)
+    OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag3)
+       << "\" ";
 
+  assert((isLoad() || isStore()) &&
+         "machine memory operand must be a load or store (or both)");
   if (isLoad())
-    OS << "LD";
+    OS << "load ";
   if (isStore())
-    OS << "ST";
-  OS << getSize();
-
-  // Print the address information.
-  OS << "[";
-  if (const Value *V = getValue())
-    V->printAsOperand(OS, /*PrintType=*/false, MST);
-  else if (const PseudoSourceValue *PSV = getPseudoValue())
-    PSV->printCustom(OS);
-  else
-    OS << "<unknown>";
-
-  unsigned AS = getAddrSpace();
-  if (AS != 0)
-    OS << "(addrspace=" << AS << ')';
-
-  // If the alignment of the memory reference itself differs from the alignment
-  // of the base pointer, print the base alignment explicitly, next to the base
-  // pointer.
-  if (getBaseAlignment() != getAlignment())
-    OS << "(align=" << getBaseAlignment() << ")";
-
-  if (getOffset() != 0)
-    OS << "+" << getOffset();
-  OS << "]";
-
-  // Print the alignment of the reference.
-  if (getBaseAlignment() != getAlignment() || getBaseAlignment() != getSize())
-    OS << "(align=" << getAlignment() << ")";
-
-  // Print TBAA info.
-  if (const MDNode *TBAAInfo = getAAInfo().TBAA) {
-    OS << "(tbaa=";
-    if (TBAAInfo->getNumOperands() > 0)
-      TBAAInfo->getOperand(0)->printAsOperand(OS, MST);
-    else
-      OS << "<unknown>";
-    OS << ")";
-  }
-
-  // Print AA scope info.
-  if (const MDNode *ScopeInfo = getAAInfo().Scope) {
-    OS << "(alias.scope=";
-    if (ScopeInfo->getNumOperands() > 0)
-      for (unsigned i = 0, ie = ScopeInfo->getNumOperands(); i != ie; ++i) {
-        ScopeInfo->getOperand(i)->printAsOperand(OS, MST);
-        if (i != ie - 1)
-          OS << ",";
-      }
-    else
-      OS << "<unknown>";
-    OS << ")";
-  }
+    OS << "store ";
 
-  // Print AA noalias scope info.
-  if (const MDNode *NoAliasInfo = getAAInfo().NoAlias) {
-    OS << "(noalias=";
-    if (NoAliasInfo->getNumOperands() > 0)
-      for (unsigned i = 0, ie = NoAliasInfo->getNumOperands(); i != ie; ++i) {
-        NoAliasInfo->getOperand(i)->printAsOperand(OS, MST);
-        if (i != ie - 1)
-          OS << ",";
-      }
-    else
-      OS << "<unknown>";
-    OS << ")";
-  }
+  printSyncScope(OS, Context, getSyncScopeID(), SSNs);
 
-  if (const MDNode *Ranges = getRanges()) {
-    unsigned NumRanges = Ranges->getNumOperands();
-    if (NumRanges != 0) {
-      OS << "(ranges=";
-
-      for (unsigned I = 0; I != NumRanges; ++I) {
-        Ranges->getOperand(I)->printAsOperand(OS, MST);
-        if (I != NumRanges - 1)
-          OS << ',';
-      }
+  if (getOrdering() != AtomicOrdering::NotAtomic)
+    OS << toIRString(getOrdering()) << ' ';
+  if (getFailureOrdering() != AtomicOrdering::NotAtomic)
+    OS << toIRString(getFailureOrdering()) << ' ';
 
-      OS << ')';
+  OS << getSize();
+  if (const Value *Val = getValue()) {
+    OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into ");
+    printIRValueReference(OS, *Val, MST);
+  } else if (const PseudoSourceValue *PVal = getPseudoValue()) {
+    OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into ");
+    assert(PVal && "Expected a pseudo source value");
+    switch (PVal->kind()) {
+    case PseudoSourceValue::Stack:
+      OS << "stack";
+      break;
+    case PseudoSourceValue::GOT:
+      OS << "got";
+      break;
+    case PseudoSourceValue::JumpTable:
+      OS << "jump-table";
+      break;
+    case PseudoSourceValue::ConstantPool:
+      OS << "constant-pool";
+      break;
+    case PseudoSourceValue::FixedStack: {
+      int FrameIndex = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex();
+      bool IsFixed = true;
+      printFrameIndex(OS, FrameIndex, IsFixed, MFI);
+      break;
+    }
+    case PseudoSourceValue::GlobalValueCallEntry:
+      OS << "call-entry ";
+      cast<GlobalValuePseudoSourceValue>(PVal)->getValue()->printAsOperand(
+          OS, /*PrintType=*/false, MST);
+      break;
+    case PseudoSourceValue::ExternalSymbolCallEntry:
+      OS << "call-entry &";
+      printLLVMNameWithoutPrefix(
+          OS, cast<ExternalSymbolPseudoSourceValue>(PVal)->getSymbol());
+      break;
+    case PseudoSourceValue::TargetCustom:
+      llvm_unreachable("TargetCustom pseudo source values are not supported");
+      break;
     }
   }
+  MachineOperand::printOperandOffset(OS, getOffset());
+  if (getBaseAlignment() != getSize())
+    OS << ", align " << getBaseAlignment();
+  auto AAInfo = getAAInfo();
+  if (AAInfo.TBAA) {
+    OS << ", !tbaa ";
+    AAInfo.TBAA->printAsOperand(OS, MST);
+  }
+  if (AAInfo.Scope) {
+    OS << ", !alias.scope ";
+    AAInfo.Scope->printAsOperand(OS, MST);
+  }
+  if (AAInfo.NoAlias) {
+    OS << ", !noalias ";
+    AAInfo.NoAlias->printAsOperand(OS, MST);
+  }
+  if (getRanges()) {
+    OS << ", !range ";
+    getRanges()->printAsOperand(OS, MST);
+  }
+  // FIXME: Implement addrspace printing/parsing in MIR.
+  // For now, print this even though parsing it is not available in MIR.
+  if (unsigned AS = getAddrSpace())
+    OS << ", addrspace " << AS;
 
-  if (isNonTemporal())
-    OS << "(nontemporal)";
-  if (isDereferenceable())
-    OS << "(dereferenceable)";
-  if (isInvariant())
-    OS << "(invariant)";
-  if (getFlags() & MOTargetFlag1)
-    OS << "(flag1)";
-  if (getFlags() & MOTargetFlag2)
-    OS << "(flag2)";
-  if (getFlags() & MOTargetFlag3)
-    OS << "(flag3)";
+  OS << ')';
 }

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp Wed Mar 14 14:52:13 2018
@@ -34,6 +34,7 @@
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/ModuleSlotTracker.h"
 #include "llvm/IR/Value.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
@@ -422,6 +423,19 @@ static Printable PrintNodeId(const SDNod
   });
 }
 
+// Print the MMO with more information from the SelectionDAG.
+static void printMemOperand(raw_ostream &OS, const MachineMemOperand &MMO,
+                            const SelectionDAG *G) {
+  const MachineFunction &MF = G->getMachineFunction();
+  const Function &F = MF.getFunction();
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetInstrInfo *TII = G->getSubtarget().getInstrInfo();
+  ModuleSlotTracker MST(F.getParent());
+  MST.incorporateFunction(F);
+  SmallVector<StringRef, 0> SSNs;
+  MMO.print(OS, MST, SSNs, *G->getContext(), &MFI, TII);
+}
+
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 LLVM_DUMP_METHOD void SDNode::dump() const { dump(nullptr); }
 
@@ -478,7 +492,7 @@ void SDNode::print_details(raw_ostream &
       OS << "Mem:";
       for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
            e = MN->memoperands_end(); i != e; ++i) {
-        OS << **i;
+        printMemOperand(OS, **i, G);
         if (std::next(i) != e)
           OS << " ";
       }
@@ -570,7 +584,9 @@ void SDNode::print_details(raw_ostream &
     OS << ":" << N->getVT().getEVTString();
   }
   else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
-    OS << "<" << *LD->getMemOperand();
+    OS << "<";
+
+    printMemOperand(OS, *LD->getMemOperand(), G);
 
     bool doExt = true;
     switch (LD->getExtensionType()) {
@@ -588,7 +604,8 @@ void SDNode::print_details(raw_ostream &
 
     OS << ">";
   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
-    OS << "<" << *ST->getMemOperand();
+    OS << "<";
+    printMemOperand(OS, *ST->getMemOperand(), G);
 
     if (ST->isTruncatingStore())
       OS << ", trunc to " << ST->getMemoryVT().getEVTString();
@@ -599,7 +616,9 @@ void SDNode::print_details(raw_ostream &
 
     OS << ">";
   } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
-    OS << "<" << *M->getMemOperand() << ">";
+    OS << "<";
+    printMemOperand(OS, *M->getMemOperand(), G);
+    OS << ">";
   } else if (const BlockAddressSDNode *BA =
                dyn_cast<BlockAddressSDNode>(this)) {
     int64_t offset = BA->getOffset();

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll Wed Mar 14 14:52:13 2018
@@ -46,7 +46,7 @@ define [1 x double] @constant() {
   ; The key problem here is that we may fail to create an MBB referenced by a
   ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
   ; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0); mem:ST4[%addr] (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0) :: (store seq_cst 4 into %ir.addr) (in function: pending_phis)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
 ; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
 define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -65,7 +65,7 @@ false:
 
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s24) = G_LOAD %1:_(p0); mem:LD3[undef](align=1) (in function: odd_type_load)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s24) = G_LOAD %1:_(p0) :: (load 3 from `i24* undef`, align 1) (in function: odd_type_load)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type_load
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type_load
 define i32 @odd_type_load() {
@@ -76,7 +76,7 @@ entry:
 }
 
   ; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0); mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0) :: (store 6 into %ir.addr, align 8) (in function: odd_type)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
 define void @odd_type(i42* %addr) {
@@ -85,7 +85,7 @@ define void @odd_type(i42* %addr) {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0); mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0) :: (store 28 into %ir.addr, align 32) (in function: odd_vector)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
 define void @odd_vector(<7 x i32>* %addr) {
@@ -104,7 +104,7 @@ define i128 @sequence_sizes([8 x i8] %in
 }
 
 ; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0); mem:LD8[%addr] (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0) :: (load seq_cst 8 from %ir.addr)  (in function: atomic_ops)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
 ; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
 define i64 @atomic_ops(i64* %addr) {
@@ -169,7 +169,7 @@ end:
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0); mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0) :: (store 16 into `<2 x i16*>* undef`) (in function: vector_of_pointers_insertelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
 define void @vector_of_pointers_insertelement() {
@@ -185,7 +185,7 @@ end:
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0); mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0) :: (store 12 into `%struct96* undef`, align 4) (in function: nonpow2_insertvalue_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
 %struct96 = type { float, float, float }
@@ -195,7 +195,7 @@ define void @nonpow2_insertvalue_narrowi
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
 define void @nonpow2_add_narrowing() {
@@ -206,7 +206,7 @@ define void @nonpow2_add_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
 define void @nonpow2_or_narrowing() {
@@ -217,7 +217,7 @@ define void @nonpow2_or_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_load_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
 define void @nonpow2_load_narrowing() {
@@ -226,7 +226,7 @@ define void @nonpow2_load_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0); mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0) :: (store 12 into %ir.c, align 16) (in function: nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
 define void @nonpow2_store_narrowing(i96* %c) {
@@ -236,7 +236,7 @@ define void @nonpow2_store_narrowing(i96
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0); mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0) :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_constant_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
 define void @nonpow2_constant_narrowing() {
@@ -246,7 +246,7 @@ define void @nonpow2_constant_narrowing(
 
 ; Currently can't handle vector lengths that aren't an exact multiple of
 ; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0 (in function: nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1:_(<7 x s64>), %3:_(s64) (in function: nonpow2_vector_add_fewerelements)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:

Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll Wed Mar 14 14:52:13 2018
@@ -130,10 +130,10 @@ entry:
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: stp_volatile:%bb.0
 ; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2):   STRXui %1:gpr64, %0:gpr64common, 3; mem:Volatile
-; CHECK:SU(3):   STRXui %1:gpr64, %0:gpr64common, 2; mem:Volatile
-; CHECK:SU(4):   STRXui %1:gpr64, %0:gpr64common, 1; mem:Volatile
-; CHECK:SU(5):   STRXui %1:gpr64, %0:gpr64common, 4; mem:Volatile
+; CHECK:SU(2):   STRXui %1:gpr64, %0:gpr64common, 3 :: (volatile
+; CHECK:SU(3):   STRXui %1:gpr64, %0:gpr64common, 2 :: (volatile
+; CHECK:SU(4):   STRXui %1:gpr64, %0:gpr64common, 1 :: (volatile
+; CHECK:SU(5):   STRXui %1:gpr64, %0:gpr64common, 4 :: (volatile
 define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 3

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll Wed Mar 14 14:52:13 2018
@@ -5,14 +5,14 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: misched_bug:%bb.0 entry
-; CHECK: SU(2):   %2:gpr32 = LDRWui %0:gpr64common, 1; mem:LD4[%ptr1_plus1]
+; CHECK: SU(2):   %2:gpr32 = LDRWui %0:gpr64common, 1 :: (load 4 from %ir.ptr1_plus1)
 ; CHECK:   Successors:
 ; CHECK-NEXT:    SU(5): Data Latency=4 Reg=%2
 ; CHECK-NEXT:    SU(4): Ord  Latency=0
-; CHECK: SU(3):   STRWui $wzr, %0:gpr64common, 0; mem:ST4[%ptr1]
+; CHECK: SU(3):   STRWui $wzr, %0:gpr64common, 0 :: (store 4 into %ir.ptr1)
 ; CHECK:   Successors:
 ; CHECK: SU(4): Ord  Latency=0
-; CHECK: SU(4):   STRWui $wzr, %1:gpr64common, 0; mem:ST4[%ptr2]
+; CHECK: SU(4):   STRWui $wzr, %1:gpr64common, 0 :: (store 4 into %ir.ptr2)
 ; CHECK: SU(5):   $w0 = COPY %2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
 define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {

Modified: llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll Wed Mar 14 14:52:13 2018
@@ -7,7 +7,7 @@ target datalayout = "A5"
 ; size and not 4 corresponding to the sign-extended size (i32).
 
 ; DEBUG: {{^}}# Machine code for function extload_align:
-; DEBUG: mem:LD2[<unknown>(addrspace=5)]
+; DEBUG: (load 2, addrspace 5)
 ; DEBUG: {{^}}# End machine code for function extload_align.
 
 define amdgpu_kernel void @extload_align(i32 addrspace(5)* %out, i32 %index) #0 {

Modified: llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll Wed Mar 14 14:52:13 2018
@@ -5,11 +5,11 @@
 ; latency regardless of whether they are barriers or not.
 
 ; CHECK: ** List Scheduling
-; CHECK: SU(2){{.*}}STR{{.*}}Volatile
+; CHECK: SU(2){{.*}}STR{{.*}}(volatile
 ; CHECK-NOT: SU({{.*}}): Ord
 ; CHECK: SU(3): Ord Latency=1
 ; CHECK-NOT: SU({{.*}}): Ord
-; CHECK: SU(3){{.*}}LDR{{.*}}Volatile
+; CHECK: SU(3){{.*}}LDR{{.*}}(volatile
 ; CHECK-NOT: SU({{.*}}): Ord
 ; CHECK: SU(2): Ord Latency=1
 ; CHECK-NOT: SU({{.*}}): Ord

Modified: llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll Wed Mar 14 14:52:13 2018
@@ -5,7 +5,7 @@
 
 @b = external global i64*
 
-; CHECK: Formed {{.*}} t2LDRD{{.*}} mem:LD4[%0] LD4[%0+4]
+; CHECK: Formed {{.*}} t2LDRD{{.*}} (load 4 from %ir.0), (load 4 from %ir.0 + 4)
 define i64 @t(i64 %a) nounwind readonly {
 entry:
 	%0 = load i64*, i64** @b, align 4

Modified: llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir Wed Mar 14 14:52:13 2018
@@ -42,7 +42,7 @@
 # CHECK_SWIFT: Latency    : 2
 # CHECK_R52:   Latency    : 2
 #
-# CHECK:       SU(3):   %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, $noreg; mem:LD4[@g1](dereferenceable)
+# CHECK:       SU(3):   %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, $noreg :: (dereferenceable load 4 from @g1)
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 3
 # CHECK_R52:   Latency    : 4
@@ -57,7 +57,7 @@
 # CHECK_SWIFT: Latency    : 14
 # CHECK_R52:   Latency    : 8
 
-# CHECK:       SU(8):   t2STRi12 %7:rgpr, %2:rgpr, 0, 14, $noreg; mem:ST4[@g1]
+# CHECK:       SU(8):   t2STRi12 %7:rgpr, %2:rgpr, 0, 14, $noreg :: (store 4 into @g1)
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 0
 # CHECK_R52:   Latency    : 4

Modified: llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir Wed Mar 14 14:52:13 2018
@@ -20,7 +20,7 @@
 
 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1):   %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, $noreg; mem:LD32[%A](align=8)
+# CHECK: SU(1):   %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, $noreg :: (load 32 from %ir.A, align 8)
 # CHECK: Latency            : 8
 # CHECK: Single Issue       : true;
 # CHECK: SU(2):   %4:dpr = VADDv8i8 %1.dsub_0:qqpr, %1.dsub_1:qqpr, 14, $noreg

Modified: llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll Wed Mar 14 14:52:13 2018
@@ -3,7 +3,7 @@
 
 ; Check that the generated post-increment load has TBAA information.
 ; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %{{[0-9]+}}{{[^,]*}}, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}{{[^,]*}}, 64 :: (load 64{{.*}}!tbaa
 
 target triple = "hexagon"
 

Modified: llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll Wed Mar 14 14:52:13 2018
@@ -12,6 +12,6 @@ entry:
 }
 
 ; Make sure that the MMO on the store has no offset from the byval
-; variable itself (we used to have mem:ST8[%v+64]).
-; CHECK: STD killed renamable $x5, 176, $x1; mem:ST8[%v](align=16)
+; variable itself (we used to have (store 8 into %ir.v + 64)).
+; CHECK: STD killed renamable $x5, 176, $x1 :: (store 8 into %ir.v, align 16)
 

Modified: llvm/trunk/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll Wed Mar 14 14:52:13 2018
@@ -9,13 +9,13 @@ define i64 @func1(i64 %p1, i64 %p2, i64
 ; so we expect the LD8 to load from the address used in the original HIBITS
 ; load.
 ; CHECK-LABEL: Initial selection DAG:
-; CHECK-DAG:     [[LOBITS:t[0-9]+]]: i32,ch = load<LD4[FixedStack-2]>
-; CHECK-DAG:     [[HIBITS:t[0-9]+]]: i32,ch = load<LD4[FixedStack-1]>
+; CHECK-DAG:     [[LOBITS:t[0-9]+]]: i32,ch = load<(load 4 from %fixed-stack.1)>
+; CHECK-DAG:     [[HIBITS:t[0-9]+]]: i32,ch = load<(load 4 from %fixed-stack.2)>
 ; CHECK: Combining: t{{[0-9]+}}: i64 = build_pair [[LOBITS]], [[HIBITS]]
 ; CHECK-NEXT: Creating new node
-; CHECK-SAME: load<LD8[FixedStack-1]
+; CHECK-SAME: load<(load 8 from %fixed-stack.2, align 4)>
 ; CHECK-NEXT: into
-; CHECK-SAME: load<LD8[FixedStack-1]
+; CHECK-SAME: load<(load 8 from %fixed-stack.2, align 4)>
 ; CHECK-LABEL: Optimized lowered selection DAG:
   %result = extractvalue {i64, i8* } %struct, 0
   ret i64 %result

Modified: llvm/trunk/test/CodeGen/PowerPC/unal-vec-negarith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/unal-vec-negarith.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/unal-vec-negarith.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/unal-vec-negarith.ll Wed Mar 14 14:52:13 2018
@@ -9,8 +9,8 @@ entry:
   %r = load <16 x i8>, <16 x i8>* %p, align 1
   ret <16 x i8> %r
 
-; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<LD31[%p+4294967281](align=1)>
-; CHECK:     v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<LD31[%p+-15](align=1)>
+; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load 31 from %ir.p + 4294967281, align 1)>
+; CHECK:     v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load 31 from %ir.p - 15, align 1)>
 }
 
 attributes #0 = { nounwind "target-cpu"="pwr7" }

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll Wed Mar 14 14:52:13 2018
@@ -8,7 +8,7 @@
 ; the fallback path.
 
 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0); mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0) :: (store 10 into %ir.ptr, align 16) (in function: test_x86_fp80_dump)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
 define void @test_x86_fp80_dump(x86_fp80* %ptr){

Modified: llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll Wed Mar 14 14:52:13 2018
@@ -18,12 +18,12 @@
 ; DBGDAG-DAG: [[BASEPTR:t[0-9]+]]: i64,ch = CopyFromReg [[ENTRYTOKEN]],
 ; DBGDAG-DAG: [[ADDPTR:t[0-9]+]]: i64 = add {{(nuw )?}}[[BASEPTR]], Constant:i64<2>
 
-; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<LD2[%tmp81](align=1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
-; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<LD1[%tmp12]> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
+; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<(load 2 from %ir.tmp81, align 1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
+; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<(load 1 from %ir.tmp12)> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
 
-; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<(store 1 into %ir.tmp14)> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
 ; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1
-; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<(store 2 into %ir.tmp10, align 1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
 
 ; DBGDAG: X86ISD::RET_FLAG t{{[0-9]+}},
 

Modified: llvm/trunk/test/CodeGen/X86/stack-protector-weight.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-protector-weight.ll?rev=327580&r1=327579&r2=327580&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stack-protector-weight.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stack-protector-weight.ll Wed Mar 14 14:52:13 2018
@@ -16,16 +16,16 @@
 ; DARWIN-IR: CALL64pcrel32 @__stack_chk_fail
 
 ; MSVC-SELDAG: # Machine code for function test_branch_weights:
-; MSVC-SELDAG: mem:Volatile LD4[@__security_cookie]
-; MSVC-SELDAG: ST4[FixedStack0]
-; MSVC-SELDAG: LD4[FixedStack0]
+; MSVC-SELDAG: :: (volatile load 4 from @__security_cookie)
+; MSVC-SELDAG: (store 4 into stack)
+; MSVC-SELDAG: (volatile load 4 from %stack.0.StackGuardSlot)
 ; MSVC-SELDAG: CALLpcrel32 @__security_check_cookie
 
 ; MSVC always uses selection DAG now.
 ; MSVC-IR: # Machine code for function test_branch_weights:
-; MSVC-IR: mem:Volatile LD4[@__security_cookie]
-; MSVC-IR: ST4[FixedStack0]
-; MSVC-IR: LD4[FixedStack0]
+; MSVC-IR: :: (volatile load 4 from @__security_cookie)
+; MSVC-IR: (store 4 into stack)
+; MSVC-IR: (volatile load 4 from %stack.0.StackGuardSlot)
 ; MSVC-IR: CALLpcrel32 @__security_check_cookie
 
 define i32 @test_branch_weights(i32 %n) #0 {



