[llvm] 1c545f6 - [ms] [X86] Use "P" modifier on all branch-target operands in inline X86 assembly.

Eric Astor via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 9 12:00:03 PST 2020


Author: Eric Astor
Date: 2020-01-09T14:55:03-05:00
New Revision: 1c545f6dbcbb3ada2dfef2c6afbc1ca8939135cb

URL: https://github.com/llvm/llvm-project/commit/1c545f6dbcbb3ada2dfef2c6afbc1ca8939135cb
DIFF: https://github.com/llvm/llvm-project/commit/1c545f6dbcbb3ada2dfef2c6afbc1ca8939135cb.diff

LOG: [ms] [X86] Use "P" modifier on all branch-target operands in inline X86 assembly.

Summary:
Extend D71677 to apply to all branch-target operands, rather than special-casing call instructions.

Also add a regression test for llvm.org/PR44272, since this finishes fixing it.

Reviewers: thakis, rnk

Reviewed By: thakis

Subscribers: merge_guards_bot, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D72417

Added: 
    

Modified: 
    clang/test/CodeGen/ms-inline-asm-64.c
    llvm/include/llvm/MC/MCInstrDesc.h
    llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
    llvm/lib/MC/MCParser/AsmParser.cpp
    llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
    llvm/lib/Target/X86/AsmParser/X86Operand.h
    llvm/lib/Target/X86/X86InstrControl.td
    llvm/lib/Target/X86/X86InstrInfo.td
    llvm/utils/TableGen/InstrInfoEmitter.cpp
    llvm/utils/TableGen/X86RecognizableInstr.cpp

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/ms-inline-asm-64.c b/clang/test/CodeGen/ms-inline-asm-64.c
index ce46b8821dee..20e8228a04b6 100644
--- a/clang/test/CodeGen/ms-inline-asm-64.c
+++ b/clang/test/CodeGen/ms-inline-asm-64.c
@@ -58,3 +58,17 @@ int t4() {
 // CHECK-SAME: mov [ebx + $$4], ecx
 // CHECK-SAME: "*m,~{eax},~{ebx},~{dirflag},~{fpsr},~{flags}"(%struct.t3_type* %{{.*}})
 }
+
+void bar() {}
+
+void t5() {
+  __asm {
+    call bar
+    jmp bar
+  }
+  // CHECK: t5
+  // CHECK: call void asm sideeffect inteldialect
+  // CHECK-SAME: call qword ptr ${0:P}
+  // CHECK-SAME: jmp qword ptr ${1:P}
+  // CHECK-SAME: "*m,*m,~{dirflag},~{fpsr},~{flags}"(void (...)* bitcast (void ()* @bar to void (...)*), void (...)* bitcast (void ()* @bar to void (...)*))
+}

diff  --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h
index 8791ba3ba425..e6af40868c30 100644
--- a/llvm/include/llvm/MC/MCInstrDesc.h
+++ b/llvm/include/llvm/MC/MCInstrDesc.h
@@ -37,7 +37,12 @@ enum OperandConstraint {
 /// These are flags set on operands, but should be considered
 /// private, all access should go through the MCOperandInfo accessors.
 /// See the accessors for a description of what these are.
-enum OperandFlags { LookupPtrRegClass = 0, Predicate, OptionalDef };
+enum OperandFlags {
+  LookupPtrRegClass = 0,
+  Predicate,
+  OptionalDef,
+  BranchTarget
+};
 
 /// Operands are tagged with one of the values of this enum.
 enum OperandType {
@@ -98,6 +103,9 @@ class MCOperandInfo {
   /// Set if this operand is a optional def.
   bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }
 
+  /// Set if this operand is a branch target.
+  bool isBranchTarget() const { return Flags & (1 << MCOI::BranchTarget); }
+
   bool isGenericType() const {
     return OperandType >= MCOI::OPERAND_FIRST_GENERIC &&
            OperandType <= MCOI::OPERAND_LAST_GENERIC;

diff  --git a/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index 3dde0b8474a2..abb95628c2a9 100644
--- a/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -71,10 +71,6 @@ class MCParsedAsmOperand {
   /// variable/label?   Only valid when parsing MS-style inline assembly.
   virtual bool needAddressOf() const { return false; }
 
-  /// isCallOperand - Is this an operand of an inline-assembly call instruction?
-  /// Only valid when parsing MS-style inline assembly.
-  virtual bool isCallOperand() const { return false; }
-
   /// isOffsetOfLocal - Do we need to emit code to get the offset of the local
   /// variable, rather than its value?   Only valid when parsing MS-style inline
   /// assembly.

diff  --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index dc8132b627a6..94a44c1f93b1 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -5845,7 +5845,7 @@ bool AsmParser::parseMSInlineAsm(
         InputDecls.push_back(OpDecl);
         InputDeclsAddressOf.push_back(Operand.needAddressOf());
         InputConstraints.push_back(Constraint.str());
-        if (Operand.isCallOperand())
+        if (Desc.OpInfo[i - 1].isBranchTarget())
           AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size());
         else
           AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size());

diff  --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 27c6a5f91428..69299ae9b00d 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2924,15 +2924,6 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     }
   }
 
-  // Mark the operands of a call instruction.  These need to be handled
-  // differently when referenced in MS-style inline assembly.
-  if (Name.startswith("call") || Name.startswith("lcall")) {
-    for (size_t i = 1; i < Operands.size(); ++i) {
-      X86Operand &Op = static_cast<X86Operand &>(*Operands[i]);
-      Op.setCallOperand(true);
-    }
-  }
-
   if (Flags)
     Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
   return false;

diff  --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 36b8bc4e65f5..d831a63b04ee 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -284,9 +284,6 @@ struct X86Operand final : public MCParsedAsmOperand {
 
   bool needAddressOf() const override { return AddressOf; }
 
-  bool isCallOperand() const override { return CallOperand; }
-  void setCallOperand(bool IsCallOperand) { CallOperand = IsCallOperand; }
-
   bool isMem() const override { return Kind == Memory; }
   bool isMemUnsized() const {
     return Kind == Memory && Mem.Size == 0;

diff  --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index e1e6eea59884..32faeb1a86f2 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -220,12 +220,12 @@ let isCall = 1 in
   // registers are added manually.
   let Uses = [ESP, SSP] in {
     def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
-                           (outs), (ins i32imm_pcrel:$dst),
+                           (outs), (ins i32imm_brtarget:$dst),
                            "call{l}\t$dst", []>, OpSize32,
                       Requires<[Not64BitMode]>, Sched<[WriteJump]>;
     let hasSideEffects = 0 in
       def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
-                             (outs), (ins i16imm_pcrel:$dst),
+                             (outs), (ins i16imm_brtarget:$dst),
                              "call{w}\t$dst", []>, OpSize16,
                         Sched<[WriteJump]>;
     def CALL16r     : I<0xFF, MRM2r, (outs), (ins GR16:$dst),
@@ -285,7 +285,7 @@ let isCall = 1 in
 // Tail call stuff.
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
     isCodeGenOnly = 1, Uses = [ESP, SSP] in {
-  def TCRETURNdi : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$offset),
+  def TCRETURNdi : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$offset),
                            []>, Sched<[WriteJump]>, NotMemoryFoldable;
   def TCRETURNri : PseudoI<(outs), (ins ptr_rc_tailcall:$dst, i32imm:$offset),
                            []>, Sched<[WriteJump]>, NotMemoryFoldable;
@@ -293,7 +293,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
   def TCRETURNmi : PseudoI<(outs), (ins i32mem_TC:$dst, i32imm:$offset),
                            []>, Sched<[WriteJumpLd]>;
 
-  def TAILJMPd : PseudoI<(outs), (ins i32imm_pcrel:$dst),
+  def TAILJMPd : PseudoI<(outs), (ins i32imm_brtarget:$dst),
                          []>, Sched<[WriteJump]>;
 
   def TAILJMPr : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
@@ -309,10 +309,11 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
     isCodeGenOnly = 1, SchedRW = [WriteJump] in
   let Uses = [ESP, EFLAGS, SSP] in {
   def TCRETURNdicc : PseudoI<(outs),
-                     (ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
+                     (ins i32imm_brtarget:$dst, i32imm:$offset, i32imm:$cond),
+                     []>;
 
   // This gets substituted to a conditional jump instruction in MC lowering.
-  def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$cond), []>;
+  def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$cond), []>;
 }
 
 
@@ -328,7 +329,7 @@ let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
   // that the offset between an arbitrary immediate and the call will fit in
   // the 32-bit pcrel field that we have.
   def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
-                        (outs), (ins i64i32imm_pcrel:$dst),
+                        (outs), (ins i64i32imm_brtarget:$dst),
                         "call{q}\t$dst", []>, OpSize32,
                       Requires<[In64BitMode]>;
   def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
@@ -357,7 +358,7 @@ let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
     isCodeGenOnly = 1, Uses = [RSP, SSP] in {
   def TCRETURNdi64   : PseudoI<(outs),
-                               (ins i64i32imm_pcrel:$dst, i32imm:$offset),
+                               (ins i64i32imm_brtarget:$dst, i32imm:$offset),
                                []>, Sched<[WriteJump]>;
   def TCRETURNri64   : PseudoI<(outs),
                                (ins ptr_rc_tailcall:$dst, i32imm:$offset),
@@ -367,7 +368,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                                (ins i64mem_TC:$dst, i32imm:$offset),
                                []>, Sched<[WriteJumpLd]>, NotMemoryFoldable;
 
-  def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_pcrel:$dst),
+  def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_brtarget:$dst),
                            []>, Sched<[WriteJump]>;
 
   def TAILJMPr64 : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
@@ -415,10 +416,10 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
     isCodeGenOnly = 1, SchedRW = [WriteJump] in
   let Uses = [RSP, EFLAGS, SSP] in {
   def TCRETURNdi64cc : PseudoI<(outs),
-                           (ins i64i32imm_pcrel:$dst, i32imm:$offset,
+                           (ins i64i32imm_brtarget:$dst, i32imm:$offset,
                             i32imm:$cond), []>;
 
   // This gets substituted to a conditional jump instruction in MC lowering.
   def TAILJMPd64_CC : PseudoI<(outs),
-                              (ins i64i32imm_pcrel:$dst, i32imm:$cond), []>;
+                              (ins i64i32imm_brtarget:$dst, i32imm:$cond), []>;
 }

diff  --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 8d7f7c8c7875..ca5425e8b89f 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -454,18 +454,6 @@ def i64mem_TC : Operand<i64> {
   let OperandType = "OPERAND_MEMORY";
 }
 
-let OperandType = "OPERAND_PCREL",
-    ParserMatchClass = X86AbsMemAsmOperand,
-    PrintMethod = "printPCRelImm" in {
-def i32imm_pcrel : Operand<i32>;
-def i16imm_pcrel : Operand<i16>;
-
-// Branch targets have OtherVT type and print as pc-relative values.
-def brtarget : Operand<OtherVT>;
-def brtarget8 : Operand<OtherVT>;
-
-}
-
 // Special parser to detect 16-bit mode to select 16-bit displacement.
 def X86AbsMem16AsmOperand : AsmOperandClass {
   let Name = "AbsMem16";
@@ -473,14 +461,26 @@ def X86AbsMem16AsmOperand : AsmOperandClass {
   let SuperClasses = [X86AbsMemAsmOperand];
 }
 
-// Branch targets have OtherVT type and print as pc-relative values.
-let OperandType = "OPERAND_PCREL",
-    PrintMethod = "printPCRelImm" in {
-let ParserMatchClass = X86AbsMem16AsmOperand in
-  def brtarget16 : Operand<OtherVT>;
-let ParserMatchClass = X86AbsMemAsmOperand in
-  def brtarget32 : Operand<OtherVT>;
+// Branch targets print as pc-relative values.
+class BranchTargetOperand<ValueType ty> : Operand<ty> {
+  let OperandType = "OPERAND_PCREL";
+  let PrintMethod = "printPCRelImm";
+  let ParserMatchClass = X86AbsMemAsmOperand;
+}
+
+def i32imm_brtarget : BranchTargetOperand<i32>;
+def i16imm_brtarget : BranchTargetOperand<i16>;
+
+// 64-bits but only 32 bits are significant, and those bits are treated as being
+// pc relative.
+def i64i32imm_brtarget : BranchTargetOperand<i64>;
+
+def brtarget : BranchTargetOperand<OtherVT>;
+def brtarget8 : BranchTargetOperand<OtherVT>;
+def brtarget16 : BranchTargetOperand<OtherVT> {
+  let ParserMatchClass = X86AbsMem16AsmOperand;
 }
+def brtarget32 : BranchTargetOperand<OtherVT>;
 
 let RenderMethod = "addSrcIdxOperands" in {
   def X86SrcIdx8Operand : AsmOperandClass {
@@ -756,14 +756,6 @@ def i64u8imm : Operand<i64> {
   let OperandType = "OPERAND_IMMEDIATE";
 }
 
-// 64-bits but only 32 bits are significant, and those bits are treated as being
-// pc relative.
-def i64i32imm_pcrel : Operand<i64> {
-  let PrintMethod = "printPCRelImm";
-  let ParserMatchClass = X86AbsMemAsmOperand;
-  let OperandType = "OPERAND_PCREL";
-}
-
 def lea64_32mem : Operand<i32> {
   let PrintMethod = "printanymem";
   let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);

diff  --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 300ba36a7007..3983252b0e04 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -164,6 +164,11 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
       if (Op.Rec->isSubClassOf("OptionalDefOperand"))
         Res += "|(1<<MCOI::OptionalDef)";
 
+      // Branch target operands.  Check to see if the original unexpanded
+      // operand was of type BranchTargetOperand.
+      if (Op.Rec->isSubClassOf("BranchTargetOperand"))
+        Res += "|(1<<MCOI::BranchTarget)";
+
       // Fill in operand type.
       Res += ", ";
       assert(!Op.OperandType.empty() && "Invalid operand type.");

diff  --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 33dc6f3f9e23..1048ef81a378 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -879,9 +879,9 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
   TYPE("i128mem",             TYPE_M)
   TYPE("i256mem",             TYPE_M)
   TYPE("i512mem",             TYPE_M)
-  TYPE("i64i32imm_pcrel",     TYPE_REL)
-  TYPE("i16imm_pcrel",        TYPE_REL)
-  TYPE("i32imm_pcrel",        TYPE_REL)
+  TYPE("i64i32imm_brtarget",  TYPE_REL)
+  TYPE("i16imm_brtarget",     TYPE_REL)
+  TYPE("i32imm_brtarget",     TYPE_REL)
   TYPE("ccode",               TYPE_IMM)
   TYPE("AVX512RC",            TYPE_IMM)
   TYPE("brtarget32",          TYPE_REL)
@@ -1169,45 +1169,45 @@ RecognizableInstr::relocationEncodingFromString(const std::string &s,
   if(OpSize != X86Local::OpSize16) {
     // For instructions without an OpSize prefix, a declared 16-bit register or
     // immediate encoding is special.
-    ENCODING("i16imm",        ENCODING_IW)
+    ENCODING("i16imm",           ENCODING_IW)
   }
-  ENCODING("i16imm",          ENCODING_Iv)
-  ENCODING("i16i8imm",        ENCODING_IB)
-  ENCODING("i32imm",          ENCODING_Iv)
-  ENCODING("i32i8imm",        ENCODING_IB)
-  ENCODING("i64i32imm",       ENCODING_ID)
-  ENCODING("i64i8imm",        ENCODING_IB)
-  ENCODING("i8imm",           ENCODING_IB)
-  ENCODING("u8imm",           ENCODING_IB)
-  ENCODING("i16u8imm",        ENCODING_IB)
-  ENCODING("i32u8imm",        ENCODING_IB)
-  ENCODING("i64u8imm",        ENCODING_IB)
-  ENCODING("i64i32imm_pcrel", ENCODING_ID)
-  ENCODING("i16imm_pcrel",    ENCODING_IW)
-  ENCODING("i32imm_pcrel",    ENCODING_ID)
-  ENCODING("brtarget32",      ENCODING_ID)
-  ENCODING("brtarget16",      ENCODING_IW)
-  ENCODING("brtarget8",       ENCODING_IB)
-  ENCODING("i64imm",          ENCODING_IO)
-  ENCODING("offset16_8",      ENCODING_Ia)
-  ENCODING("offset16_16",     ENCODING_Ia)
-  ENCODING("offset16_32",     ENCODING_Ia)
-  ENCODING("offset32_8",      ENCODING_Ia)
-  ENCODING("offset32_16",     ENCODING_Ia)
-  ENCODING("offset32_32",     ENCODING_Ia)
-  ENCODING("offset32_64",     ENCODING_Ia)
-  ENCODING("offset64_8",      ENCODING_Ia)
-  ENCODING("offset64_16",     ENCODING_Ia)
-  ENCODING("offset64_32",     ENCODING_Ia)
-  ENCODING("offset64_64",     ENCODING_Ia)
-  ENCODING("srcidx8",         ENCODING_SI)
-  ENCODING("srcidx16",        ENCODING_SI)
-  ENCODING("srcidx32",        ENCODING_SI)
-  ENCODING("srcidx64",        ENCODING_SI)
-  ENCODING("dstidx8",         ENCODING_DI)
-  ENCODING("dstidx16",        ENCODING_DI)
-  ENCODING("dstidx32",        ENCODING_DI)
-  ENCODING("dstidx64",        ENCODING_DI)
+  ENCODING("i16imm",             ENCODING_Iv)
+  ENCODING("i16i8imm",           ENCODING_IB)
+  ENCODING("i32imm",             ENCODING_Iv)
+  ENCODING("i32i8imm",           ENCODING_IB)
+  ENCODING("i64i32imm",          ENCODING_ID)
+  ENCODING("i64i8imm",           ENCODING_IB)
+  ENCODING("i8imm",              ENCODING_IB)
+  ENCODING("u8imm",              ENCODING_IB)
+  ENCODING("i16u8imm",           ENCODING_IB)
+  ENCODING("i32u8imm",           ENCODING_IB)
+  ENCODING("i64u8imm",           ENCODING_IB)
+  ENCODING("i64i32imm_brtarget", ENCODING_ID)
+  ENCODING("i16imm_brtarget",    ENCODING_IW)
+  ENCODING("i32imm_brtarget",    ENCODING_ID)
+  ENCODING("brtarget32",         ENCODING_ID)
+  ENCODING("brtarget16",         ENCODING_IW)
+  ENCODING("brtarget8",          ENCODING_IB)
+  ENCODING("i64imm",             ENCODING_IO)
+  ENCODING("offset16_8",         ENCODING_Ia)
+  ENCODING("offset16_16",        ENCODING_Ia)
+  ENCODING("offset16_32",        ENCODING_Ia)
+  ENCODING("offset32_8",         ENCODING_Ia)
+  ENCODING("offset32_16",        ENCODING_Ia)
+  ENCODING("offset32_32",        ENCODING_Ia)
+  ENCODING("offset32_64",        ENCODING_Ia)
+  ENCODING("offset64_8",         ENCODING_Ia)
+  ENCODING("offset64_16",        ENCODING_Ia)
+  ENCODING("offset64_32",        ENCODING_Ia)
+  ENCODING("offset64_64",        ENCODING_Ia)
+  ENCODING("srcidx8",            ENCODING_SI)
+  ENCODING("srcidx16",           ENCODING_SI)
+  ENCODING("srcidx32",           ENCODING_SI)
+  ENCODING("srcidx64",           ENCODING_SI)
+  ENCODING("dstidx8",            ENCODING_DI)
+  ENCODING("dstidx16",           ENCODING_DI)
+  ENCODING("dstidx32",           ENCODING_DI)
+  ENCODING("dstidx64",           ENCODING_DI)
   errs() << "Unhandled relocation encoding " << s << "\n";
   llvm_unreachable("Unhandled relocation encoding");
 }


        


More information about the llvm-commits mailing list