[llvm] r335300 - Revert r335297 "[X86] Implement more of x86-64 large and medium PIC code models"
Reid Kleckner via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 21 15:19:05 PDT 2018
Author: rnk
Date: Thu Jun 21 15:19:05 2018
New Revision: 335300
URL: http://llvm.org/viewvc/llvm-project?rev=335300&view=rev
Log:
Revert r335297 "[X86] Implement more of x86-64 large and medium PIC code models"
MCJIT can't handle R_X86_64_GOT64 yet.
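For context: with r335297 applied, 64-bit large-model PIC first materialized the GOT base with label
arithmetic and then indexed it through a 64-bit absolute @GOT immediate. The sketch below is lifted from
the LARGE-PIC checks of the code-model.ll test removed by this revert (labels renamed); the movabsq of a
sym@GOT operand is what becomes the R_X86_64_GOT64 relocation that MCJIT's RuntimeDyld cannot resolve yet,
so the change has to wait until that support exists.

.Ltmp0:
        leaq    .Ltmp0(%rip), %rax                    # address of the local label
        movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp0, %rcx   # distance from the label to the GOT
        addq    %rcx, %rax                            # %rax = GOT base
        movabsq $extern_data@GOT, %rcx                # 64-bit GOT slot offset -> R_X86_64_GOT64
        movq    (%rax,%rcx), %rax                     # load the symbol's address out of the GOT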
Removed:
llvm/trunk/test/CodeGen/X86/code-model.ll
Modified:
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/lib/Target/X86/X86InstrCompiler.td
llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
llvm/trunk/lib/Target/X86/X86MCInstLower.cpp
llvm/trunk/lib/Target/X86/X86Subtarget.cpp
llvm/trunk/lib/Target/X86/X86TargetMachine.cpp
llvm/trunk/test/CodeGen/X86/cleanuppad-large-codemodel.ll
llvm/trunk/test/CodeGen/X86/fast-isel-call-cleanup.ll
llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll
llvm/trunk/test/CodeGen/X86/hipe-cc64.ll
llvm/trunk/utils/UpdateTestChecks/asm.py
llvm/trunk/utils/update_llc_test_checks.py
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Thu Jun 21 15:19:05 2018
@@ -940,11 +940,11 @@ bool X86DAGToDAGISel::matchWrapper(SDVal
bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
- // In every code model except the 64-bit large code model, we can use an
- // address mode to get the GOT entry or the global itself.
+ // Only do this address mode folding for 64-bit if we're in the small code
+ // model.
+ // FIXME: But we can do GOTPCREL addressing in the medium code model.
CodeModel::Model M = TM.getCodeModel();
- if (Subtarget->is64Bit() && M != CodeModel::Small && M != CodeModel::Kernel &&
- !(M == CodeModel::Medium && IsRIPRel))
+ if (Subtarget->is64Bit() && M != CodeModel::Small && M != CodeModel::Kernel)
return true;
// Base and index reg must be 0 in order to use %rip as base.
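The FIXME restored above refers to folds like the ones the removed code-model.ll expected for the medium
PIC model, where non-local data is still reachable through a single RIP-relative GOT load; a sketch based
on those removed SMALL-PIC/MEDIUM-PIC checks:

        movq    extern_data@GOTPCREL(%rip), %rax   # GOT load folded into the address mode
        movl    8(%rax), %eax                      # then index the object as usual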
Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Thu Jun 21 15:19:05 2018
@@ -37,10 +37,6 @@ let hasSideEffects = 0, isNotDuplicable
def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
"", []>;
-// 64-bit large code model PIC base construction.
-let hasSideEffects = 0, mayLoad = 1, isNotDuplicable = 1, SchedRW = [WriteJump] in
- def MOVGOT64r : PseudoI<(outs GR64:$reg),
- (ins GR64:$scratch, i64i32imm_pcrel:$got), []>;
// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Thu Jun 21 15:19:05 2018
@@ -11333,9 +11333,7 @@ isSafeToMoveRegClassDefs(const TargetReg
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
- assert((!Subtarget.is64Bit() ||
- MF->getTarget().getCodeModel() == CodeModel::Medium ||
- MF->getTarget().getCodeModel() == CodeModel::Large) &&
+ assert(!Subtarget.is64Bit() &&
"X86-64 PIC uses RIP relative addressing");
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
@@ -11346,8 +11344,7 @@ unsigned X86InstrInfo::getGlobalBaseReg(
// Create the register. The code to initialize it is inserted
// later, by the CGBR pass (below).
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- GlobalBaseReg = RegInfo.createVirtualRegister(
- Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
+ GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
X86FI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
@@ -12721,10 +12718,9 @@ namespace {
static_cast<const X86TargetMachine *>(&MF.getTarget());
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- // Don't do anything in the 64-bit small and kernel code models. They use
- // RIP-relative addressing for everything.
- if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
- TM->getCodeModel() == CodeModel::Kernel))
+ // Don't do anything if this is 64-bit as 64-bit PIC
+ // uses RIP relative addressing.
+ if (STI.is64Bit())
return false;
// Only emit a global base reg in PIC mode.
@@ -12751,41 +12747,17 @@ namespace {
else
PC = GlobalBaseReg;
- if (STI.is64Bit()) {
- if (TM->getCodeModel() == CodeModel::Medium) {
- // In the medium code model, use a RIP-relative LEA to materialize the
- // GOT.
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
- .addReg(X86::RIP)
- .addImm(0)
- .addReg(0)
- .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
- .addReg(0);
- } else if (TM->getCodeModel() == CodeModel::Large) {
- // Loading the GOT in the large code model requires math with labels,
- // so we use a pseudo instruction and expand it during MC emission.
- unsigned Scratch = RegInfo.createVirtualRegister(&X86::GR64RegClass);
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVGOT64r), PC)
- .addReg(Scratch, RegState::Undef | RegState::Define)
- .addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
- } else {
- llvm_unreachable("unexpected code model");
- }
- } else {
- // Operand of MovePCtoStack is completely ignored by asm printer. It's
- // only used in JIT code emission as displacement to pc.
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
-
- // If we're using vanilla 'GOT' PIC style, we should use relative
- // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
- if (STI.isPICStyleGOT()) {
- // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
- // %some_register
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
- .addReg(PC)
- .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
- X86II::MO_GOT_ABSOLUTE_ADDRESS);
- }
+ // Operand of MovePCtoStack is completely ignored by asm printer. It's
+ // only used in JIT code emission as displacement to pc.
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
+
+ // If we're using vanilla 'GOT' PIC style, we should use relative addressing
+ // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
+ if (STI.isPICStyleGOT()) {
+ // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
+ .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
+ X86II::MO_GOT_ABSOLUTE_ADDRESS);
}
return true;
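With the 64-bit paths reverted, this pass (CGBR) again sets up only the 32-bit PIC base: MOVPC32r expands
to a call/pop pair, and for GOT-style PIC the addl of _GLOBAL_OFFSET_TABLE_ described in the comment above
follows it. Roughly, with illustrative label names:

        calll   .L0$pb                 # MOVPC32r: push the address of .L0$pb ...
.L0$pb:
        popl    %ebx                   # ... and pop it to recover the PC
.Ltmp0:
        addl    $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %ebx   # %ebx now points at the GOT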
Modified: llvm/trunk/lib/Target/X86/X86MCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86MCInstLower.cpp?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86MCInstLower.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86MCInstLower.cpp Thu Jun 21 15:19:05 2018
@@ -1982,41 +1982,6 @@ void X86AsmPrinter::EmitInstruction(cons
return;
}
- case X86::MOVGOT64r: {
- // Materializes the GOT for the 64-bit large code model.
- MCSymbol *DotSym = OutContext.createTempSymbol();
- OutStreamer->EmitLabel(DotSym);
-
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned ScratchReg = MI->getOperand(1).getReg();
- MCSymbol *GOTSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
-
- // .LtmpN: leaq .LtmpN(%rip), %dst
- const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
- EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
- .addReg(DstReg) // dest
- .addReg(X86::RIP) // base
- .addImm(1) // scale
- .addReg(0) // index
- .addExpr(DotExpr) // disp
- .addReg(0)); // seg
-
- // movq $_GLOBAL_OFFSET_TABLE_ - .LtmpN, %scratch
- const MCExpr *GOTSymExpr = MCSymbolRefExpr::create(GOTSym, OutContext);
- const MCExpr *GOTDiffExpr =
- MCBinaryExpr::createSub(GOTSymExpr, DotExpr, OutContext);
- EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
- .addReg(ScratchReg) // dest
- .addExpr(GOTDiffExpr)); // disp
-
- // addq %scratch, %dst
- EmitAndCountInstruction(MCInstBuilder(X86::ADD64rr)
- .addReg(DstReg) // dest
- .addReg(DstReg) // dest
- .addReg(ScratchReg)); // src
- return;
- }
-
case X86::ADD32ri: {
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
Modified: llvm/trunk/lib/Target/X86/X86Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.cpp?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86Subtarget.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86Subtarget.cpp Thu Jun 21 15:19:05 2018
@@ -68,30 +68,14 @@ X86Subtarget::classifyGlobalReference(co
unsigned char
X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
- // If we're not PIC, it's not very interesting.
- if (!isPositionIndependent())
+ // 64 bits can use %rip addressing for anything local.
+ if (is64Bit())
return X86II::MO_NO_FLAG;
- // For 64-bit, we need to consider the code model.
- if (is64Bit()) {
- switch (TM.getCodeModel()) {
- // 64-bit small code model is simple: All rip-relative.
- case CodeModel::Small:
- case CodeModel::Kernel:
- return X86II::MO_NO_FLAG;
-
- // The large PIC code model uses GOTOFF.
- case CodeModel::Large:
- return X86II::MO_GOTOFF;
-
- // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
- case CodeModel::Medium:
- if (isa<Function>(GV))
- return X86II::MO_NO_FLAG; // All code is RIP-relative
- return X86II::MO_GOTOFF; // Local symbols use GOTOFF.
- }
- llvm_unreachable("invalid code model");
- }
+ // If this is for a position dependent executable, the static linker can
+ // figure it out.
+ if (!isPositionIndependent())
+ return X86II::MO_NO_FLAG;
// The COFF dynamic linker just patches the executable sections.
if (isTargetCOFF())
@@ -114,7 +98,7 @@ X86Subtarget::classifyLocalReference(con
unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
const Module &M) const {
// Large model never uses stubs.
- if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
+ if (TM.getCodeModel() == CodeModel::Large)
return X86II::MO_NO_FLAG;
// Absolute symbols can be referenced directly.
@@ -136,7 +120,7 @@ unsigned char X86Subtarget::classifyGlob
if (isTargetCOFF())
return X86II::MO_DLLIMPORT;
- if (is64Bit() && TM.getCodeModel() != CodeModel::Large)
+ if (is64Bit())
return X86II::MO_GOTPCREL;
if (isTargetDarwin()) {
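For reference, the classification deleted here is what produced the medium-model PIC sequences in the
removed test further down: DSO-local data was addressed as a 64-bit @GOTOFF offset from a RIP-relative GOT
base (copied from the removed MEDIUM-PIC checks):

        leaq    _GLOBAL_OFFSET_TABLE_(%rip), %rcx   # GOT base is still within RIP-relative range
        movabsq $global_data@GOTOFF, %rax           # 64-bit offset of the local symbol from the GOT
        addq    %rcx, %rax                          # %rax = &global_data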
Modified: llvm/trunk/lib/Target/X86/X86TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetMachine.cpp?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetMachine.cpp Thu Jun 21 15:19:05 2018
@@ -156,7 +156,6 @@ static std::string computeDataLayout(con
}
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
- bool JIT,
Optional<Reloc::Model> RM) {
bool is64Bit = TT.getArch() == Triple::x86_64;
if (!RM.hasValue()) {
@@ -168,8 +167,6 @@ static Reloc::Model getEffectiveRelocMod
return Reloc::PIC_;
return Reloc::DynamicNoPIC;
}
- if (JIT)
- return Reloc::Static;
if (TT.isOSWindows() && is64Bit)
return Reloc::PIC_;
return Reloc::Static;
@@ -213,7 +210,7 @@ X86TargetMachine::X86TargetMachine(const
CodeGenOpt::Level OL, bool JIT)
: LLVMTargetMachine(
T, computeDataLayout(TT), TT, CPU, FS, Options,
- getEffectiveRelocModel(TT, JIT, RM),
+ getEffectiveRelocModel(TT, RM),
getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL),
TLOF(createTLOF(getTargetTriple())) {
// Windows stack unwinder gets confused when execution flow "falls through"
Modified: llvm/trunk/test/CodeGen/X86/cleanuppad-large-codemodel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cleanuppad-large-codemodel.ll?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cleanuppad-large-codemodel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cleanuppad-large-codemodel.ll Thu Jun 21 15:19:05 2018
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-pc-windows-msvc -code-model=large -relocation-model=static -o - < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-windows-msvc -code-model=large -o - < %s | FileCheck %s
declare i32 @__CxxFrameHandler3(...)
Removed: llvm/trunk/test/CodeGen/X86/code-model.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/code-model.ll?rev=335299&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/code-model.ll (original)
+++ llvm/trunk/test/CodeGen/X86/code-model.ll (removed)
@@ -1,384 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; Run with --no_x86_scrub_rip because we care a lot about how globals are
-; accessed in the code model.
-
-; RUN: llc < %s -relocation-model=static -code-model=small | FileCheck %s --check-prefix=CHECK --check-prefix=SMALL-STATIC
-; RUN: llc < %s -relocation-model=static -code-model=medium | FileCheck %s --check-prefix=CHECK --check-prefix=MEDIUM-STATIC
-; RUN: llc < %s -relocation-model=static -code-model=large | FileCheck %s --check-prefix=CHECK --check-prefix=LARGE-STATIC
-; RUN: llc < %s -relocation-model=pic -code-model=small | FileCheck %s --check-prefix=CHECK --check-prefix=SMALL-PIC
-; RUN: llc < %s -relocation-model=pic -code-model=medium | FileCheck %s --check-prefix=CHECK --check-prefix=MEDIUM-PIC
-; RUN: llc < %s -relocation-model=pic -code-model=large | FileCheck %s --check-prefix=CHECK --check-prefix=LARGE-PIC
-
-; Generated from this C source:
-;
-; static int static_data[10];
-; int global_data[10] = {1, 2};
-; extern int extern_data[10];
-;
-; int *lea_static_data() { return &static_data[0]; }
-; int *lea_global_data() { return &global_data[0]; }
-; int *lea_extern_data() { return &extern_data[0]; }
-;
-; static void static_fn(void) {}
-; void global_fn(void) {}
-; void extern_fn(void);
-;
-; typedef void (*void_fn)(void);
-; void_fn lea_static_fn() { return &static_fn; }
-; void_fn lea_global_fn() { return &global_fn; }
-; void_fn lea_extern_fn() { return &extern_fn; }
-
-
-; ModuleID = 'model.c'
-source_filename = "model.c"
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64--linux"
-
-@global_data = dso_local global [10 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0], align 16
-@static_data = internal global [10 x i32] zeroinitializer, align 16
-@extern_data = external global [10 x i32], align 16
-
-define dso_local i32* @lea_static_data() #0 {
-; SMALL-STATIC-LABEL: lea_static_data:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $static_data, %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: lea_static_data:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $static_data, %rax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: lea_static_data:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $static_data, %rax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: lea_static_data:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: leaq static_data(%rip), %rax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: lea_static_data:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT: leaq _GLOBAL_OFFSET_TABLE_(%rip), %rcx
-; MEDIUM-PIC-NEXT:    movabsq $static_data@GOTOFF, %rax
-; MEDIUM-PIC-NEXT: addq %rcx, %rax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: lea_static_data:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp0:
-; LARGE-PIC-NEXT: leaq .Ltmp0(%rip), %rcx
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp0, %rax
-; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $static_data@GOTOFF, %rax
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT: retq
- ret i32* getelementptr inbounds ([10 x i32], [10 x i32]* @static_data, i64 0, i64 0)
-}
-
-define dso_local i32* @lea_global_data() #0 {
-; SMALL-STATIC-LABEL: lea_global_data:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $global_data, %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: lea_global_data:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $global_data, %rax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: lea_global_data:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $global_data, %rax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: lea_global_data:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: leaq global_data(%rip), %rax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: lea_global_data:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT: leaq _GLOBAL_OFFSET_TABLE_(%rip), %rcx
-; MEDIUM-PIC-NEXT:    movabsq $global_data@GOTOFF, %rax
-; MEDIUM-PIC-NEXT: addq %rcx, %rax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: lea_global_data:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp1:
-; LARGE-PIC-NEXT: leaq .Ltmp1(%rip), %rcx
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp1, %rax
-; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $global_data@GOTOFF, %rax
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT: retq
- ret i32* getelementptr inbounds ([10 x i32], [10 x i32]* @global_data, i64 0, i64 0)
-}
-
-define dso_local i32* @lea_extern_data() #0 {
-; SMALL-STATIC-LABEL: lea_extern_data:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $extern_data, %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: lea_extern_data:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $extern_data, %rax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: lea_extern_data:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $extern_data, %rax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: lea_extern_data:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT:    movq extern_data@GOTPCREL(%rip), %rax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: lea_extern_data:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT:    movq extern_data@GOTPCREL(%rip), %rax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: lea_extern_data:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp2:
-; LARGE-PIC-NEXT: leaq .Ltmp2(%rip), %rax
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp2, %rcx
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT:    movabsq $extern_data@GOT, %rcx
-; LARGE-PIC-NEXT: movq (%rax,%rcx), %rax
-; LARGE-PIC-NEXT: retq
- ret i32* getelementptr inbounds ([10 x i32], [10 x i32]* @extern_data, i64 0, i64 0)
-}
-
-define dso_local i32 @load_global_data() #0 {
-; SMALL-STATIC-LABEL: load_global_data:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl global_data+8(%rip), %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: load_global_data:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $global_data, %rax
-; MEDIUM-STATIC-NEXT: movl 8(%rax), %eax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: load_global_data:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $global_data, %rax
-; LARGE-STATIC-NEXT: movl 8(%rax), %eax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: load_global_data:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: movl global_data+8(%rip), %eax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: load_global_data:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT: leaq _GLOBAL_OFFSET_TABLE_(%rip), %rax
-; MEDIUM-PIC-NEXT:    movabsq $global_data@GOTOFF, %rcx
-; MEDIUM-PIC-NEXT: movl 8(%rax,%rcx), %eax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: load_global_data:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp3:
-; LARGE-PIC-NEXT: leaq .Ltmp3(%rip), %rax
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp3, %rcx
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT:    movabsq $global_data@GOTOFF, %rcx
-; LARGE-PIC-NEXT: movl 8(%rax,%rcx), %eax
-; LARGE-PIC-NEXT: retq
- %rv = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @global_data, i64 0, i64 2)
- ret i32 %rv
-}
-
-define dso_local i32 @load_extern_data() #0 {
-; SMALL-STATIC-LABEL: load_extern_data:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl extern_data+8(%rip), %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: load_extern_data:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $extern_data, %rax
-; MEDIUM-STATIC-NEXT: movl 8(%rax), %eax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: load_extern_data:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $extern_data, %rax
-; LARGE-STATIC-NEXT: movl 8(%rax), %eax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: load_extern_data:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT:    movq extern_data@GOTPCREL(%rip), %rax
-; SMALL-PIC-NEXT: movl 8(%rax), %eax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: load_extern_data:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT:    movq extern_data@GOTPCREL(%rip), %rax
-; MEDIUM-PIC-NEXT: movl 8(%rax), %eax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: load_extern_data:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp4:
-; LARGE-PIC-NEXT: leaq .Ltmp4(%rip), %rax
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp4, %rcx
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT:    movabsq $extern_data@GOT, %rcx
-; LARGE-PIC-NEXT: movq (%rax,%rcx), %rax
-; LARGE-PIC-NEXT: movl 8(%rax), %eax
-; LARGE-PIC-NEXT: retq
- %rv = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @extern_data, i64 0, i64 2)
- ret i32 %rv
-}
-
-define dso_local void @global_fn() #0 {
-; CHECK-LABEL: global_fn:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- ret void
-}
-
-define internal void @static_fn() #0 {
-; CHECK-LABEL: static_fn:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- ret void
-}
-
-declare void @extern_fn()
-
-define dso_local void ()* @lea_static_fn() #0 {
-; SMALL-STATIC-LABEL: lea_static_fn:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $static_fn, %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: lea_static_fn:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $static_fn, %rax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: lea_static_fn:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $static_fn, %rax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: lea_static_fn:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: leaq static_fn(%rip), %rax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: lea_static_fn:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT: movabsq $static_fn, %rax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: lea_static_fn:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp5:
-; LARGE-PIC-NEXT: leaq .Ltmp5(%rip), %rcx
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp5, %rax
-; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $static_fn@GOTOFF, %rax
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT: retq
- ret void ()* @static_fn
-}
-
-define dso_local void ()* @lea_global_fn() #0 {
-; SMALL-STATIC-LABEL: lea_global_fn:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $global_fn, %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: lea_global_fn:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $global_fn, %rax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: lea_global_fn:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $global_fn, %rax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: lea_global_fn:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: leaq global_fn(%rip), %rax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: lea_global_fn:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT: movabsq $global_fn, %rax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: lea_global_fn:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp6:
-; LARGE-PIC-NEXT: leaq .Ltmp6(%rip), %rcx
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp6, %rax
-; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $global_fn@GOTOFF, %rax
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT: retq
- ret void ()* @global_fn
-}
-
-define dso_local void ()* @lea_extern_fn() #0 {
-; SMALL-STATIC-LABEL: lea_extern_fn:
-; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $extern_fn, %eax
-; SMALL-STATIC-NEXT: retq
-;
-; MEDIUM-STATIC-LABEL: lea_extern_fn:
-; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $extern_fn, %rax
-; MEDIUM-STATIC-NEXT: retq
-;
-; LARGE-STATIC-LABEL: lea_extern_fn:
-; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $extern_fn, %rax
-; LARGE-STATIC-NEXT: retq
-;
-; SMALL-PIC-LABEL: lea_extern_fn:
-; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT:    movq extern_fn@GOTPCREL(%rip), %rax
-; SMALL-PIC-NEXT: retq
-;
-; MEDIUM-PIC-LABEL: lea_extern_fn:
-; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT:    movq extern_fn@GOTPCREL(%rip), %rax
-; MEDIUM-PIC-NEXT: retq
-;
-; LARGE-PIC-LABEL: lea_extern_fn:
-; LARGE-PIC: # %bb.0:
-; LARGE-PIC-NEXT: .Ltmp7:
-; LARGE-PIC-NEXT: leaq .Ltmp7(%rip), %rax
-; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.Ltmp7, %rcx
-; LARGE-PIC-NEXT: addq %rcx, %rax
-; LARGE-PIC-NEXT:    movabsq $extern_fn@GOT, %rcx
-; LARGE-PIC-NEXT: movq (%rax,%rcx), %rax
-; LARGE-PIC-NEXT: retq
- ret void ()* @extern_fn
-}
-
-attributes #0 = { noinline nounwind uwtable }
-
-!llvm.module.flags = !{!0, !1, !2}
-!llvm.ident = !{!3}
-
-!0 = !{i32 1, !"wchar_size", i32 4}
-!1 = !{i32 7, !"PIC Level", i32 2}
-!2 = !{i32 7, !"PIE Level", i32 2}
-!3 = !{!"clang version 7.0.0 "}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-call-cleanup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-call-cleanup.ll?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-call-cleanup.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-call-cleanup.ll Thu Jun 21 15:19:05 2018
@@ -1,4 +1,4 @@
-; RUN: llc -fast-isel-sink-local-values -fast-isel -O0 -code-model=large -mcpu=generic -mtriple=x86_64-linux -relocation-model=static < %s | FileCheck %s
+; RUN: llc -fast-isel-sink-local-values -fast-isel -O0 -code-model=large -mcpu=generic -mtriple=x86_64-apple-darwin10 -relocation-model=pic < %s | FileCheck %s
; Check that fast-isel cleans up when it fails to lower a call instruction.
define void @fastiselcall() {
@@ -9,7 +9,7 @@ entry:
; FastISel's local value code was dead, so it's gone.
; CHECK-NOT: movl $42,
; SDag-ISel's arg mov:
-; CHECK: movabsq $targetfn, %[[REG:[^ ]*]]
+; CHECK: movabsq $_targetfn, %[[REG:[^ ]*]]
; CHECK: movl $42, %edi
; CHECK: callq *%[[REG]]
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll Thu Jun 21 15:19:05 2018
@@ -16,11 +16,7 @@ define float @constpool_float(float %x)
;
; LARGE-LABEL: constpool_float:
; LARGE: ## %bb.0:
-; LARGE-NEXT: Ltmp0:
-; LARGE-NEXT: leaq {{.*}}(%rip), %rax
-; LARGE-NEXT: movabsq $__GLOBAL_OFFSET_TABLE_-Ltmp0, %rcx
-; LARGE-NEXT: addq %rcx, %rax
-; LARGE-NEXT:    movabsq $LCPI0_0@GOTOFF, %rax
+; LARGE-NEXT: movabsq $LCPI0_0, %rax
; LARGE-NEXT: addss (%rax), %xmm0
; LARGE-NEXT: retq
;
@@ -32,11 +28,7 @@ define float @constpool_float(float %x)
;
; LARGE_AVX-LABEL: constpool_float:
; LARGE_AVX: ## %bb.0:
-; LARGE_AVX-NEXT: Ltmp0:
-; LARGE_AVX-NEXT: leaq {{.*}}(%rip), %rax
-; LARGE_AVX-NEXT: movabsq $__GLOBAL_OFFSET_TABLE_-Ltmp0, %rcx
-; LARGE_AVX-NEXT: addq %rcx, %rax
-; LARGE_AVX-NEXT:    movabsq $LCPI0_0@GOTOFF, %rax
+; LARGE_AVX-NEXT: movabsq $LCPI0_0, %rax
; LARGE_AVX-NEXT: vaddss (%rax), %xmm0, %xmm0
; LARGE_AVX-NEXT: retq
@@ -53,11 +45,7 @@ define double @constpool_double(double %
;
; LARGE-LABEL: constpool_double:
; LARGE: ## %bb.0:
-; LARGE-NEXT: Ltmp1:
-; LARGE-NEXT: leaq {{.*}}(%rip), %rax
-; LARGE-NEXT: movabsq $__GLOBAL_OFFSET_TABLE_-Ltmp1, %rcx
-; LARGE-NEXT: addq %rcx, %rax
-; LARGE-NEXT:    movabsq $LCPI1_0@GOTOFF, %rax
+; LARGE-NEXT: movabsq $LCPI1_0, %rax
; LARGE-NEXT: addsd (%rax), %xmm0
; LARGE-NEXT: retq
;
@@ -69,11 +57,7 @@ define double @constpool_double(double %
;
; LARGE_AVX-LABEL: constpool_double:
; LARGE_AVX: ## %bb.0:
-; LARGE_AVX-NEXT: Ltmp1:
-; LARGE_AVX-NEXT: leaq {{.*}}(%rip), %rax
-; LARGE_AVX-NEXT: movabsq $__GLOBAL_OFFSET_TABLE_-Ltmp1, %rcx
-; LARGE_AVX-NEXT: addq %rcx, %rax
-; LARGE_AVX-NEXT:    movabsq $LCPI1_0@GOTOFF, %rax
+; LARGE_AVX-NEXT: movabsq $LCPI1_0, %rax
; LARGE_AVX-NEXT: vaddsd (%rax), %xmm0, %xmm0
; LARGE_AVX-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/hipe-cc64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hipe-cc64.ll?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hipe-cc64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hipe-cc64.ll Thu Jun 21 15:19:05 2018
@@ -1,4 +1,4 @@
-; RUN: llc < %s -stack-symbol-ordering=0 -tailcallopt -relocation-model=static -code-model=medium -stack-alignment=8 -mtriple=x86_64-linux-gnu -mcpu=opteron | FileCheck %s
+; RUN: llc < %s -stack-symbol-ordering=0 -tailcallopt -code-model=medium -stack-alignment=8 -mtriple=x86_64-linux-gnu -mcpu=opteron | FileCheck %s
; Check the HiPE calling convention works (x86-64)
Modified: llvm/trunk/utils/UpdateTestChecks/asm.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/UpdateTestChecks/asm.py?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/utils/UpdateTestChecks/asm.py (original)
+++ llvm/trunk/utils/UpdateTestChecks/asm.py Thu Jun 21 15:19:05 2018
@@ -110,9 +110,8 @@ def scrub_asm_x86(asm, args):
asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r'{{[-0-9]+}}(%\1{{[sb]}}p)\2', asm)
# Generically match the stack offset of a memory operand.
asm = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', asm)
- if getattr(args, 'x86_scrub_rip', False):
- # Generically match a RIP-relative memory operand.
- asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
+ # Generically match a RIP-relative memory operand.
+ asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
# Generically match a LCP symbol.
asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', asm)
if getattr(args, 'extra_scrub', False):
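Restoring the unconditional substitution means RIP-relative operands are always wildcarded again. For
example, a generated check such as

; SMALL-PIC-NEXT:    leaq static_data(%rip), %rax

is scrubbed to

; SMALL-PIC-NEXT:    leaq {{.*}}(%rip), %rax

which is exactly why the removed code-model.ll had to be generated with the (now also removed)
--no_x86_scrub_rip option.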
Modified: llvm/trunk/utils/update_llc_test_checks.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/update_llc_test_checks.py?rev=335300&r1=335299&r2=335300&view=diff
==============================================================================
--- llvm/trunk/utils/update_llc_test_checks.py (original)
+++ llvm/trunk/utils/update_llc_test_checks.py Thu Jun 21 15:19:05 2018
@@ -30,11 +30,6 @@ def main():
parser.add_argument(
'--extra_scrub', action='store_true',
help='Always use additional regex to further reduce diffs between various subtargets')
- parser.add_argument(
- '--x86_scrub_rip', action='store_true', default=True,
- help='Use more regex for x86 matching to reduce diffs between various subtargets')
- parser.add_argument(
- '--no_x86_scrub_rip', action='store_false', dest='x86_scrub_rip')
parser.add_argument('tests', nargs='+')
args = parser.parse_args()