[llvm] 5b22bcc - [X86][ELF] Prefer to lower MC_GlobalAddress operands to .Lfoo$local
Fangrui Song via llvm-commits
llvm-commits@lists.llvm.org
Thu Jan 30 17:53:00 PST 2020
Author: Fangrui Song
Date: 2020-01-30T17:52:35-08:00
New Revision: 5b22bcc2b70d82c73ce5428170dcc2038a729d74
URL: https://github.com/llvm/llvm-project/commit/5b22bcc2b70d82c73ce5428170dcc2038a729d74
DIFF: https://github.com/llvm/llvm-project/commit/5b22bcc2b70d82c73ce5428170dcc2038a729d74.diff
LOG: [X86][ELF] Prefer to lower MC_GlobalAddress operands to .Lfoo$local
For an MC_GlobalAddress reference to a dso_local external GlobalValue with a definition, emit .Lfoo$local to avoid a relocation.
-fno-pic and -fpie can infer dso_local but -fpic cannot. In the future,
we can explore the possibility of inferring dso_local with -fpic. As the
description of D73228 says, LLVM's existing IPO optimization behaviors
(like -fno-semantic-interposition) and a previous assembly behavior give
us enough license to be aggressive here.
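To illustrate the effect, here is a minimal sketch (not part of this patch) of the kind of IR the new lowering applies to, together with the expected small-PIC code. The names @gv and @take_addr are hypothetical; the before/after assembly mirrors the updated checks in code-model-elf.ll below.
  ; Sketch only: assumes a dso_local definition in the same module, compiled
  ; with -relocation-model=pic on x86-64 ELF.
  @gv = dso_local global i32 0

  define dso_local i32* @take_addr() {
    ; Before: leaq gv(%rip), %rax
    ; After:  leaq .Lgv$local(%rip), %rax
    ; .Lgv$local is the local alias D73228 emits at gv's definition, so the
    ; PC-relative reference binds to the local definition and needs no
    ; relocation against the (potentially preemptible) symbol gv.
    ret i32* @gv
  }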
Reviewed By: rnk
Differential Revision: https://reviews.llvm.org/D73230
Added:
Modified:
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/test/CodeGen/X86/code-model-elf.ll
llvm/test/CodeGen/X86/emutls.ll
llvm/test/CodeGen/X86/fold-add-pcrel.ll
llvm/test/CodeGen/X86/linux-preemption.ll
llvm/test/CodeGen/X86/oddsubvector.ll
llvm/test/CodeGen/X86/pr38795.ll
llvm/test/CodeGen/X86/tailcallpic1.ll
llvm/test/CodeGen/X86/tailcallpic3.ll
llvm/test/CodeGen/X86/tailccpic1.ll
llvm/test/CodeGen/X86/tls.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 44576a07d642..2eab23543820 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -137,6 +137,10 @@ MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
+ const Triple &TT = TM.getTargetTriple();
+ if (MO.isGlobal() && TT.isOSBinFormatELF())
+ return AsmPrinter.getSymbolPreferLocal(*MO.getGlobal());
+
const DataLayout &DL = MF.getDataLayout();
assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
"Isn't a symbol reference");
diff --git a/llvm/test/CodeGen/X86/code-model-elf.ll b/llvm/test/CodeGen/X86/code-model-elf.ll
index f7ffd6ea1eb7..748e2b0267d8 100644
--- a/llvm/test/CodeGen/X86/code-model-elf.ll
+++ b/llvm/test/CodeGen/X86/code-model-elf.ll
@@ -83,28 +83,28 @@ define dso_local i32* @lea_static_data() #0 {
define dso_local i32* @lea_global_data() #0 {
; SMALL-STATIC-LABEL: lea_global_data:
; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $global_data, %eax
+; SMALL-STATIC-NEXT: movl $.Lglobal_data$local, %eax
; SMALL-STATIC-NEXT: retq
;
; MEDIUM-STATIC-LABEL: lea_global_data:
; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $global_data, %rax
+; MEDIUM-STATIC-NEXT: movabsq $.Lglobal_data$local, %rax
; MEDIUM-STATIC-NEXT: retq
;
; LARGE-STATIC-LABEL: lea_global_data:
; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $global_data, %rax
+; LARGE-STATIC-NEXT: movabsq $.Lglobal_data$local, %rax
; LARGE-STATIC-NEXT: retq
;
; SMALL-PIC-LABEL: lea_global_data:
; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: leaq global_data(%rip), %rax
+; SMALL-PIC-NEXT: leaq .Lglobal_data$local(%rip), %rax
; SMALL-PIC-NEXT: retq
;
; MEDIUM-PIC-LABEL: lea_global_data:
; MEDIUM-PIC: # %bb.0:
; MEDIUM-PIC-NEXT: leaq _GLOBAL_OFFSET_TABLE_(%rip), %rcx
-; MEDIUM-PIC-NEXT: movabsq $global_data@GOTOFF, %rax
+; MEDIUM-PIC-NEXT: movabsq $.Lglobal_data$local@GOTOFF, %rax
; MEDIUM-PIC-NEXT: addq %rcx, %rax
; MEDIUM-PIC-NEXT: retq
;
@@ -114,7 +114,7 @@ define dso_local i32* @lea_global_data() #0 {
; LARGE-PIC-NEXT: leaq .L1$pb(%rip), %rax
; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L1$pb, %rcx
; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT: movabsq $global_data@GOTOFF, %rax
+; LARGE-PIC-NEXT: movabsq $.Lglobal_data$local@GOTOFF, %rax
; LARGE-PIC-NEXT: addq %rcx, %rax
; LARGE-PIC-NEXT: retq
ret i32* getelementptr inbounds ([10 x i32], [10 x i32]* @global_data, i64 0, i64 0)
@@ -161,30 +161,30 @@ define dso_local i32* @lea_extern_data() #0 {
define dso_local i32 @load_global_data() #0 {
; SMALL-STATIC-LABEL: load_global_data:
; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl global_data+8(%rip), %eax
+; SMALL-STATIC-NEXT: movl .Lglobal_data$local+8(%rip), %eax
; SMALL-STATIC-NEXT: retq
;
; MEDIUM-STATIC-LABEL: load_global_data:
; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $global_data, %rax
+; MEDIUM-STATIC-NEXT: movabsq $.Lglobal_data$local, %rax
; MEDIUM-STATIC-NEXT: movl 8(%rax), %eax
; MEDIUM-STATIC-NEXT: retq
;
; LARGE-STATIC-LABEL: load_global_data:
; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $global_data, %rax
+; LARGE-STATIC-NEXT: movabsq $.Lglobal_data$local, %rax
; LARGE-STATIC-NEXT: movl 8(%rax), %eax
; LARGE-STATIC-NEXT: retq
;
; SMALL-PIC-LABEL: load_global_data:
; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: movl global_data+8(%rip), %eax
+; SMALL-PIC-NEXT: movl .Lglobal_data$local+8(%rip), %eax
; SMALL-PIC-NEXT: retq
;
; MEDIUM-PIC-LABEL: load_global_data:
; MEDIUM-PIC: # %bb.0:
; MEDIUM-PIC-NEXT: leaq _GLOBAL_OFFSET_TABLE_(%rip), %rax
-; MEDIUM-PIC-NEXT: movabsq $global_data@GOTOFF, %rcx
+; MEDIUM-PIC-NEXT: movabsq $.Lglobal_data$local@GOTOFF, %rcx
; MEDIUM-PIC-NEXT: movl 8(%rax,%rcx), %eax
; MEDIUM-PIC-NEXT: retq
;
@@ -194,7 +194,7 @@ define dso_local i32 @load_global_data() #0 {
; LARGE-PIC-NEXT: leaq .L3$pb(%rip), %rax
; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L3$pb, %rcx
; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT: movabsq $global_data@GOTOFF, %rax
+; LARGE-PIC-NEXT: movabsq $.Lglobal_data$local@GOTOFF, %rax
; LARGE-PIC-NEXT: movl 8(%rcx,%rax), %eax
; LARGE-PIC-NEXT: retq
%rv = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @global_data, i64 0, i64 2)
@@ -302,27 +302,27 @@ define dso_local void ()* @lea_static_fn() #0 {
define dso_local void ()* @lea_global_fn() #0 {
; SMALL-STATIC-LABEL: lea_global_fn:
; SMALL-STATIC: # %bb.0:
-; SMALL-STATIC-NEXT: movl $global_fn, %eax
+; SMALL-STATIC-NEXT: movl $.Lglobal_fn$local, %eax
; SMALL-STATIC-NEXT: retq
;
; MEDIUM-STATIC-LABEL: lea_global_fn:
; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq $global_fn, %rax
+; MEDIUM-STATIC-NEXT: movabsq $.Lglobal_fn$local, %rax
; MEDIUM-STATIC-NEXT: retq
;
; LARGE-STATIC-LABEL: lea_global_fn:
; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq $global_fn, %rax
+; LARGE-STATIC-NEXT: movabsq $.Lglobal_fn$local, %rax
; LARGE-STATIC-NEXT: retq
;
; SMALL-PIC-LABEL: lea_global_fn:
; SMALL-PIC: # %bb.0:
-; SMALL-PIC-NEXT: leaq global_fn(%rip), %rax
+; SMALL-PIC-NEXT: leaq .Lglobal_fn$local(%rip), %rax
; SMALL-PIC-NEXT: retq
;
; MEDIUM-PIC-LABEL: lea_global_fn:
; MEDIUM-PIC: # %bb.0:
-; MEDIUM-PIC-NEXT: movabsq $global_fn, %rax
+; MEDIUM-PIC-NEXT: movabsq $.Lglobal_fn$local, %rax
; MEDIUM-PIC-NEXT: retq
;
; LARGE-PIC-LABEL: lea_global_fn:
@@ -331,7 +331,7 @@ define dso_local void ()* @lea_global_fn() #0 {
; LARGE-PIC-NEXT: leaq .L8$pb(%rip), %rax
; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L8$pb, %rcx
; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT: movabsq $global_fn@GOTOFF, %rax
+; LARGE-PIC-NEXT: movabsq $.Lglobal_fn$local@GOTOFF, %rax
; LARGE-PIC-NEXT: addq %rcx, %rax
; LARGE-PIC-NEXT: retq
ret void ()* @global_fn
diff --git a/llvm/test/CodeGen/X86/emutls.ll b/llvm/test/CodeGen/X86/emutls.ll
index fecf8fdba4b7..67e80a360c05 100644
--- a/llvm/test/CodeGen/X86/emutls.ll
+++ b/llvm/test/CodeGen/X86/emutls.ll
@@ -139,7 +139,7 @@ entry:
define i32 @f7() {
; X32-LABEL: f7:
-; X32: movl $__emutls_v.i4, (%esp)
+; X32: movl $.L__emutls_v.i4$local, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $12, %esp
@@ -153,7 +153,7 @@ entry:
define i32* @f8() {
; X32-LABEL: f8:
-; X32: movl $__emutls_v.i4, (%esp)
+; X32: movl $.L__emutls_v.i4$local, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: addl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 4
diff --git a/llvm/test/CodeGen/X86/fold-add-pcrel.ll b/llvm/test/CodeGen/X86/fold-add-pcrel.ll
index 745a641f70ff..27a16b0ce61e 100644
--- a/llvm/test/CodeGen/X86/fold-add-pcrel.ll
+++ b/llvm/test/CodeGen/X86/fold-add-pcrel.ll
@@ -3,7 +3,7 @@
; RUN: llc -mtriple=x86_64 -code-model=medium -relocation-model=static < %s | FileCheck --check-prefixes=CHECK,MSTATIC %s
; RUN: llc -mtriple=x86_64 -code-model=medium -relocation-model=pic < %s | FileCheck --check-prefixes=CHECK,MPIC %s
-@foo = dso_local global i32 0
+@foo = internal global i32 0
define dso_local i64 @zero() {
; CHECK-LABEL: zero:
diff --git a/llvm/test/CodeGen/X86/linux-preemption.ll b/llvm/test/CodeGen/X86/linux-preemption.ll
index 5305b7a1f534..5de8587038e7 100644
--- a/llvm/test/CodeGen/X86/linux-preemption.ll
+++ b/llvm/test/CodeGen/X86/linux-preemption.ll
@@ -40,9 +40,9 @@ define i32* @get_external_default_global() {
define i32* @get_strong_local_global() {
ret i32* @strong_local_global
}
-; CHECK: leaq strong_local_global(%rip), %rax
-; STATIC: movl $strong_local_global, %eax
-; CHECK32: leal strong_local_global@GOTOFF(%eax), %eax
+; CHECK: leaq .Lstrong_local_global$local(%rip), %rax
+; STATIC: movl $.Lstrong_local_global$local, %eax
+; CHECK32: leal .Lstrong_local_global$local@GOTOFF(%eax), %eax
@weak_local_global = weak dso_local global i32 42
define i32* @get_weak_local_global() {
@@ -175,9 +175,9 @@ define void()* @get_strong_local_function() {
}
; COMMON: {{^}}strong_local_function:
; COMMON-NEXT .Lstrong_local_function:
-; CHECK: leaq strong_local_function(%rip), %rax
-; STATIC: movl $strong_local_function, %eax
-; CHECK32: leal strong_local_function@GOTOFF(%eax), %eax
+; CHECK: leaq .Lstrong_local_function$local(%rip), %rax
+; STATIC: movl $.Lstrong_local_function$local, %eax
+; CHECK32: leal .Lstrong_local_function$local@GOTOFF(%eax), %eax
define weak dso_local void @weak_local_function() {
ret void
diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
index 674c79de4c73..42c3137a905f 100644
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -187,8 +187,8 @@ define <16 x i32> @PR42819(<8 x i32>* %a0) {
define void @PR42833() {
; SSE2-LABEL: PR42833:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm1
-; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm0
+; SSE2-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: addl {{.*}}(%rip), %eax
; SSE2-NEXT: movd %eax, %xmm2
@@ -208,34 +208,34 @@ define void @PR42833() {
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm4[0],xmm5[1,2,3]
-; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm3
+; SSE2-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm3
; SSE2-NEXT: psubd %xmm1, %xmm3
; SSE2-NEXT: paddd %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
-; SSE2-NEXT: movaps %xmm5, c+{{.*}}(%rip)
-; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm1
-; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm4
-; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm5
-; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm6
-; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm7
+; SSE2-NEXT: movdqa %xmm1, .Lc$local+{{.*}}(%rip)
+; SSE2-NEXT: movaps %xmm5, .Lc$local+{{.*}}(%rip)
+; SSE2-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm4
+; SSE2-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm5
+; SSE2-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm6
+; SSE2-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm7
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: psubd %xmm0, %xmm7
; SSE2-NEXT: psubd %xmm4, %xmm6
; SSE2-NEXT: psubd %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm5, d+{{.*}}(%rip)
-; SSE2-NEXT: movdqa %xmm6, d+{{.*}}(%rip)
-; SSE2-NEXT: movdqa %xmm3, d+{{.*}}(%rip)
-; SSE2-NEXT: movdqa %xmm7, d+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm5, .Ld$local+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm6, .Ld$local+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm3, .Ld$local+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm7, .Ld$local+{{.*}}(%rip)
; SSE2-NEXT: paddd %xmm4, %xmm4
; SSE2-NEXT: paddd %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
-; SSE2-NEXT: movdqa %xmm4, c+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm1, .Lc$local+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm4, .Lc$local+{{.*}}(%rip)
; SSE2-NEXT: retq
;
; SSE42-LABEL: PR42833:
; SSE42: # %bb.0:
-; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm1
-; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm0
+; SSE42-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm1
+; SSE42-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm0
; SSE42-NEXT: movd %xmm0, %eax
; SSE42-NEXT: addl {{.*}}(%rip), %eax
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = <u,1,1,1>
@@ -247,39 +247,39 @@ define void @PR42833() {
; SSE42-NEXT: cvttps2dq %xmm2, %xmm2
; SSE42-NEXT: pmulld %xmm0, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3,4,5,6,7]
-; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm3
+; SSE42-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm3
; SSE42-NEXT: psubd %xmm1, %xmm3
; SSE42-NEXT: paddd %xmm1, %xmm1
-; SSE42-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
-; SSE42-NEXT: movdqa %xmm2, c+{{.*}}(%rip)
-; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm1
-; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm2
-; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm4
-; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm5
-; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm6
+; SSE42-NEXT: movdqa %xmm1, .Lc$local+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm2, .Lc$local+{{.*}}(%rip)
+; SSE42-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm1
+; SSE42-NEXT: movdqa .Lc$local+{{.*}}(%rip), %xmm2
+; SSE42-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm4
+; SSE42-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm5
+; SSE42-NEXT: movdqa .Ld$local+{{.*}}(%rip), %xmm6
; SSE42-NEXT: pinsrd $0, %eax, %xmm0
; SSE42-NEXT: psubd %xmm0, %xmm6
; SSE42-NEXT: psubd %xmm2, %xmm5
; SSE42-NEXT: psubd %xmm1, %xmm4
-; SSE42-NEXT: movdqa %xmm4, d+{{.*}}(%rip)
-; SSE42-NEXT: movdqa %xmm5, d+{{.*}}(%rip)
-; SSE42-NEXT: movdqa %xmm3, d+{{.*}}(%rip)
-; SSE42-NEXT: movdqa %xmm6, d+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm4, .Ld$local+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm5, .Ld$local+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm3, .Ld$local+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm6, .Ld$local+{{.*}}(%rip)
; SSE42-NEXT: paddd %xmm2, %xmm2
; SSE42-NEXT: paddd %xmm1, %xmm1
-; SSE42-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
-; SSE42-NEXT: movdqa %xmm2, c+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm1, .Lc$local+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm2, .Lc$local+{{.*}}(%rip)
; SSE42-NEXT: retq
;
; AVX1-LABEL: PR42833:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm0
+; AVX1-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: addl {{.*}}(%rip), %eax
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,1,1,1>
; AVX1-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; AVX1-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -287,111 +287,111 @@ define void @PR42833() {
; AVX1-NEXT: vpslld $1, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7]
-; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm2
-; AVX1-NEXT: vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vmovups %ymm1, c+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm2
+; AVX1-NEXT: vpsubd .Lc$local+{{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vmovups %ymm1, .Lc$local+{{.*}}(%rip)
; AVX1-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
+; AVX1-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
-; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; AVX1-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
+; AVX1-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm4
-; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm5
+; AVX1-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm4
+; AVX1-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm5
; AVX1-NEXT: vpsubd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vmovdqa %xmm2, d+{{.*}}(%rip)
-; AVX1-NEXT: vmovdqa %xmm4, d+{{.*}}(%rip)
-; AVX1-NEXT: vmovdqa %xmm1, d+{{.*}}(%rip)
-; AVX1-NEXT: vmovdqa %xmm0, d+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm2, .Ld$local+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm4, .Ld$local+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm1, .Ld$local+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm0, .Ld$local+{{.*}}(%rip)
; AVX1-NEXT: vpaddd %xmm3, %xmm3, %xmm0
; AVX1-NEXT: vpaddd %xmm5, %xmm5, %xmm1
-; AVX1-NEXT: vmovdqa %xmm1, c+{{.*}}(%rip)
-; AVX1-NEXT: vmovdqa %xmm0, c+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm1, .Lc$local+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm0, .Lc$local+{{.*}}(%rip)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR42833:
; AVX2: # %bb.0:
; AVX2-NEXT: movl {{.*}}(%rip), %eax
-; AVX2-NEXT: vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX2-NEXT: addl c+{{.*}}(%rip), %eax
+; AVX2-NEXT: vmovdqu .Lc$local+{{.*}}(%rip), %ymm0
+; AVX2-NEXT: addl .Lc$local+{{.*}}(%rip), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],mem[1,2,3,4,5,6,7]
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3,4,5,6,7]
-; AVX2-NEXT: vmovdqu %ymm2, c+{{.*}}(%rip)
-; AVX2-NEXT: vmovdqu c+{{.*}}(%rip), %ymm2
-; AVX2-NEXT: vmovdqu d+{{.*}}(%rip), %ymm3
-; AVX2-NEXT: vmovdqu d+{{.*}}(%rip), %ymm4
+; AVX2-NEXT: vmovdqu %ymm2, .Lc$local+{{.*}}(%rip)
+; AVX2-NEXT: vmovdqu .Lc$local+{{.*}}(%rip), %ymm2
+; AVX2-NEXT: vmovdqu .Ld$local+{{.*}}(%rip), %ymm3
+; AVX2-NEXT: vmovdqu .Ld$local+{{.*}}(%rip), %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX2-NEXT: vpsubd %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpsubd %ymm2, %ymm3, %ymm1
-; AVX2-NEXT: vmovdqu %ymm1, d+{{.*}}(%rip)
-; AVX2-NEXT: vmovdqu %ymm0, d+{{.*}}(%rip)
+; AVX2-NEXT: vmovdqu %ymm1, .Ld$local+{{.*}}(%rip)
+; AVX2-NEXT: vmovdqu %ymm0, .Ld$local+{{.*}}(%rip)
; AVX2-NEXT: vpaddd %ymm2, %ymm2, %ymm0
-; AVX2-NEXT: vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX2-NEXT: vmovdqu %ymm0, .Lc$local+{{.*}}(%rip)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR42833:
; AVX512: # %bb.0:
; AVX512-NEXT: movl {{.*}}(%rip), %eax
-; AVX512-NEXT: vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX512-NEXT: vmovdqu64 c+{{.*}}(%rip), %zmm1
-; AVX512-NEXT: addl c+{{.*}}(%rip), %eax
+; AVX512-NEXT: vmovdqu .Lc$local+{{.*}}(%rip), %ymm0
+; AVX512-NEXT: vmovdqu64 .Lc$local+{{.*}}(%rip), %zmm1
+; AVX512-NEXT: addl .Lc$local+{{.*}}(%rip), %eax
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],mem[1,2,3,4,5,6,7]
; AVX512-NEXT: vpaddd %ymm2, %ymm0, %ymm3
; AVX512-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7]
-; AVX512-NEXT: vmovdqa c+{{.*}}(%rip), %xmm2
-; AVX512-NEXT: vmovdqu %ymm0, c+{{.*}}(%rip)
-; AVX512-NEXT: vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX512-NEXT: vmovdqu64 d+{{.*}}(%rip), %zmm3
+; AVX512-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm2
+; AVX512-NEXT: vmovdqu %ymm0, .Lc$local+{{.*}}(%rip)
+; AVX512-NEXT: vmovdqu .Lc$local+{{.*}}(%rip), %ymm0
+; AVX512-NEXT: vmovdqu64 .Ld$local+{{.*}}(%rip), %zmm3
; AVX512-NEXT: vpinsrd $0, %eax, %xmm2, %xmm2
; AVX512-NEXT: vinserti32x4 $0, %xmm2, %zmm1, %zmm1
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm1
; AVX512-NEXT: vpsubd %zmm1, %zmm3, %zmm1
-; AVX512-NEXT: vmovdqu64 %zmm1, d+{{.*}}(%rip)
+; AVX512-NEXT: vmovdqu64 %zmm1, .Ld$local+{{.*}}(%rip)
; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX512-NEXT: vmovdqu %ymm0, .Lc$local+{{.*}}(%rip)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; XOP-LABEL: PR42833:
; XOP: # %bb.0:
-; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm0
+; XOP-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
; XOP-NEXT: addl {{.*}}(%rip), %eax
; XOP-NEXT: vmovdqa {{.*#+}} xmm1 = <u,1,1,1>
; XOP-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
; XOP-NEXT: vpaddd %xmm1, %xmm0, %xmm2
-; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; XOP-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
; XOP-NEXT: vpshld %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpslld $1, %xmm3, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7]
-; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm2
-; XOP-NEXT: vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT: vmovups %ymm1, c+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm2
+; XOP-NEXT: vpsubd .Lc$local+{{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT: vmovups %ymm1, .Lc$local+{{.*}}(%rip)
; XOP-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
-; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
+; XOP-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
; XOP-NEXT: vpsubd %xmm0, %xmm1, %xmm0
-; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
-; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; XOP-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
+; XOP-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
; XOP-NEXT: vpsubd %xmm3, %xmm1, %xmm1
-; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm4
-; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm5
+; XOP-NEXT: vmovdqa .Ld$local+{{.*}}(%rip), %xmm4
+; XOP-NEXT: vmovdqa .Lc$local+{{.*}}(%rip), %xmm5
; XOP-NEXT: vpsubd %xmm5, %xmm4, %xmm4
-; XOP-NEXT: vmovdqa %xmm2, d+{{.*}}(%rip)
-; XOP-NEXT: vmovdqa %xmm4, d+{{.*}}(%rip)
-; XOP-NEXT: vmovdqa %xmm1, d+{{.*}}(%rip)
-; XOP-NEXT: vmovdqa %xmm0, d+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm2, .Ld$local+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm4, .Ld$local+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm1, .Ld$local+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm0, .Ld$local+{{.*}}(%rip)
; XOP-NEXT: vpaddd %xmm3, %xmm3, %xmm0
; XOP-NEXT: vpaddd %xmm5, %xmm5, %xmm1
-; XOP-NEXT: vmovdqa %xmm1, c+{{.*}}(%rip)
-; XOP-NEXT: vmovdqa %xmm0, c+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm1, .Lc$local+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm0, .Lc$local+{{.*}}(%rip)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%1 = load i32, i32* @b, align 4
diff --git a/llvm/test/CodeGen/X86/pr38795.ll b/llvm/test/CodeGen/X86/pr38795.ll
index d805dcad8b6e..3c44798a805f 100644
--- a/llvm/test/CodeGen/X86/pr38795.ll
+++ b/llvm/test/CodeGen/X86/pr38795.ll
@@ -93,7 +93,7 @@ define dso_local void @fn() {
; CHECK-NEXT: # %bb.18: # %if.then41
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl $fn, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl $.Lfn$local, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $.str, (%esp)
; CHECK-NEXT: calll printf
; CHECK-NEXT: .LBB0_19: # %for.end46
diff --git a/llvm/test/CodeGen/X86/tailcallpic1.ll b/llvm/test/CodeGen/X86/tailcallpic1.ll
index ed101fcccd2d..717cc1fddec9 100644
--- a/llvm/test/CodeGen/X86/tailcallpic1.ll
+++ b/llvm/test/CodeGen/X86/tailcallpic1.ll
@@ -12,5 +12,5 @@ define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
entry:
%tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
ret i32 %tmp11
-; CHECK: jmp tailcallee
+; CHECK: jmp .Ltailcallee$local
}
diff --git a/llvm/test/CodeGen/X86/tailcallpic3.ll b/llvm/test/CodeGen/X86/tailcallpic3.ll
index edc58052d82f..13b160aae2f6 100644
--- a/llvm/test/CodeGen/X86/tailcallpic3.ll
+++ b/llvm/test/CodeGen/X86/tailcallpic3.ll
@@ -16,7 +16,7 @@ entry:
ret void
}
; CHECK: tailcall_hidden:
-; CHECK: jmp tailcallee_hidden
+; CHECK: jmp .Ltailcallee_hidden$local
define internal void @tailcallee_internal() {
entry:
diff --git a/llvm/test/CodeGen/X86/tailccpic1.ll b/llvm/test/CodeGen/X86/tailccpic1.ll
index de8f2219bc2f..dbdc56aa61c7 100644
--- a/llvm/test/CodeGen/X86/tailccpic1.ll
+++ b/llvm/test/CodeGen/X86/tailccpic1.ll
@@ -12,5 +12,5 @@ define tailcc i32 @tailcaller(i32 %in1, i32 %in2) {
entry:
%tmp11 = tail call tailcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
ret i32 %tmp11
-; CHECK: jmp tailcallee
+; CHECK: jmp .Ltailcallee$local
}
diff --git a/llvm/test/CodeGen/X86/tls.ll b/llvm/test/CodeGen/X86/tls.ll
index 759f3d7c8550..b1d29b34a958 100644
--- a/llvm/test/CodeGen/X86/tls.ll
+++ b/llvm/test/CodeGen/X86/tls.ll
@@ -210,10 +210,10 @@ entry:
define i32 @f7() {
; X86_LINUX-LABEL: f7:
-; X86_LINUX: movl %gs:i4@NTPOFF, %eax
+; X86_LINUX: movl %gs:.Li4$local@NTPOFF, %eax
; X86_LINUX-NEXT: ret
; X64_LINUX-LABEL: f7:
-; X64_LINUX: movl %fs:i4@TPOFF, %eax
+; X64_LINUX: movl %fs:.Li4$local@TPOFF, %eax
; X64_LINUX-NEXT: ret
; MINGW32-LABEL: _f7:
; MINGW32: movl __tls_index, %eax
@@ -230,11 +230,11 @@ entry:
define i32* @f8() {
; X86_LINUX-LABEL: f8:
; X86_LINUX: movl %gs:0, %eax
-; X86_LINUX-NEXT: leal i4@NTPOFF(%eax), %eax
+; X86_LINUX-NEXT: leal .Li4$local@NTPOFF(%eax), %eax
; X86_LINUX-NEXT: ret
; X64_LINUX-LABEL: f8:
; X64_LINUX: movq %fs:0, %rax
-; X64_LINUX-NEXT: leaq i4@TPOFF(%rax), %rax
+; X64_LINUX-NEXT: leaq .Li4$local@TPOFF(%rax), %rax
; X64_LINUX-NEXT: ret
; MINGW32-LABEL: _f8:
; MINGW32: movl __tls_index, %eax