[llvm] 872c5fb - [AsmPrinter] Don't generate .Lfoo$local for -fno-PIC and -fPIE

Fangrui Song via llvm-commits <llvm-commits@lists.llvm.org>
Mon May 25 23:36:00 PDT 2020


Author: Fangrui Song
Date: 2020-05-25T23:35:49-07:00
New Revision: 872c5fb1432493c0a09b6f210765c0d94ce9b5d0

URL: https://github.com/llvm/llvm-project/commit/872c5fb1432493c0a09b6f210765c0d94ce9b5d0
DIFF: https://github.com/llvm/llvm-project/commit/872c5fb1432493c0a09b6f210765c0d94ce9b5d0.diff

LOG: [AsmPrinter] Don't generate .Lfoo$local for -fno-PIC and -fPIE

-fno-PIC and -fPIE code generally cannot be linked in -shared mode and there is no benefit accessing via local aliases.

Actually, a .Lfoo$local reference will be converted to a STT_SECTION (if no section relaxation) reference which will cause the section symbol (sizeof(Elf64_Sym)=24) to be generated.

Added: 
    

Modified: 
    llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
    llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
    llvm/test/CodeGen/X86/code-model-elf.ll
    llvm/test/CodeGen/X86/emutls.ll
    llvm/test/CodeGen/X86/indirect-branch-tracking-eh2.ll
    llvm/test/CodeGen/X86/lifetime-alias.ll
    llvm/test/CodeGen/X86/linux-preemption.ll
    llvm/test/CodeGen/X86/oddsubvector.ll
    llvm/test/CodeGen/X86/pr38795.ll
    llvm/test/CodeGen/X86/semantic-interposition-comdat.ll
    llvm/test/CodeGen/X86/tls.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 5fba0f01ba52..1a2b3761b3a7 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -462,10 +462,14 @@ MCSymbol *AsmPrinter::getSymbolPreferLocal(const GlobalValue &GV) const {
   // assembler would otherwise be conservative and assume a global default
   // visibility symbol can be interposable, even if the code generator already
   // assumed it.
-  if (TM.getTargetTriple().isOSBinFormatELF() && GV.canBenefitFromLocalAlias())
-    if (GV.isDSOLocal() || (TM.getTargetTriple().isX86() &&
-                            GV.getParent()->noSemanticInterposition()))
-      return getSymbolWithGlobalValueBase(&GV, "$local");
+  if (TM.getTargetTriple().isOSBinFormatELF() && GV.canBenefitFromLocalAlias()) {
+    const Module &M = *GV.getParent();
+    if (TM.getRelocationModel() != Reloc::Static &&
+        M.getPIELevel() == PIELevel::Default)
+      if (GV.isDSOLocal() || (TM.getTargetTriple().isX86() &&
+                              GV.getParent()->noSemanticInterposition()))
+        return getSymbolWithGlobalValueBase(&GV, "$local");
+  }
   return TM.getSymbol(&GV);
 }
 

diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
index 1b0c7c346887..90a5e2453a77 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_lane.ll
@@ -10,7 +10,6 @@ declare half @llvm.fma.f16(half, half, half) #1
 
 define dso_local <4 x half> @t_vfma_lane_f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfma_lane_f16:
-; CHECK:       .Lt_vfma_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -24,7 +23,6 @@ entry:
 
 define dso_local <8 x half> @t_vfmaq_lane_f16(<8 x half> %a, <8 x half> %b, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmaq_lane_f16:
-; CHECK:       .Lt_vfmaq_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -38,7 +36,6 @@ entry:
 
 define dso_local <4 x half> @t_vfma_laneq_f16(<4 x half> %a, <4 x half> %b, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfma_laneq_f16:
-; CHECK:       .Lt_vfma_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmla v0.4h, v1.4h, v2.h[0]
@@ -51,7 +48,6 @@ entry:
 
 define dso_local <8 x half> @t_vfmaq_laneq_f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmaq_laneq_f16:
-; CHECK:       .Lt_vfmaq_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmla v0.8h, v1.8h, v2.h[0]
@@ -64,7 +60,6 @@ entry:
 
 define dso_local <4 x half> @t_vfma_n_f16(<4 x half> %a, <4 x half> %b, half %c) {
 ; CHECK-LABEL: t_vfma_n_f16:
-; CHECK:       .Lt_vfma_n_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -79,7 +74,6 @@ entry:
 
 define dso_local <8 x half> @t_vfmaq_n_f16(<8 x half> %a, <8 x half> %b, half %c) {
 ; CHECK-LABEL: t_vfmaq_n_f16:
-; CHECK:       .Lt_vfmaq_n_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -94,7 +88,6 @@ entry:
 
 define dso_local half @t_vfmah_lane_f16(half %a, half %b, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmah_lane_f16:
-; CHECK:       .Lt_vfmah_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -108,7 +101,6 @@ entry:
 
 define dso_local half @t_vfmah_laneq_f16(half %a, half %b, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmah_laneq_f16:
-; CHECK:       .Lt_vfmah_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmla h0, h1, v2.h[0]
@@ -121,7 +113,6 @@ entry:
 
 define dso_local <4 x half> @t_vfms_lane_f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfms_lane_f16:
-; CHECK:       .Lt_vfms_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -136,7 +127,6 @@ entry:
 
 define dso_local <8 x half> @t_vfmsq_lane_f16(<8 x half> %a, <8 x half> %b, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmsq_lane_f16:
-; CHECK:       .Lt_vfmsq_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -151,7 +141,6 @@ entry:
 
 define dso_local <4 x half> @t_vfms_laneq_f16(<4 x half> %a, <4 x half> %b, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfms_laneq_f16:
-; CHECK:       .Lt_vfms_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmls v0.4h, v1.4h, v2.h[0]
@@ -165,7 +154,6 @@ entry:
 
 define dso_local <8 x half> @t_vfmsq_laneq_f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmsq_laneq_f16:
-; CHECK:       .Lt_vfmsq_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmls v0.8h, v1.8h, v2.h[0]
@@ -179,7 +167,6 @@ entry:
 
 define dso_local <4 x half> @t_vfms_n_f16(<4 x half> %a, <4 x half> %b, half %c) {
 ; CHECK-LABEL: t_vfms_n_f16:
-; CHECK:       .Lt_vfms_n_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -195,7 +182,6 @@ entry:
 
 define dso_local <8 x half> @t_vfmsq_n_f16(<8 x half> %a, <8 x half> %b, half %c) {
 ; CHECK-LABEL: t_vfmsq_n_f16:
-; CHECK:       .Lt_vfmsq_n_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -211,7 +197,6 @@ entry:
 
 define dso_local half @t_vfmsh_lane_f16(half %a, half %b, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmsh_lane_f16:
-; CHECK:       .Lt_vfmsh_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -226,7 +211,6 @@ entry:
 
 define dso_local half @t_vfmsh_laneq_f16(half %a, half %b, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vfmsh_laneq_f16:
-; CHECK:       .Lt_vfmsh_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmls h0, h1, v2.h[0]
@@ -240,7 +224,6 @@ entry:
 
 define dso_local <4 x half> @t_vmul_laneq_f16(<4 x half> %a, <8 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmul_laneq_f16:
-; CHECK:       .Lt_vmul_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmul v0.4h, v0.4h, v1.h[0]
@@ -253,7 +236,6 @@ entry:
 
 define dso_local <8 x half> @t_vmulq_laneq_f16(<8 x half> %a, <8 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulq_laneq_f16:
-; CHECK:       .Lt_vmulq_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmul v0.8h, v0.8h, v1.h[0]
@@ -266,7 +248,6 @@ entry:
 
 define dso_local half @t_vmulh_lane_f16(half %a, <4 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vmulh_lane_f16:
-; CHECK:       .Lt_vmulh_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
@@ -280,7 +261,6 @@ entry:
 
 define dso_local half @t_vmulh_laneq_f16(half %a, <8 x half> %c, i32 %lane) {
 ; CHECK-LABEL: t_vmulh_laneq_f16:
-; CHECK:       .Lt_vmulh_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmul h0, h0, v1.h[0]
@@ -293,7 +273,6 @@ entry:
 
 define dso_local half @t_vmulx_f16(half %a, half %b) {
 ; CHECK-LABEL: t_vmulx_f16:
-; CHECK:       .Lt_vmulx_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmulx h0, h0, h1
@@ -305,7 +284,6 @@ entry:
 
 define dso_local half @t_vmulxh_lane_f16(half %a, <4 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulxh_lane_f16:
-; CHECK:       .Lt_vmulxh_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
@@ -319,7 +297,6 @@ entry:
 
 define dso_local <4 x half> @t_vmulx_lane_f16(<4 x half> %a, <4 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulx_lane_f16:
-; CHECK:       .Lt_vmulx_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
@@ -333,7 +310,6 @@ entry:
 
 define dso_local <8 x half> @t_vmulxq_lane_f16(<8 x half> %a, <4 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulxq_lane_f16:
-; CHECK:       .Lt_vmulxq_lane_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
@@ -347,7 +323,6 @@ entry:
 
 define dso_local <4 x half> @t_vmulx_laneq_f16(<4 x half> %a, <8 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulx_laneq_f16:
-; CHECK:       .Lt_vmulx_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmulx v0.4h, v0.4h, v1.h[0]
@@ -360,7 +335,6 @@ entry:
 
 define dso_local <8 x half> @t_vmulxq_laneq_f16(<8 x half> %a, <8 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulxq_laneq_f16:
-; CHECK:       .Lt_vmulxq_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmulx v0.8h, v0.8h, v1.h[0]
@@ -373,7 +347,6 @@ entry:
 
 define dso_local half @t_vmulxh_laneq_f16(half %a, <8 x half> %b, i32 %lane) {
 ; CHECK-LABEL: t_vmulxh_laneq_f16:
-; CHECK:       .Lt_vmulxh_laneq_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmulx h0, h0, v1.h[7]
@@ -386,7 +359,6 @@ entry:
 
 define dso_local <4 x half> @t_vmulx_n_f16(<4 x half> %a, half %c) {
 ; CHECK-LABEL: t_vmulx_n_f16:
-; CHECK:       .Lt_vmulx_n_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $h1 killed $h1 def $q1
@@ -402,7 +374,6 @@ entry:
 
 define dso_local <8 x half> @t_vmulxq_n_f16(<8 x half> %a, half %c) {
 ; CHECK-LABEL: t_vmulxq_n_f16:
-; CHECK:       .Lt_vmulxq_n_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $h1 killed $h1 def $q1
@@ -418,7 +389,6 @@ entry:
 
 define dso_local half @t_vfmah_lane3_f16(half %a, half %b, <4 x half> %c) {
 ; CHECK-LABEL: t_vfmah_lane3_f16:
-; CHECK:       .Lt_vfmah_lane3_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -432,7 +402,6 @@ entry:
 
 define dso_local half @t_vfmah_laneq7_f16(half %a, half %b, <8 x half> %c) {
 ; CHECK-LABEL: t_vfmah_laneq7_f16:
-; CHECK:       .Lt_vfmah_laneq7_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmla h0, h1, v2.h[7]
@@ -445,7 +414,6 @@ entry:
 
 define dso_local half @t_vfmsh_lane3_f16(half %a, half %b, <4 x half> %c) {
 ; CHECK-LABEL: t_vfmsh_lane3_f16:
-; CHECK:       .Lt_vfmsh_lane3_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
@@ -460,7 +428,6 @@ entry:
 
 define dso_local half @t_vfmsh_laneq7_f16(half %a, half %b, <8 x half> %c) {
 ; CHECK-LABEL: t_vfmsh_laneq7_f16:
-; CHECK:       .Lt_vfmsh_laneq7_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fmls h0, h1, v2.h[7]
@@ -474,7 +441,6 @@ entry:
 
 define dso_local half @t_fadd_vfmah_f16(half %a, half %b, <4 x half> %c, <4 x half> %d) {
 ; CHECK-LABEL: t_fadd_vfmah_f16:
-; CHECK:       .Lt_fadd_vfmah_f16$local:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    fadd v2.4h, v2.4h, v3.4h

diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
index 46355b35d0de..8fd152869b23 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
@@ -4,7 +4,6 @@
 @v = common dso_local global i32* null, align 8
 
 ; CHECK-LABEL:  foo:                                    // @foo
-; CHECK-NEXT:   .Lfoo$local:
 ; CHECK-NEXT:   // %bb.0:                               // %entry
 ; CHECK-NEXT:       paciasp
 ; CHECK-NOT:        OUTLINED_FUNCTION_
@@ -23,7 +22,6 @@ entry:
 }
 
 ; CHECK-LABEL:  bar:                                    // @bar
-; CHECK-NEXT:   .Lbar$local:
 ; CHECK-NEXT:   // %bb.0:                               // %entry
 ; CHECK-NEXT:       paciasp
 ; CHECK-NOT:        OUTLINED_FUNCTION_

diff --git a/llvm/test/CodeGen/X86/code-model-elf.ll b/llvm/test/CodeGen/X86/code-model-elf.ll
index 748e2b0267d8..f7ffd6ea1eb7 100644
--- a/llvm/test/CodeGen/X86/code-model-elf.ll
+++ b/llvm/test/CodeGen/X86/code-model-elf.ll
@@ -83,28 +83,28 @@ define dso_local i32* @lea_static_data() #0 {
 define dso_local i32* @lea_global_data() #0 {
 ; SMALL-STATIC-LABEL: lea_global_data:
 ; SMALL-STATIC:       # %bb.0:
-; SMALL-STATIC-NEXT:    movl $.Lglobal_data$local, %eax
+; SMALL-STATIC-NEXT:    movl $global_data, %eax
 ; SMALL-STATIC-NEXT:    retq
 ;
 ; MEDIUM-STATIC-LABEL: lea_global_data:
 ; MEDIUM-STATIC:       # %bb.0:
-; MEDIUM-STATIC-NEXT:    movabsq $.Lglobal_data$local, %rax
+; MEDIUM-STATIC-NEXT:    movabsq $global_data, %rax
 ; MEDIUM-STATIC-NEXT:    retq
 ;
 ; LARGE-STATIC-LABEL: lea_global_data:
 ; LARGE-STATIC:       # %bb.0:
-; LARGE-STATIC-NEXT:    movabsq $.Lglobal_data$local, %rax
+; LARGE-STATIC-NEXT:    movabsq $global_data, %rax
 ; LARGE-STATIC-NEXT:    retq
 ;
 ; SMALL-PIC-LABEL: lea_global_data:
 ; SMALL-PIC:       # %bb.0:
-; SMALL-PIC-NEXT:    leaq .Lglobal_data$local(%rip), %rax
+; SMALL-PIC-NEXT:    leaq global_data(%rip), %rax
 ; SMALL-PIC-NEXT:    retq
 ;
 ; MEDIUM-PIC-LABEL: lea_global_data:
 ; MEDIUM-PIC:       # %bb.0:
 ; MEDIUM-PIC-NEXT:    leaq _GLOBAL_OFFSET_TABLE_(%rip), %rcx
-; MEDIUM-PIC-NEXT:    movabsq $.Lglobal_data$local@GOTOFF, %rax
+; MEDIUM-PIC-NEXT:    movabsq $global_data@GOTOFF, %rax
 ; MEDIUM-PIC-NEXT:    addq %rcx, %rax
 ; MEDIUM-PIC-NEXT:    retq
 ;
@@ -114,7 +114,7 @@ define dso_local i32* @lea_global_data() #0 {
 ; LARGE-PIC-NEXT:    leaq .L1$pb(%rip), %rax
 ; LARGE-PIC-NEXT:    movabsq $_GLOBAL_OFFSET_TABLE_-.L1$pb, %rcx
 ; LARGE-PIC-NEXT:    addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $.Lglobal_data$local@GOTOFF, %rax
+; LARGE-PIC-NEXT:    movabsq $global_data@GOTOFF, %rax
 ; LARGE-PIC-NEXT:    addq %rcx, %rax
 ; LARGE-PIC-NEXT:    retq
   ret i32* getelementptr inbounds ([10 x i32], [10 x i32]* @global_data, i64 0, i64 0)
@@ -161,30 +161,30 @@ define dso_local i32* @lea_extern_data() #0 {
 define dso_local i32 @load_global_data() #0 {
 ; SMALL-STATIC-LABEL: load_global_data:
 ; SMALL-STATIC:       # %bb.0:
-; SMALL-STATIC-NEXT:    movl .Lglobal_data$local+8(%rip), %eax
+; SMALL-STATIC-NEXT:    movl global_data+8(%rip), %eax
 ; SMALL-STATIC-NEXT:    retq
 ;
 ; MEDIUM-STATIC-LABEL: load_global_data:
 ; MEDIUM-STATIC:       # %bb.0:
-; MEDIUM-STATIC-NEXT:    movabsq $.Lglobal_data$local, %rax
+; MEDIUM-STATIC-NEXT:    movabsq $global_data, %rax
 ; MEDIUM-STATIC-NEXT:    movl 8(%rax), %eax
 ; MEDIUM-STATIC-NEXT:    retq
 ;
 ; LARGE-STATIC-LABEL: load_global_data:
 ; LARGE-STATIC:       # %bb.0:
-; LARGE-STATIC-NEXT:    movabsq $.Lglobal_data$local, %rax
+; LARGE-STATIC-NEXT:    movabsq $global_data, %rax
 ; LARGE-STATIC-NEXT:    movl 8(%rax), %eax
 ; LARGE-STATIC-NEXT:    retq
 ;
 ; SMALL-PIC-LABEL: load_global_data:
 ; SMALL-PIC:       # %bb.0:
-; SMALL-PIC-NEXT:    movl .Lglobal_data$local+8(%rip), %eax
+; SMALL-PIC-NEXT:    movl global_data+8(%rip), %eax
 ; SMALL-PIC-NEXT:    retq
 ;
 ; MEDIUM-PIC-LABEL: load_global_data:
 ; MEDIUM-PIC:       # %bb.0:
 ; MEDIUM-PIC-NEXT:    leaq _GLOBAL_OFFSET_TABLE_(%rip), %rax
-; MEDIUM-PIC-NEXT:    movabsq $.Lglobal_data$local@GOTOFF, %rcx
+; MEDIUM-PIC-NEXT:    movabsq $global_data@GOTOFF, %rcx
 ; MEDIUM-PIC-NEXT:    movl 8(%rax,%rcx), %eax
 ; MEDIUM-PIC-NEXT:    retq
 ;
@@ -194,7 +194,7 @@ define dso_local i32 @load_global_data() #0 {
 ; LARGE-PIC-NEXT:    leaq .L3$pb(%rip), %rax
 ; LARGE-PIC-NEXT:    movabsq $_GLOBAL_OFFSET_TABLE_-.L3$pb, %rcx
 ; LARGE-PIC-NEXT:    addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $.Lglobal_data$local@GOTOFF, %rax
+; LARGE-PIC-NEXT:    movabsq $global_data@GOTOFF, %rax
 ; LARGE-PIC-NEXT:    movl 8(%rcx,%rax), %eax
 ; LARGE-PIC-NEXT:    retq
   %rv = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @global_data, i64 0, i64 2)
@@ -302,27 +302,27 @@ define dso_local void ()* @lea_static_fn() #0 {
 define dso_local void ()* @lea_global_fn() #0 {
 ; SMALL-STATIC-LABEL: lea_global_fn:
 ; SMALL-STATIC:       # %bb.0:
-; SMALL-STATIC-NEXT:    movl $.Lglobal_fn$local, %eax
+; SMALL-STATIC-NEXT:    movl $global_fn, %eax
 ; SMALL-STATIC-NEXT:    retq
 ;
 ; MEDIUM-STATIC-LABEL: lea_global_fn:
 ; MEDIUM-STATIC:       # %bb.0:
-; MEDIUM-STATIC-NEXT:    movabsq $.Lglobal_fn$local, %rax
+; MEDIUM-STATIC-NEXT:    movabsq $global_fn, %rax
 ; MEDIUM-STATIC-NEXT:    retq
 ;
 ; LARGE-STATIC-LABEL: lea_global_fn:
 ; LARGE-STATIC:       # %bb.0:
-; LARGE-STATIC-NEXT:    movabsq $.Lglobal_fn$local, %rax
+; LARGE-STATIC-NEXT:    movabsq $global_fn, %rax
 ; LARGE-STATIC-NEXT:    retq
 ;
 ; SMALL-PIC-LABEL: lea_global_fn:
 ; SMALL-PIC:       # %bb.0:
-; SMALL-PIC-NEXT:    leaq .Lglobal_fn$local(%rip), %rax
+; SMALL-PIC-NEXT:    leaq global_fn(%rip), %rax
 ; SMALL-PIC-NEXT:    retq
 ;
 ; MEDIUM-PIC-LABEL: lea_global_fn:
 ; MEDIUM-PIC:       # %bb.0:
-; MEDIUM-PIC-NEXT:    movabsq $.Lglobal_fn$local, %rax
+; MEDIUM-PIC-NEXT:    movabsq $global_fn, %rax
 ; MEDIUM-PIC-NEXT:    retq
 ;
 ; LARGE-PIC-LABEL: lea_global_fn:
@@ -331,7 +331,7 @@ define dso_local void ()* @lea_global_fn() #0 {
 ; LARGE-PIC-NEXT:    leaq .L8$pb(%rip), %rax
 ; LARGE-PIC-NEXT:    movabsq $_GLOBAL_OFFSET_TABLE_-.L8$pb, %rcx
 ; LARGE-PIC-NEXT:    addq %rax, %rcx
-; LARGE-PIC-NEXT:    movabsq $.Lglobal_fn$local@GOTOFF, %rax
+; LARGE-PIC-NEXT:    movabsq $global_fn@GOTOFF, %rax
 ; LARGE-PIC-NEXT:    addq %rcx, %rax
 ; LARGE-PIC-NEXT:    retq
   ret void ()* @global_fn

diff --git a/llvm/test/CodeGen/X86/emutls.ll b/llvm/test/CodeGen/X86/emutls.ll
index 8d836ef733b5..1e706c1267d1 100644
--- a/llvm/test/CodeGen/X86/emutls.ll
+++ b/llvm/test/CodeGen/X86/emutls.ll
@@ -135,7 +135,7 @@ entry:
 
 define i32 @f7() {
 ; X32-LABEL: f7:
-; X32:         movl $.L__emutls_v.i4$local, (%esp)
+; X32:         movl $__emutls_v.i4, (%esp)
 ; X32-NEXT:    calll __emutls_get_address
 ; X32-NEXT:    movl (%eax), %eax
 ; X32-NEXT:    addl $12, %esp
@@ -148,7 +148,7 @@ entry:
 
 define i32* @f8() {
 ; X32-LABEL: f8:
-; X32:         movl $.L__emutls_v.i4$local, (%esp)
+; X32:         movl $__emutls_v.i4, (%esp)
 ; X32-NEXT:    calll __emutls_get_address
 ; X32-NEXT:    addl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 4
@@ -258,14 +258,12 @@ entry:
 ; X32-NEXT: .long 15
 
 ; X32-LABEL: __emutls_v.i4:
-; X32-NEXT: .L__emutls_v.i4$local:
 ; X32-NEXT: .long 4
 ; X32-NEXT: .long 4
 ; X32-NEXT: .long 0
 ; X32-NEXT: .long __emutls_t.i4
 
 ; X32-LABEL: __emutls_t.i4:
-; X32-NEXT: .L__emutls_t.i4$local:
 ; X32-NEXT: .long 15
 
 ; X32-NOT:   __emutls_v.i5:
@@ -312,14 +310,12 @@ entry:
 ; X64-NEXT: .long 15
 
 ; X64-LABEL: __emutls_v.i4:
-; X64-NEXT: .L__emutls_v.i4$local:
 ; X64-NEXT: .quad 4
 ; X64-NEXT: .quad 4
 ; X64-NEXT: .quad 0
 ; X64-NEXT: .quad __emutls_t.i4
 
 ; X64-LABEL: __emutls_t.i4:
-; X64-NEXT: .L__emutls_t.i4$local:
 ; X64-NEXT: .long 15
 
 ; X64-NOT:   __emutls_v.i5:

diff --git a/llvm/test/CodeGen/X86/indirect-branch-tracking-eh2.ll b/llvm/test/CodeGen/X86/indirect-branch-tracking-eh2.ll
index 312707a029cd..6e41c94e979a 100644
--- a/llvm/test/CodeGen/X86/indirect-branch-tracking-eh2.ll
+++ b/llvm/test/CodeGen/X86/indirect-branch-tracking-eh2.ll
@@ -4,7 +4,6 @@
 ; NUM-COUNT-3: endbr64
 
 ;SJLJ:       main:                                  # @main
-;SJLJ-NEXT: .Lmain$local:
 ;SJLJ-NEXT: .Lfunc_begin0:
 ;SJLJ-NEXT: # %bb.0:                                # %entry
 ;SJLJ-NEXT:         endbr64

diff --git a/llvm/test/CodeGen/X86/lifetime-alias.ll b/llvm/test/CodeGen/X86/lifetime-alias.ll
index e57f1726a4ee..010dc33b5051 100644
--- a/llvm/test/CodeGen/X86/lifetime-alias.ll
+++ b/llvm/test/CodeGen/X86/lifetime-alias.ll
@@ -70,9 +70,9 @@ define i8 @main() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__g
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq $0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT:    movq %rax, .Ldo_not_optimize${{.*}}(%rip)
+; CHECK-NEXT:    movq %rax, do_not_optimize{{.*}}(%rip)
 ; CHECK-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT:    movq %rax, .Ldo_not_optimize${{.*}}(%rip)
+; CHECK-NEXT:    movq %rax, do_not_optimize{{.*}}(%rip)
 ; CHECK-NEXT:    cmpb $0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    jns .LBB0_1
 ; CHECK-NEXT:  # %bb.2: # %_ZNSt3__312basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED2Ev.exit50

diff --git a/llvm/test/CodeGen/X86/linux-preemption.ll b/llvm/test/CodeGen/X86/linux-preemption.ll
index 7d22b7513218..49a7becf1343 100644
--- a/llvm/test/CodeGen/X86/linux-preemption.ll
+++ b/llvm/test/CodeGen/X86/linux-preemption.ll
@@ -41,7 +41,7 @@ define i32* @get_strong_local_global() {
   ret i32* @strong_local_global
 }
 ; CHECK: leaq .Lstrong_local_global$local(%rip), %rax
-; STATIC: movl $.Lstrong_local_global$local, %eax
+; STATIC: movl $strong_local_global, %eax
 ; CHECK32: leal .Lstrong_local_global$local@GOTOFF(%eax), %eax
 
 @weak_local_global = weak dso_local global i32 42
@@ -109,7 +109,7 @@ define i32* @get_strong_local_alias() {
   ret i32* @strong_local_alias
 }
 ; CHECK: leaq .Lstrong_local_alias$local(%rip), %rax
-; STATIC: movl $.Lstrong_local_alias$local, %eax
+; STATIC: movl $strong_local_alias, %eax
 ; CHECK32: leal .Lstrong_local_alias$local@GOTOFF(%eax), %eax
 
 @weak_local_alias = weak dso_local alias i32, i32* @aliasee
@@ -174,9 +174,9 @@ define void()* @get_strong_local_function() {
   ret void()* @strong_local_function
 }
 ; COMMON:     {{^}}strong_local_function:
-; COMMON-NEXT: .Lstrong_local_function$local:
+; CHECK-NEXT: .Lstrong_local_function$local:
 ; CHECK: leaq .Lstrong_local_function$local(%rip), %rax
-; STATIC: movl $.Lstrong_local_function$local, %eax
+; STATIC: movl $strong_local_function, %eax
 ; CHECK32: leal .Lstrong_local_function$local@GOTOFF(%eax), %eax
 
 define weak dso_local void @weak_local_function() {
@@ -226,8 +226,11 @@ define void()* @get_external_preemptable_function() {
 ; STATIC: movl $external_preemptable_function, %eax
 ; CHECK32: movl external_preemptable_function@GOT(%eax), %eax
 
+!llvm.module.flags = !{!0}
+!0 = !{i32 7, !"PIC Level", i32 2}
+
 ; COMMON:     {{^}}strong_local_global:
-; COMMON-NEXT: .Lstrong_local_global$local:
+; CHECK-NEXT: .Lstrong_local_global$local:
 
 ; COMMON:      .globl strong_default_alias
 ; COMMON-NEXT: .set strong_default_alias, aliasee
@@ -235,7 +238,7 @@ define void()* @get_external_preemptable_function() {
 ; COMMON-NEXT: .set weak_default_alias, aliasee
 ; COMMON-NEXT: .globl strong_local_alias
 ; COMMON-NEXT: .set strong_local_alias, aliasee
-; COMMON-NEXT: .set .Lstrong_local_alias$local, aliasee
+; CHECK-NEXT:  .set .Lstrong_local_alias$local, aliasee
 ; COMMON-NEXT: .weak weak_local_alias
 ; COMMON-NEXT: .set weak_local_alias, aliasee
 ; COMMON-NEXT: .globl strong_preemptable_alias

diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
index 8d3e01f86def..46ff47b2a100 100644
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -187,189 +187,189 @@ define <16 x i32> @PR42819(<8 x i32>* %a0) {
 define void @PR42833() {
 ; SSE2-LABEL: PR42833:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    addl .Lb${{.*}}(%rip), %eax
+; SSE2-NEXT:    addl b(%rip), %eax
 ; SSE2-NEXT:    movd %eax, %xmm2
 ; SSE2-NEXT:    movd %eax, %xmm3
 ; SSE2-NEXT:    paddd %xmm0, %xmm3
-; SSE2-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm4
+; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm4
 ; SSE2-NEXT:    psubd %xmm1, %xmm4
 ; SSE2-NEXT:    paddd %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
 ; SSE2-NEXT:    paddd %xmm0, %xmm5
 ; SSE2-NEXT:    movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3]
-; SSE2-NEXT:    movdqa %xmm1, .Lc$local+{{.*}}(%rip)
-; SSE2-NEXT:    movaps %xmm5, .Lc$local+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm3
-; SSE2-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm5
-; SSE2-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm6
-; SSE2-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm7
+; SSE2-NEXT:    movdqa %xmm1, c+{{.*}}(%rip)
+; SSE2-NEXT:    movaps %xmm5, c+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm3
+; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm5
+; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm6
+; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm7
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE2-NEXT:    psubd %xmm0, %xmm7
 ; SSE2-NEXT:    psubd %xmm3, %xmm6
 ; SSE2-NEXT:    psubd %xmm1, %xmm5
-; SSE2-NEXT:    movdqa %xmm5, .Ld$local+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm6, .Ld$local+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm4, .Ld$local+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm7, .Ld$local+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm5, d+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm6, d+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm4, d+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm7, d+{{.*}}(%rip)
 ; SSE2-NEXT:    paddd %xmm3, %xmm3
 ; SSE2-NEXT:    paddd %xmm1, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, .Lc$local+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm3, .Lc$local+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm1, c+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm3, c+{{.*}}(%rip)
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: PR42833:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm0
-; SSE42-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm1
+; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm0
+; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm1
 ; SSE42-NEXT:    movd %xmm1, %eax
-; SSE42-NEXT:    addl .Lb${{.*}}(%rip), %eax
+; SSE42-NEXT:    addl b(%rip), %eax
 ; SSE42-NEXT:    movd %eax, %xmm2
 ; SSE42-NEXT:    paddd %xmm1, %xmm2
-; SSE42-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm3
+; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm3
 ; SSE42-NEXT:    psubd %xmm0, %xmm3
 ; SSE42-NEXT:    paddd %xmm0, %xmm0
 ; SSE42-NEXT:    movdqa %xmm1, %xmm4
 ; SSE42-NEXT:    paddd %xmm1, %xmm4
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
-; SSE42-NEXT:    movdqa %xmm0, .Lc$local+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm4, .Lc$local+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm0
-; SSE42-NEXT:    movdqa .Lc$local+{{.*}}(%rip), %xmm2
-; SSE42-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm4
-; SSE42-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm5
-; SSE42-NEXT:    movdqa .Ld$local+{{.*}}(%rip), %xmm6
+; SSE42-NEXT:    movdqa %xmm0, c+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm4, c+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm0
+; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm2
+; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm4
+; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm5
+; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm6
 ; SSE42-NEXT:    pinsrd $0, %eax, %xmm1
 ; SSE42-NEXT:    psubd %xmm1, %xmm6
 ; SSE42-NEXT:    psubd %xmm2, %xmm5
 ; SSE42-NEXT:    psubd %xmm0, %xmm4
-; SSE42-NEXT:    movdqa %xmm4, .Ld$local+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm5, .Ld$local+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm3, .Ld$local+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm6, .Ld$local+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm4, d+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm5, d+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm3, d+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm6, d+{{.*}}(%rip)
 ; SSE42-NEXT:    paddd %xmm2, %xmm2
 ; SSE42-NEXT:    paddd %xmm0, %xmm0
-; SSE42-NEXT:    movdqa %xmm0, .Lc$local+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm2, .Lc$local+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm0, c+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm2, c+{{.*}}(%rip)
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: PR42833:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm0
+; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    addl .Lb${{.*}}(%rip), %eax
+; AVX1-NEXT:    addl b(%rip), %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
-; AVX1-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
+; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm2
-; AVX1-NEXT:    vpsubd .Lc$local+{{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT:    vmovups %ymm1, .Lc$local+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm2
+; AVX1-NEXT:    vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vmovups %ymm1, c+{{.*}}(%rip)
 ; AVX1-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
+; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
 ; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
-; AVX1-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
+; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
+; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
 ; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm4
-; AVX1-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm5
+; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm4
+; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm5
 ; AVX1-NEXT:    vpsubd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa %xmm2, .Ld$local+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm4, .Ld$local+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm1, .Ld$local+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm0, .Ld$local+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm2, d+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm4, d+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm1, d+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm0, d+{{.*}}(%rip)
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm3, %xmm0
 ; AVX1-NEXT:    vpaddd %xmm5, %xmm5, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, .Lc$local+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm0, .Lc$local+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm1, c+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm0, c+{{.*}}(%rip)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR42833:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    movl .Lb${{.*}}(%rip), %eax
-; AVX2-NEXT:    vmovdqu .Lc$local+{{.*}}(%rip), %ymm0
-; AVX2-NEXT:    addl .Lc$local+{{.*}}(%rip), %eax
+; AVX2-NEXT:    movl b(%rip), %eax
+; AVX2-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX2-NEXT:    addl c+{{.*}}(%rip), %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm0, %ymm3
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4,5,6,7]
-; AVX2-NEXT:    vmovdqu %ymm2, .Lc$local+{{.*}}(%rip)
-; AVX2-NEXT:    vmovdqu .Lc$local+{{.*}}(%rip), %ymm2
-; AVX2-NEXT:    vmovdqu .Ld$local+{{.*}}(%rip), %ymm3
-; AVX2-NEXT:    vmovdqu .Ld$local+{{.*}}(%rip), %ymm4
+; AVX2-NEXT:    vmovdqu %ymm2, c+{{.*}}(%rip)
+; AVX2-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm2
+; AVX2-NEXT:    vmovdqu d+{{.*}}(%rip), %ymm3
+; AVX2-NEXT:    vmovdqu d+{{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
 ; AVX2-NEXT:    vpsubd %ymm0, %ymm4, %ymm0
 ; AVX2-NEXT:    vpsubd %ymm2, %ymm3, %ymm1
-; AVX2-NEXT:    vmovdqu %ymm1, .Ld$local+{{.*}}(%rip)
-; AVX2-NEXT:    vmovdqu %ymm0, .Ld$local+{{.*}}(%rip)
+; AVX2-NEXT:    vmovdqu %ymm1, d+{{.*}}(%rip)
+; AVX2-NEXT:    vmovdqu %ymm0, d+{{.*}}(%rip)
 ; AVX2-NEXT:    vpaddd %ymm2, %ymm2, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, .Lc$local+{{.*}}(%rip)
+; AVX2-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: PR42833:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movl .Lb${{.*}}(%rip), %eax
-; AVX512-NEXT:    vmovdqu .Lc$local+{{.*}}(%rip), %ymm0
-; AVX512-NEXT:    vmovdqu64 .Lc$local+{{.*}}(%rip), %zmm1
-; AVX512-NEXT:    addl .Lc$local+{{.*}}(%rip), %eax
+; AVX512-NEXT:    movl b(%rip), %eax
+; AVX512-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX512-NEXT:    vmovdqu64 c+{{.*}}(%rip), %zmm1
+; AVX512-NEXT:    addl c+{{.*}}(%rip), %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm2
 ; AVX512-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7]
-; AVX512-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm2
-; AVX512-NEXT:    vmovdqu %ymm0, .Lc$local+{{.*}}(%rip)
-; AVX512-NEXT:    vmovdqu .Lc$local+{{.*}}(%rip), %ymm0
-; AVX512-NEXT:    vmovdqu64 .Ld$local+{{.*}}(%rip), %zmm3
+; AVX512-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm2
+; AVX512-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX512-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX512-NEXT:    vmovdqu64 d+{{.*}}(%rip), %zmm3
 ; AVX512-NEXT:    vpinsrd $0, %eax, %xmm2, %xmm2
 ; AVX512-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
 ; AVX512-NEXT:    vpsubd %zmm1, %zmm3, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, .Ld$local+{{.*}}(%rip)
+; AVX512-NEXT:    vmovdqu64 %zmm1, d+{{.*}}(%rip)
 ; AVX512-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
-; AVX512-NEXT:    vmovdqu %ymm0, .Lc$local+{{.*}}(%rip)
+; AVX512-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
 ;
 ; XOP-LABEL: PR42833:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm0
+; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm0
 ; XOP-NEXT:    vmovd %xmm0, %eax
-; XOP-NEXT:    addl .Lb${{.*}}(%rip), %eax
+; XOP-NEXT:    addl b(%rip), %eax
 ; XOP-NEXT:    vmovd %eax, %xmm1
 ; XOP-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; XOP-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
-; XOP-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
+; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
 ; XOP-NEXT:    vpaddd %xmm3, %xmm3, %xmm3
 ; XOP-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
 ; XOP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
-; XOP-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm2
-; XOP-NEXT:    vpsubd .Lc$local+{{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT:    vmovups %ymm1, .Lc$local+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm2
+; XOP-NEXT:    vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vmovups %ymm1, c+{{.*}}(%rip)
 ; XOP-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
-; XOP-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
+; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
 ; XOP-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
-; XOP-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm1
-; XOP-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm3
+; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
+; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
 ; XOP-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; XOP-NEXT:    vmovdqa .Ld$local+{{.*}}(%rip), %xmm4
-; XOP-NEXT:    vmovdqa .Lc$local+{{.*}}(%rip), %xmm5
+; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm4
+; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm5
 ; XOP-NEXT:    vpsubd %xmm5, %xmm4, %xmm4
-; XOP-NEXT:    vmovdqa %xmm2, .Ld$local+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm4, .Ld$local+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm1, .Ld$local+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm0, .Ld$local+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm2, d+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm4, d+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm1, d+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm0, d+{{.*}}(%rip)
 ; XOP-NEXT:    vpaddd %xmm3, %xmm3, %xmm0
 ; XOP-NEXT:    vpaddd %xmm5, %xmm5, %xmm1
-; XOP-NEXT:    vmovdqa %xmm1, .Lc$local+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm0, .Lc$local+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm1, c+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm0, c+{{.*}}(%rip)
 ; XOP-NEXT:    vzeroupper
 ; XOP-NEXT:    retq
   %1 = load i32, i32* @b, align 4

diff  --git a/llvm/test/CodeGen/X86/pr38795.ll b/llvm/test/CodeGen/X86/pr38795.ll
index 3c44798a805f..d805dcad8b6e 100644
--- a/llvm/test/CodeGen/X86/pr38795.ll
+++ b/llvm/test/CodeGen/X86/pr38795.ll
@@ -93,7 +93,7 @@ define dso_local void @fn() {
 ; CHECK-NEXT:  # %bb.18: # %if.then41
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movl $.Lfn$local, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    movl $fn, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    movl $.str, (%esp)
 ; CHECK-NEXT:    calll printf
 ; CHECK-NEXT:  .LBB0_19: # %for.end46

diff  --git a/llvm/test/CodeGen/X86/semantic-interposition-comdat.ll b/llvm/test/CodeGen/X86/semantic-interposition-comdat.ll
index 06574056298d..d0efd4d11c95 100644
--- a/llvm/test/CodeGen/X86/semantic-interposition-comdat.ll
+++ b/llvm/test/CodeGen/X86/semantic-interposition-comdat.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple x86_64-unknown-linux-gnu %s -o - | FileCheck %s
+; RUN: llc -mtriple=x86_64 -relocation-model=pic < %s | FileCheck %s
 
 $comdat_func = comdat any
 
@@ -21,3 +21,8 @@ entry:
   call void @func()
   ret void
 }
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 1, !"SemanticInterposition", i32 0}
+!1 = !{i32 7, !"PIC Level", i32 2}

diff  --git a/llvm/test/CodeGen/X86/tls.ll b/llvm/test/CodeGen/X86/tls.ll
index b1d29b34a958..759f3d7c8550 100644
--- a/llvm/test/CodeGen/X86/tls.ll
+++ b/llvm/test/CodeGen/X86/tls.ll
@@ -210,10 +210,10 @@ entry:
 
 define i32 @f7() {
 ; X86_LINUX-LABEL: f7:
-; X86_LINUX:      movl %gs:.Li4$local@NTPOFF, %eax
+; X86_LINUX:      movl %gs:i4@NTPOFF, %eax
 ; X86_LINUX-NEXT: ret
 ; X64_LINUX-LABEL: f7:
-; X64_LINUX:      movl %fs:.Li4$local@TPOFF, %eax
+; X64_LINUX:      movl %fs:i4@TPOFF, %eax
 ; X64_LINUX-NEXT: ret
 ; MINGW32-LABEL: _f7:
 ; MINGW32: movl __tls_index, %eax
@@ -230,11 +230,11 @@ entry:
 define i32* @f8() {
 ; X86_LINUX-LABEL: f8:
 ; X86_LINUX:      movl %gs:0, %eax
-; X86_LINUX-NEXT: leal .Li4$local@NTPOFF(%eax), %eax
+; X86_LINUX-NEXT: leal i4@NTPOFF(%eax), %eax
 ; X86_LINUX-NEXT: ret
 ; X64_LINUX-LABEL: f8:
 ; X64_LINUX:      movq %fs:0, %rax
-; X64_LINUX-NEXT: leaq .Li4$local@TPOFF(%rax), %rax
+; X64_LINUX-NEXT: leaq i4@TPOFF(%rax), %rax
 ; X64_LINUX-NEXT: ret
 ; MINGW32-LABEL: _f8:
 ; MINGW32: movl __tls_index, %eax


        


More information about the llvm-commits mailing list