[llvm] 7e5508e - [RISCV][test] Add explicit dso_local to definitions in ELF static relocation model tests

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 30 15:28:16 PST 2020


Author: Fangrui Song
Date: 2020-12-30T15:28:11-08:00
New Revision: 7e5508e6a8a5e6f70634a5631affe646c8836690

URL: https://github.com/llvm/llvm-project/commit/7e5508e6a8a5e6f70634a5631affe646c8836690
DIFF: https://github.com/llvm/llvm-project/commit/7e5508e6a8a5e6f70634a5631affe646c8836690.diff

LOG: [RISCV][test] Add explicit dso_local to definitions in ELF static relocation model tests

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/double-mem.ll
    llvm/test/CodeGen/RISCV/float-mem.ll
    llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
    llvm/test/CodeGen/RISCV/half-mem.ll
    llvm/test/CodeGen/RISCV/mem.ll
    llvm/test/CodeGen/RISCV/mem64.ll
    llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll

Removed: 
    


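For context: under the ELF static relocation model, llc infers dso_local for definitions, so adding the attribute explicitly should not change the generated code; it makes the tests' expectations independent of that inference. A minimal sketch of the pattern being tested (illustrative names, not taken from the diff below):

  @G = dso_local global i32 0

  define dso_local i32 @read_G() nounwind {
    %1 = load i32, i32* @G
    ret i32 %1
  }

  ; With llc -mtriple=riscv32 (the static relocation model is the default),
  ; a dso_local global can be addressed directly:
  ;   lui a0, %hi(G)
  ;   lw  a0, %lo(G)(a0)
  ; A preemptible (non-dso_local) symbol under -relocation-model=pic would
  ; instead be loaded through the GOT.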
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index fa6f791b7591..b9302dcfd98e 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
-define double @fld(double *%a) nounwind {
+define dso_local double @fld(double *%a) nounwind {
 ; RV32IFD-LABEL: fld:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -33,7 +33,7 @@ define double @fld(double *%a) nounwind {
   ret double %4
 }
 
-define void @fsd(double *%a, double %b, double %c) nounwind {
+define dso_local void @fsd(double *%a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fsd:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -67,9 +67,9 @@ define void @fsd(double *%a, double %b, double %c) nounwind {
 }
 
 ; Check load and store to a global
-@G = global double 0.0
+@G = dso_local global double 0.0
 
-define double @fld_fsd_global(double %a, double %b) nounwind {
+define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fld_fsd_global:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -117,7 +117,7 @@ define double @fld_fsd_global(double %a, double %b) nounwind {
 }
 
 ; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
-define double @fld_fsd_constant(double %a) nounwind {
+define dso_local double @fld_fsd_constant(double %a) nounwind {
 ; RV32IFD-LABEL: fld_fsd_constant:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -154,7 +154,7 @@ define double @fld_fsd_constant(double %a) nounwind {
 
 declare void @notdead(i8*)
 
-define double @fld_stack(double %a) nounwind {
+define dso_local double @fld_stack(double %a) nounwind {
 ; RV32IFD-LABEL: fld_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
@@ -198,7 +198,7 @@ define double @fld_stack(double %a) nounwind {
   ret double %4
 }
 
-define void @fsd_stack(double %a, double %b) nounwind {
+define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fsd_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
@@ -239,7 +239,7 @@ define void @fsd_stack(double %a, double %b) nounwind {
 }
 
 ; Test selection of store<ST4[%a], trunc to f32>, ..
-define void @fsd_trunc(float* %a, double %b) nounwind noinline optnone {
+define dso_local void @fsd_trunc(float* %a, double %b) nounwind noinline optnone {
 ; RV32IFD-LABEL: fsd_trunc:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16

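A note on the "bit 11" comment in fld_fsd_constant above: a 32-bit absolute address is split between lui, which supplies bits 31:12, and a 12-bit load/store offset that is sign-extended, supplying bits 11:0. When bit 11 of the low part is 1 the offset is negative, so the high part must be incremented by 1 to compensate. Worked through for the constant these tests use (0xDEADBEEF, matching the lui of 912092 in the checks; the exact offset is inferred, not shown in the truncated hunk):

  ; low 12 bits of 0xDEADBEEF = 0xEEF; bit 11 is set, so as a signed
  ; immediate 0xEEF sign-extends to -273
  ; high 20 bits = 0xDEADB, incremented to 0xDEADC = 912092
  ;   lui a1, 912092      ; a1 = 0xDEADC000
  ;   fld ft0, -273(a1)   ; 0xDEADC000 - 273 = 0xDEADBEEF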
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index c609f6c0a88f..2d47fc8776fb 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64IF %s
 
-define float @flw(float *%a) nounwind {
+define dso_local float @flw(float *%a) nounwind {
 ; RV32IF-LABEL: flw:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    flw ft0, 0(a0)
@@ -29,7 +29,7 @@ define float @flw(float *%a) nounwind {
   ret float %4
 }
 
-define void @fsw(float *%a, float %b, float %c) nounwind {
+define dso_local void @fsw(float *%a, float %b, float %c) nounwind {
 ; Use %b and %c in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
 ; RV32IF-LABEL: fsw:
@@ -57,9 +57,9 @@ define void @fsw(float *%a, float %b, float %c) nounwind {
 }
 
 ; Check load and store to a global
-@G = global float 0.0
+@G = dso_local global float 0.0
 
-define float @flw_fsw_global(float %a, float %b) nounwind {
+define dso_local float @flw_fsw_global(float %a, float %b) nounwind {
 ; Use %a and %b in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
 ; RV32IF-LABEL: flw_fsw_global:
@@ -99,7 +99,7 @@ define float @flw_fsw_global(float %a, float %b) nounwind {
 }
 
 ; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
-define float @flw_fsw_constant(float %a) nounwind {
+define dso_local float @flw_fsw_constant(float %a) nounwind {
 ; RV32IF-LABEL: flw_fsw_constant:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a1, 912092
@@ -130,7 +130,7 @@ define float @flw_fsw_constant(float %a) nounwind {
 
 declare void @notdead(i8*)
 
-define float @flw_stack(float %a) nounwind {
+define dso_local float @flw_stack(float %a) nounwind {
 ; RV32IF-LABEL: flw_stack:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
@@ -170,7 +170,7 @@ define float @flw_stack(float %a) nounwind {
   ret float %4
 }
 
-define void @fsw_stack(float %a, float %b) nounwind {
+define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fsw_stack:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16

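On the "soft float ABI" comments in this file: with the default ilp32/lp64 ABIs, float arguments are passed in integer registers even when the F extension is enabled, so a bare store of an argument would never touch the FP register file. Folding %b and %c through an FP op forces a move into FP registers first. Roughly (an illustrative sketch, not copied from the CHECK lines):

  ; fsw with the ilp32 ABI: a0 = %a, a1 = bits of %b, a2 = bits of %c
  ;   fmv.w.x ft0, a2       ; move the raw bits of %c into an FP register
  ;   fmv.w.x ft1, a1       ; likewise for %b
  ;   fadd.s  ft0, ft1, ft0 ; the FP op that forces FP register usage
  ;   fsw     ft0, 0(a0)    ; the store actually under test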
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 803778de1365..a2972908d3c7 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -11,14 +11,14 @@
 ; Check if we do the fold under various conditions. If off1 is (the low part of)
 ; an address the fold's safety depends on the variable's alignment.
 
-@g_0 = global i64 0
-@g_1 = global i64 0, align 1
-@g_2 = global i64 0, align 2
-@g_4 = global i64 0, align 4
-@g_8 = global i64 0, align 8
-@g_16 = global i64 0, align 16
+@g_0 = dso_local global i64 0
+@g_1 = dso_local global i64 0, align 1
+@g_2 = dso_local global i64 0, align 2
+@g_4 = dso_local global i64 0, align 4
+@g_8 = dso_local global i64 0, align 8
+@g_16 = dso_local global i64 0, align 16
 
-define i64 @load_g_0() nounwind {
+define dso_local i64 @load_g_0() nounwind {
 ; RV32I-LABEL: load_g_0:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(g_0)
@@ -36,7 +36,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_g_1() nounwind {
+define dso_local i64 @load_g_1() nounwind {
 ; RV32I-LABEL: load_g_1:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(g_1)
@@ -55,7 +55,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_g_2() nounwind {
+define dso_local i64 @load_g_2() nounwind {
 ; RV32I-LABEL: load_g_2:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(g_2)
@@ -74,7 +74,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_g_4() nounwind {
+define dso_local i64 @load_g_4() nounwind {
 ; RV32I-LABEL: load_g_4:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(g_4)
@@ -93,7 +93,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_g_8() nounwind {
+define dso_local i64 @load_g_8() nounwind {
 ; RV32I-LABEL: load_g_8:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(g_8)
@@ -111,7 +111,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_g_16() nounwind {
+define dso_local i64 @load_g_16() nounwind {
 ; RV32I-LABEL: load_g_16:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(g_16)
@@ -129,7 +129,7 @@ entry:
   ret i64 %0
 }
 
-define void @store_g_4() nounwind {
+define dso_local void @store_g_4() nounwind {
 ; RV32I-LABEL: store_g_4:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(g_4)
@@ -148,7 +148,7 @@ entry:
    ret void
 }
 
-define void @store_g_8() nounwind {
+define dso_local void @store_g_8() nounwind {
 ; RV32I-LABEL: store_g_8:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(g_8)
@@ -171,7 +171,7 @@ entry:
 @ga_8 = dso_local local_unnamed_addr global [2 x i64] zeroinitializer, align 8
 @ga_16 = dso_local local_unnamed_addr global [2 x i64] zeroinitializer, align 16
 
-define i64 @load_ga_8() nounwind {
+define dso_local i64 @load_ga_8() nounwind {
 ; RV32I-LABEL: load_ga_8:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(ga_8)
@@ -190,7 +190,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_ga_16() nounwind {
+define dso_local i64 @load_ga_16() nounwind {
 ; RV32I-LABEL: load_ga_16:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a1, %hi(ga_16)
@@ -210,10 +210,10 @@ entry:
 
 ; Check for folds in accesses to thread-local variables.
 
-@tl_4 = thread_local global i64 0, align 4
-@tl_8 = thread_local global i64 0, align 8
+@tl_4 = dso_local thread_local global i64 0, align 4
+@tl_8 = dso_local thread_local global i64 0, align 8
 
-define i64 @load_tl_4() nounwind {
+define dso_local i64 @load_tl_4() nounwind {
 ; RV32I-LABEL: load_tl_4:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %tprel_hi(tl_4)
@@ -234,7 +234,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_tl_8() nounwind {
+define dso_local i64 @load_tl_8() nounwind {
 ; RV32I-LABEL: load_tl_8:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %tprel_hi(tl_8)
@@ -254,7 +254,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_const_ok() nounwind {
+define dso_local i64 @load_const_ok() nounwind {
 ; RV32I-LABEL: load_const_ok:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lw a0, 2040(zero)
@@ -270,7 +270,7 @@ entry:
   ret i64 %0
 }
 
-define i64 @load_cost_overflow() nounwind {
+define dso_local i64 @load_cost_overflow() nounwind {
 ; RV32I-LABEL: load_cost_overflow:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, 1

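On the fold-safety condition described at the top of fold-addi-loadstore.ll: on RV32 an i64 global is read as two 4-byte loads, at g and g+4. Folding the +4 into the %lo offset is only sound if %hi stays valid, i.e. if %lo(g)+4 still fits in a signed 12-bit immediate. Alignment decides that (illustrative arithmetic, not part of the diff):

  ; %lo(g) lies in [-2048, 2047]
  ; align 8 => %lo(g) is a multiple of 8, so at most 2040;
  ;            2040 + 4 = 2044 still fits, the fold is safe
  ;            (load_g_8/load_g_16 fold the second load's offset)
  ; align 4 => %lo(g) may be 2044; 2044 + 4 = 2048 no longer fits,
  ;            so %hi would have to change and the fold must be avoided
  ;            (load_g_4 keeps a separate address computation)

The load_const_ok/load_cost_overflow pair at the end of the file probes the same boundary with absolute addresses.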
diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index dbef64c88c20..bb1e72bff719 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -27,7 +27,7 @@ define half @flh(half *%a) nounwind {
   ret half %4
 }
 
-define void @fsh(half *%a, half %b, half %c) nounwind {
+define dso_local void @fsh(half *%a, half %b, half %c) nounwind {
 ; Use %b and %c in an FP op to ensure half precision floating point registers
 ; are used, even for the soft half ABI
 ; RV32IZFH-LABEL: fsh:
@@ -51,7 +51,7 @@ define void @fsh(half *%a, half %b, half %c) nounwind {
 }
 
 ; Check load and store to a global
-@G = global half 0.0
+@G = dso_local global half 0.0
 
 define half @flh_fsh_global(half %a, half %b) nounwind {
 ; Use %a and %b in an FP op to ensure half precision floating point registers
@@ -152,7 +152,7 @@ define half @flh_stack(half %a) nounwind {
   ret half %4
 }
 
-define void @fsh_stack(half %a, half %b) nounwind {
+define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: fsh_stack:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16

diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll
index 5a2eeee41d0b..083909c72529 100644
--- a/llvm/test/CodeGen/RISCV/mem.ll
+++ b/llvm/test/CodeGen/RISCV/mem.ll
@@ -4,7 +4,7 @@
 
 ; Check indexed and unindexed, sext, zext and anyext loads
 
-define i32 @lb(i8 *%a) nounwind {
+define dso_local i32 @lb(i8 *%a) nounwind {
 ; RV32I-LABEL: lb:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lb a1, 1(a0)
@@ -19,7 +19,7 @@ define i32 @lb(i8 *%a) nounwind {
   ret i32 %3
 }
 
-define i32 @lh(i16 *%a) nounwind {
+define dso_local i32 @lh(i16 *%a) nounwind {
 ; RV32I-LABEL: lh:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lh a1, 4(a0)
@@ -34,7 +34,7 @@ define i32 @lh(i16 *%a) nounwind {
   ret i32 %3
 }
 
-define i32 @lw(i32 *%a) nounwind {
+define dso_local i32 @lw(i32 *%a) nounwind {
 ; RV32I-LABEL: lw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a1, 12(a0)
@@ -47,7 +47,7 @@ define i32 @lw(i32 *%a) nounwind {
   ret i32 %2
 }
 
-define i32 @lbu(i8 *%a) nounwind {
+define dso_local i32 @lbu(i8 *%a) nounwind {
 ; RV32I-LABEL: lbu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a1, 4(a0)
@@ -63,7 +63,7 @@ define i32 @lbu(i8 *%a) nounwind {
   ret i32 %6
 }
 
-define i32 @lhu(i16 *%a) nounwind {
+define dso_local i32 @lhu(i16 *%a) nounwind {
 ; RV32I-LABEL: lhu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lhu a1, 10(a0)
@@ -81,7 +81,7 @@ define i32 @lhu(i16 *%a) nounwind {
 
 ; Check indexed and unindexed stores
 
-define void @sb(i8 *%a, i8 %b) nounwind {
+define dso_local void @sb(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: sb:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sb a1, 0(a0)
@@ -93,7 +93,7 @@ define void @sb(i8 *%a, i8 %b) nounwind {
   ret void
 }
 
-define void @sh(i16 *%a, i16 %b) nounwind {
+define dso_local void @sh(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: sh:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sh a1, 0(a0)
@@ -105,7 +105,7 @@ define void @sh(i16 *%a, i16 %b) nounwind {
   ret void
 }
 
-define void @sw(i32 *%a, i32 %b) nounwind {
+define dso_local void @sw(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: sw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw a1, 0(a0)
@@ -118,7 +118,7 @@ define void @sw(i32 *%a, i32 %b) nounwind {
 }
 
 ; Check load and store to an i1 location
-define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
+define dso_local i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
 ; RV32I-LABEL: load_sext_zext_anyext_i1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a1, 1(a0)
@@ -140,7 +140,7 @@ define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
   ret i32 %7
 }
 
-define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
+define dso_local i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 ; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a1, 1(a0)
@@ -163,9 +163,9 @@ define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 }
 
 ; Check load and store to a global
-@G = global i32 0
+@G = dso_local global i32 0
 
-define i32 @lw_sw_global(i32 %a) nounwind {
+define dso_local i32 @lw_sw_global(i32 %a) nounwind {
 ; RV32I-LABEL: lw_sw_global:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, %hi(G)
@@ -185,7 +185,7 @@ define i32 @lw_sw_global(i32 %a) nounwind {
 }
 
 ; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
-define i32 @lw_sw_constant(i32 %a) nounwind {
+define dso_local i32 @lw_sw_constant(i32 %a) nounwind {
 ; RV32I-LABEL: lw_sw_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 912092

diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index 522786b867f6..4fcc6643bf7c 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -4,7 +4,7 @@
 
 ; Check indexed and unindexed, sext, zext and anyext loads
 
-define i64 @lb(i8 *%a) nounwind {
+define dso_local i64 @lb(i8 *%a) nounwind {
 ; RV64I-LABEL: lb:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lb a1, 1(a0)
@@ -19,7 +19,7 @@ define i64 @lb(i8 *%a) nounwind {
   ret i64 %3
 }
 
-define i64 @lh(i16 *%a) nounwind {
+define dso_local i64 @lh(i16 *%a) nounwind {
 ; RV64I-LABEL: lh:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lh a1, 4(a0)
@@ -34,7 +34,7 @@ define i64 @lh(i16 *%a) nounwind {
   ret i64 %3
 }
 
-define i64 @lw(i32 *%a) nounwind {
+define dso_local i64 @lw(i32 *%a) nounwind {
 ; RV64I-LABEL: lw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lw a1, 12(a0)
@@ -49,7 +49,7 @@ define i64 @lw(i32 *%a) nounwind {
   ret i64 %3
 }
 
-define i64 @lbu(i8 *%a) nounwind {
+define dso_local i64 @lbu(i8 *%a) nounwind {
 ; RV64I-LABEL: lbu:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lbu a1, 4(a0)
@@ -65,7 +65,7 @@ define i64 @lbu(i8 *%a) nounwind {
   ret i64 %6
 }
 
-define i64 @lhu(i16 *%a) nounwind {
+define dso_local i64 @lhu(i16 *%a) nounwind {
 ; RV64I-LABEL: lhu:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lhu a1, 10(a0)
@@ -81,7 +81,7 @@ define i64 @lhu(i16 *%a) nounwind {
   ret i64 %6
 }
 
-define i64 @lwu(i32 *%a) nounwind {
+define dso_local i64 @lwu(i32 *%a) nounwind {
 ; RV64I-LABEL: lwu:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lwu a1, 24(a0)
@@ -99,7 +99,7 @@ define i64 @lwu(i32 *%a) nounwind {
 
 ; Check indexed and unindexed stores
 
-define void @sb(i8 *%a, i8 %b) nounwind {
+define dso_local void @sb(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: sb:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sb a1, 0(a0)
@@ -111,7 +111,7 @@ define void @sb(i8 *%a, i8 %b) nounwind {
   ret void
 }
 
-define void @sh(i16 *%a, i16 %b) nounwind {
+define dso_local void @sh(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: sh:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sh a1, 0(a0)
@@ -123,7 +123,7 @@ define void @sh(i16 *%a, i16 %b) nounwind {
   ret void
 }
 
-define void @sw(i32 *%a, i32 %b) nounwind {
+define dso_local void @sw(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: sw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sw a1, 0(a0)
@@ -137,7 +137,7 @@ define void @sw(i32 *%a, i32 %b) nounwind {
 
 ; 64-bit loads and stores
 
-define i64 @ld(i64 *%a) nounwind {
+define dso_local i64 @ld(i64 *%a) nounwind {
 ; RV64I-LABEL: ld:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    ld a1, 80(a0)
@@ -150,7 +150,7 @@ define i64 @ld(i64 *%a) nounwind {
   ret i64 %2
 }
 
-define void @sd(i64 *%a, i64 %b) nounwind {
+define dso_local void @sd(i64 *%a, i64 %b) nounwind {
 ; RV64I-LABEL: sd:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sd a1, 0(a0)
@@ -163,7 +163,7 @@ define void @sd(i64 *%a, i64 %b) nounwind {
 }
 
 ; Check load and store to an i1 location
-define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
+define dso_local i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
 ; RV64I-LABEL: load_sext_zext_anyext_i1:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lbu a1, 1(a0)
@@ -185,7 +185,7 @@ define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
   ret i64 %7
 }
 
-define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
+define dso_local i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 ; RV64I-LABEL: load_sext_zext_anyext_i1_i16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lbu a1, 1(a0)
@@ -208,9 +208,9 @@ define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 }
 
 ; Check load and store to a global
-@G = global i64 0
+@G = dso_local global i64 0
 
-define i64 @ld_sd_global(i64 %a) nounwind {
+define dso_local i64 @ld_sd_global(i64 %a) nounwind {
 ; RV64I-LABEL: ld_sd_global:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, %hi(G)

diff --git a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
index 7af31e3bdfba..fd7bd17fb0ed 100644
--- a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -4,9 +4,9 @@
 
 ; TODO: lbu and lhu should be selected to avoid the unnecessary masking.
 
-@bytes = global [5 x i8] zeroinitializer, align 1
+@bytes = dso_local global [5 x i8] zeroinitializer, align 1
 
-define i32 @test_zext_i8() nounwind {
+define dso_local i32 @test_zext_i8() nounwind {
 ; RV32I-LABEL: test_zext_i8:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(bytes)
@@ -38,9 +38,9 @@ if.end:
   ret i32 0
 }
 
-@shorts = global [5 x i16] zeroinitializer, align 2
+@shorts = dso_local global [5 x i16] zeroinitializer, align 2
 
-define i32 @test_zext_i16() nounwind {
+define dso_local i32 @test_zext_i16() nounwind {
 ; RV32I-LABEL: test_zext_i16:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(shorts)

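On the TODO at the top of zext-with-load-is-free.ll: lbu and lhu zero-extend as part of the load, so a zext of a freshly loaded value should be free; selecting the sign-extending lb/lh instead forces an extra masking instruction. A minimal sketch of the i8 case (illustrative; the test's actual IR and CHECK lines are longer):

  %0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1
  %1 = zext i8 %0 to i32

  ; selected as lbu, the zext costs nothing:
  ;   lbu  a0, %lo(bytes)(a0)    ; result is already zero-extended
  ; selected as lb (what the TODO complains about), a mask is needed:
  ;   lb   a0, %lo(bytes)(a0)
  ;   andi a0, a0, 255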
