[llvm] [LoongArch] Support emulated TLS (PR #92483)

via llvm-commits llvm-commits at lists.llvm.org
Thu May 16 18:48:50 PDT 2024


https://github.com/wangleiat created https://github.com/llvm/llvm-project/pull/92483

Some developers are currently porting OpenHOS to LoongArch, which requires
support for emulated TLS. We should support it in the same way RISC-V does:
when emulated TLS is enabled, lowerGlobalTLSAddress dispatches to the generic
LowerToTLSEmulatedModel hook instead of the native TLS models.
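For reviewers unfamiliar with the mechanism, here is a rough C++-level sketch
of what emulated TLS means for a thread-local access. The __emutls_v_x
identifier below is only a stand-in for the emitted __emutls_v.x symbol (which
is not a valid C++ name); __emutls_get_address is the real runtime entry point
provided by compiler-rt/libgcc:

  // What the backend conceptually emits for `thread_local int x;` under
  // -emulated-tls / -femulated-tls: no TLS relocations, just a runtime call.
  extern "C" void *__emutls_get_address(void *control);
  struct __emutls_control;              // opaque per-variable descriptor
  extern __emutls_control __emutls_v_x; // stand-in for the emitted __emutls_v.x

  int *addr_of_x() {
    // The runtime lazily allocates and initializes this thread's copy of x
    // and returns its address.
    return static_cast<int *>(__emutls_get_address(&__emutls_v_x));
  }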


From 2628badf5c683a80157f61f5f0ae6df6ff05f37c Mon Sep 17 00:00:00 2001
From: wanglei <wanglei at loongson.cn>
Date: Fri, 17 May 2024 09:48:40 +0800
Subject: [PATCH] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Created using spr 1.3.5-bogner
---
 .../LoongArch/LoongArchISelLowering.cpp       |   3 +
 llvm/test/CodeGen/LoongArch/emutls.ll         | 133 ++++++++++++++++++
 2 files changed, 136 insertions(+)
 create mode 100644 llvm/test/CodeGen/LoongArch/emutls.ll

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index fe2c613b1b30f..0b22ba50ee30f 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -934,6 +934,9 @@ LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
   assert(N->getOffset() == 0 && "unexpected offset in global node");
 
+  if (DAG.getTarget().useEmulatedTLS())
+    return LowerToTLSEmulatedModel(N, DAG);
+
   bool IsDesc = DAG.getTarget().useTLSDESC();
 
   switch (getTargetMachine().getTLSModel(N->getGlobal())) {
diff --git a/llvm/test/CodeGen/LoongArch/emutls.ll b/llvm/test/CodeGen/LoongArch/emutls.ll
new file mode 100644
index 0000000000000..56ec8e3715f38
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/emutls.ll
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch32 -emulated-tls -relocation-model=pic < %s \
+; RUN:     | FileCheck -check-prefix=LA32 %s
+; RUN: llc --mtriple=loongarch64 -emulated-tls -relocation-model=pic < %s \
+; RUN:     | FileCheck -check-prefix=LA64 %s
+
+@external_x = external thread_local global i32, align 8
+@y = thread_local global i8 7, align 2
+@internal_z = internal thread_local global i64 9, align 16
+
+define ptr @get_external_x() nounwind {
+; LA32-LABEL: get_external_x:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    pcalau12i $a0, %got_pc_hi20(__emutls_v.external_x)
+; LA32-NEXT:    ld.w $a0, $a0, %got_pc_lo12(__emutls_v.external_x)
+; LA32-NEXT:    bl %plt(__emutls_get_address)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: get_external_x:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcalau12i $a0, %got_pc_hi20(__emutls_v.external_x)
+; LA64-NEXT:    ld.d $a0, $a0, %got_pc_lo12(__emutls_v.external_x)
+; LA64-NEXT:    bl %plt(__emutls_get_address)
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  ret ptr @external_x
+}
+
+define ptr @get_y() nounwind {
+; LA32-LABEL: get_y:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    pcalau12i $a0, %got_pc_hi20(__emutls_v.y)
+; LA32-NEXT:    ld.w $a0, $a0, %got_pc_lo12(__emutls_v.y)
+; LA32-NEXT:    bl %plt(__emutls_get_address)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: get_y:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcalau12i $a0, %got_pc_hi20(__emutls_v.y)
+; LA64-NEXT:    ld.d $a0, $a0, %got_pc_lo12(__emutls_v.y)
+; LA64-NEXT:    bl %plt(__emutls_get_address)
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  ret ptr @y
+}
+
+define ptr @get_internal_z() nounwind {
+; LA32-LABEL: get_internal_z:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    pcalau12i $a0, %pc_hi20(__emutls_v.internal_z)
+; LA32-NEXT:    addi.w $a0, $a0, %pc_lo12(__emutls_v.internal_z)
+; LA32-NEXT:    bl %plt(__emutls_get_address)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: get_internal_z:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcalau12i $a0, %pc_hi20(__emutls_v.internal_z)
+; LA64-NEXT:    addi.d $a0, $a0, %pc_lo12(__emutls_v.internal_z)
+; LA64-NEXT:    bl %plt(__emutls_get_address)
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  ret ptr @internal_z
+}
+
+; UTC_ARGS: --disable
+
+; LA32:        .data
+; LA32:        .globl __emutls_v.y
+; LA32:        .p2align 2
+; LA32-LABEL:  __emutls_v.y:
+; LA32-NEXT:     .word 1
+; LA32-NEXT:     .word 2
+; LA32-NEXT:     .word 0
+; LA32-NEXT:     .word __emutls_t.y
+; LA32:        .section .rodata,
+; LA32-LABEL:  __emutls_t.y:
+; LA32-NEXT:     .byte 7
+; LA32:        .data
+; LA32:        .p2align 2
+; LA32-LABEL:  __emutls_v.internal_z:
+; LA32-NEXT:     .word 8
+; LA32-NEXT:     .word 16
+; LA32-NEXT:     .word 0
+; LA32-NEXT:     .word __emutls_t.internal_z
+; LA32:        .section .rodata,
+; LA32-LABEL:  __emutls_t.internal_z:
+; LA32-NEXT:     .dword 9
+
+; LA64:        .data
+; LA64:        .globl __emutls_v.y
+; LA64:        .p2align 3
+; LA64-LABEL:  __emutls_v.y:
+; LA64-NEXT:     .dword 1
+; LA64-NEXT:     .dword 2
+; LA64-NEXT:     .dword 0
+; LA64-NEXT:     .dword __emutls_t.y
+; LA64:        .section .rodata,
+; LA64-LABEL:  __emutls_t.y:
+; LA64-NEXT:     .byte 7
+; LA64:        .data
+; LA64:        .p2align 3
+; LA64-LABEL:  __emutls_v.internal_z:
+; LA64-NEXT:     .dword 8
+; LA64-NEXT:     .dword 16
+; LA64-NEXT:     .dword 0
+; LA64-NEXT:     .dword __emutls_t.internal_z
+; LA64:        .section .rodata,
+; LA64-LABEL:  __emutls_t.internal_z:
+; LA64-NEXT:     .dword 9
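
The __emutls_v.<name> records checked above follow the layout of compiler-rt's
__emutls_control (compiler-rt/lib/builtins/emutls.c). A sketch of that layout,
with comments mapping the fields to the .word/.dword values in the test; the
field width is the target pointer size, hence .word on LA32 and .dword on LA64:

  #include <cstddef>
  #include <cstdint>

  struct __emutls_control {
    std::size_t size;  // object size in bytes: 1 for @y (i8), 8 for @internal_z (i64)
    std::size_t align; // object alignment:     2 for @y,     16 for @internal_z
    union {
      std::uintptr_t index; // per-thread slot index, filled in lazily; emitted as 0
      void *address;        // object address in a single-threaded environment
    } object;
    void *value;       // initial-value template __emutls_t.<name>, or null if zero-init
  };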


