[llvm] 535a419 - [AIX][TLS] Generate 64-bit general-dynamic access code sequence
Lei Huang via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 8 14:41:37 PST 2021
Author: Lei Huang
Date: 2021-03-08T16:41:25-06:00
New Revision: 535a4192a9d19ef5d474f1d6bfa1ab9f08b2b1c7
URL: https://github.com/llvm/llvm-project/commit/535a4192a9d19ef5d474f1d6bfa1ab9f08b2b1c7
DIFF: https://github.com/llvm/llvm-project/commit/535a4192a9d19ef5d474f1d6bfa1ab9f08b2b1c7.diff
LOG: [AIX][TLS] Generate 64-bit general-dynamic access code sequence
Add support for the TLS general dynamic access model to assembly
files on AIX 64-bit.
Reviewed By: sfertile
Differential Revision: https://reviews.llvm.org/D98078
Added:
Modified:
llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCInstr64Bit.td
llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
Removed:
llvm/test/CodeGen/PowerPC/aix-tls-checks.ll
################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 73658dc7f614..aa0707abf616 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -1155,7 +1155,8 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
// Into: BL8_NOP_TLS __tls_get_addr(sym@tlsgd)
case PPC::GETtlsADDRPCREL:
case PPC::GETtlsADDR32AIX:
- // Transform: %r3 = GETtlsADDR32AIX %r3, %r4
+ case PPC::GETtlsADDR64AIX:
+ // Transform: %r3 = GETtlsADDRNNAIX %r3, %r4 (for NN == 32/64).
// Into: BLA .__tls_get_addr()
// Unlike on Linux, there is no symbol or relocation needed for this call.
case PPC::GETtlsADDR32: {
@@ -2312,6 +2313,7 @@ void PPCAIXAsmPrinter::emitInstruction(const MachineInstr *MI) {
switch (MI->getOpcode()) {
default:
break;
+ case PPC::GETtlsADDR64AIX:
case PPC::GETtlsADDR32AIX: {
// The reference to .__tls_get_addr is unknown to the assembler
// so we need to emit an external symbol reference.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 81916d4ccffe..4fa1689a77c4 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3131,9 +3131,6 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
if (DAG.getTarget().useEmulatedTLS())
report_fatal_error("Emulated TLS is not yet supported on AIX");
- if (Subtarget.isPPC64())
- report_fatal_error("TLS is not yet supported on AIX PPC64");
-
SDLoc dl(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index dd459cecbb2f..4fcc48e4bae6 100644
--- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -1301,6 +1301,18 @@ let Defs = [X0,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7] in
def GETtlsldADDR : GETtlsldADDRPseudo <"#GETtlsldADDR">;
let Defs = [X0,X2,X4,X5,X6,X7,X8,X9,X10,X11,X12,LR8,CTR8,CR0,CR1,CR5,CR6,CR7] in
def GETtlsldADDRPCREL : GETtlsldADDRPseudo <"#GETtlsldADDRPCREL">;
+
+// On AIX, the call to __tls_get_addr needs two inputs in X3/X4 for the
+// offset and region handle respectively. The call is not followed by a nop
+// so we don't need to mark it with a size of 8 bytes. Finally, the assembly
+// manual mentions this exact set of registers as the clobbered set, others
+// are guaranteed not to be clobbered.
+let Defs = [X0,X4,X5,X11,LR8,CR0] in
+def GETtlsADDR64AIX :
+ PPCEmitTimePseudo<(outs g8rc:$rD),(ins g8rc:$offset, g8rc:$handle),
+ "GETtlsADDR64AIX",
+ [(set i64:$rD,
+ (PPCgetTlsAddr i64:$offset, i64:$handle))]>, isPPC64;
}
// Combined op for ADDItlsgdL and GETtlsADDR, late expanded. X3 and LR8
@@ -1326,6 +1338,13 @@ def ADDItlsldL : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm6
[(set i64:$rD,
(PPCaddiTlsldL i64:$reg, tglobaltlsaddr:$disp))]>,
isPPC64;
+// This pseudo is expanded to two copies to put the variable offset in R4 and
+// the region handle in R3 and GETtlsADDR64AIX.
+def TLSGDAIX8 :
+ PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc:$offset, g8rc:$handle),
+ "#TLSGDAIX8",
+ [(set i64:$rD,
+ (PPCTlsgdAIX i64:$offset, i64:$handle))]>;
// Combined op for ADDItlsldL and GETtlsADDR, late expanded. X3 and LR8
// are true defines, while the rest of the Defs are clobbers.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
@@ -1594,6 +1613,10 @@ def : Pat<(add i64:$in, (PPChi tjumptable:$g, 0)),
def : Pat<(add i64:$in, (PPChi tblockaddress:$g, 0)),
(ADDIS8 $in, tblockaddress:$g)>;
+// AIX 64-bit small code model TLS access.
+def : Pat<(i64 (PPCtoc_entry tglobaltlsaddr:$disp, i64:$reg)),
+ (i64 (LDtoc tglobaltlsaddr:$disp, i64:$reg))>;
+
// Patterns to match r+r indexed loads and stores for
// addresses without at least 4-byte alignment.
def : Pat<(i64 (NonDSFormSextLoadi32 xoaddr:$src)),
diff --git a/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
index 612df87c1d5d..3186d197931d 100644
--- a/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
@@ -62,7 +62,8 @@ namespace {
MI.getOpcode() != PPC::ADDItlsldLADDR &&
MI.getOpcode() != PPC::ADDItlsgdLADDR32 &&
MI.getOpcode() != PPC::ADDItlsldLADDR32 &&
- MI.getOpcode() != PPC::TLSGDAIX && !IsPCREL) {
+ MI.getOpcode() != PPC::TLSGDAIX &&
+ MI.getOpcode() != PPC::TLSGDAIX8 && !IsPCREL) {
// Although we create ADJCALLSTACKDOWN and ADJCALLSTACKUP
// as scheduling fences, we skip creating fences if we already
// have existing ADJCALLSTACKDOWN/UP to avoid nesting,
@@ -109,6 +110,11 @@ namespace {
Opc1 = PPC::ADDItlsldL32;
Opc2 = PPC::GETtlsldADDR32;
break;
+ case PPC::TLSGDAIX8:
+ // TLSGDAIX8 is expanded to two copies and GET_TLS_ADDR, so we only
+ // set Opc2 here.
+ Opc2 = PPC::GETtlsADDR64AIX;
+ break;
case PPC::TLSGDAIX:
// TLSGDAIX is expanded to two copies and GET_TLS_ADDR, so we only
// set Opc2 here.
@@ -140,7 +146,7 @@ namespace {
if (IsAIX) {
// The variable offset and region handle are copied in r4 and r3. The
- // copies are followed by the GETtlsADDR32AIX instruction.
+ // copies are followed by GETtlsADDR32AIX/GETtlsADDR64AIX.
BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), GPR4)
.addReg(MI.getOperand(1).getReg());
BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), GPR3)
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-checks.ll b/llvm/test/CodeGen/PowerPC/aix-tls-checks.ll
deleted file mode 100644
index 1a7757ce1da4..000000000000
--- a/llvm/test/CodeGen/PowerPC/aix-tls-checks.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: not --crash llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
-; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s - 2>&1 | FileCheck %s
-
-; CHECK: TLS is not yet supported on AIX PPC64
-
- at tls1 = thread_local global i32 0, align 4
-
-define i32* @getTls1Addr() {
-entry:
- ret i32* @tls1
-}
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
index b3fbec3ff2d0..b38c2e34e04a 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
@@ -4,6 +4,12 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
; RUN: -mtriple powerpc-ibm-aix-xcoff --code-model=large < %s \
; RUN: | FileCheck %s --check-prefix=LARGE32
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large < %s \
+; RUN: | FileCheck %s --check-prefix=LARGE64
@TGInit = thread_local global double 1.000000e+00, align 8
@TWInit = weak thread_local global double 1.000000e+00, align 8
@@ -42,6 +48,36 @@ define void @storesTGUninit(double %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTGUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C0(2)
+; SMALL64-NEXT: ld 4, L..C1(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stfd 1, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTGUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C0@u(2)
+; LARGE64-NEXT: addis 4, L..C1@u(2)
+; LARGE64-NEXT: ld 3, L..C0@l(3)
+; LARGE64-NEXT: ld 4, L..C1@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stfd 1, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store double %Val, double* @TGUninit, align 8
ret void
@@ -78,6 +114,36 @@ define void @storesTGInit(double %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTGInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C2(2)
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stfd 1, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTGInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C2@u(2)
+; LARGE64-NEXT: addis 4, L..C3@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: ld 4, L..C3@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stfd 1, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store double %Val, double* @TGInit, align 8
ret void
@@ -114,6 +180,36 @@ define void @storesTIInit(double %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTIInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C4(2)
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stfd 1, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTIInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C4@u(2)
+; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: ld 3, L..C4@l(3)
+; LARGE64-NEXT: ld 4, L..C5@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stfd 1, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store double %Val, double* @TIInit, align 8
ret void
@@ -150,6 +246,36 @@ define void @storesTWInit(double %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTWInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C6(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stfd 1, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTWInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C6@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: ld 3, L..C6@l(3)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stfd 1, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store double %Val, double* @TWInit, align 8
ret void
@@ -193,6 +319,43 @@ define double @loadsTGUninit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTGUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C0(2)
+; SMALL64-NEXT: ld 4, L..C1(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lfd 0, 0(3)
+; SMALL64-NEXT: lfd 1, 0(4)
+; SMALL64-NEXT: fadd 1, 0, 1
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTGUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C0@u(2)
+; LARGE64-NEXT: addis 4, L..C1@u(2)
+; LARGE64-NEXT: ld 3, L..C0@l(3)
+; LARGE64-NEXT: ld 4, L..C1@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lfd 0, 0(3)
+; LARGE64-NEXT: ld 3, L..C8@l(4)
+; LARGE64-NEXT: lfd 1, 0(3)
+; LARGE64-NEXT: fadd 1, 0, 1
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load double, double* @TGUninit, align 8
%1 = load double, double* @GInit, align 8
@@ -238,6 +401,43 @@ define double @loadsTGInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTGInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C2(2)
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lfd 0, 0(3)
+; SMALL64-NEXT: lfd 1, 0(4)
+; SMALL64-NEXT: fadd 1, 0, 1
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTGInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C2@u(2)
+; LARGE64-NEXT: addis 4, L..C3@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: ld 4, L..C3@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lfd 0, 0(3)
+; LARGE64-NEXT: ld 3, L..C8@l(4)
+; LARGE64-NEXT: lfd 1, 0(3)
+; LARGE64-NEXT: fadd 1, 0, 1
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load double, double* @TGInit, align 8
%1 = load double, double* @GInit, align 8
@@ -283,6 +483,43 @@ define double @loadsTIInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTIInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C4(2)
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lfd 0, 0(3)
+; SMALL64-NEXT: lfd 1, 0(4)
+; SMALL64-NEXT: fadd 1, 0, 1
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTIInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C4@u(2)
+; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: ld 3, L..C4@l(3)
+; LARGE64-NEXT: ld 4, L..C5@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lfd 0, 0(3)
+; LARGE64-NEXT: ld 3, L..C8@l(4)
+; LARGE64-NEXT: lfd 1, 0(3)
+; LARGE64-NEXT: fadd 1, 0, 1
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load double, double* @TIInit, align 8
%1 = load double, double* @GInit, align 8
@@ -328,6 +565,43 @@ define double @loadsTWInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTWInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C6(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lfd 0, 0(3)
+; SMALL64-NEXT: lfd 1, 0(4)
+; SMALL64-NEXT: fadd 1, 0, 1
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTWInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C6@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: ld 3, L..C6@l(3)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lfd 0, 0(3)
+; LARGE64-NEXT: ld 3, L..C8@l(4)
+; LARGE64-NEXT: lfd 1, 0(3)
+; LARGE64-NEXT: fadd 1, 0, 1
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load double, double* @TWInit, align 8
%1 = load double, double* @GInit, align 8
@@ -377,5 +651,46 @@ entry:
; LARGE32-LABEL: L..C8:
; LARGE32-NEXT: .tc GInit[TE],GInit[RW]
+; SMALL64-LABEL: .toc
+; SMALL64-LABEL: L..C0:
+; SMALL64-NEXT: .tc .TGUninit[TC],TGUninit[TL]@m
+; SMALL64-LABEL: L..C1:
+; SMALL64-NEXT: .tc TGUninit[TC],TGUninit[TL]
+; SMALL64-LABEL: L..C2:
+; SMALL64-NEXT: .tc .TGInit[TC],TGInit[TL]@m
+; SMALL64-LABEL: L..C3:
+; SMALL64-NEXT: .tc TGInit[TC],TGInit[TL]
+; SMALL64-LABEL: L..C4:
+; SMALL64-NEXT: .tc .TIInit[TC],TIInit[TL]@m
+; SMALL64-LABEL: L..C5:
+; SMALL64-NEXT: .tc TIInit[TC],TIInit[TL]
+; SMALL64-LABEL: L..C6:
+; SMALL64-NEXT: .tc .TWInit[TC],TWInit[TL]@m
+; SMALL64-LABEL: L..C7:
+; SMALL64-NEXT: .tc TWInit[TC],TWInit[TL]
+; SMALL64-LABEL: L..C8:
+; SMALL64-NEXT: .tc GInit[TC],GInit[RW]
+
+; LARGE64-LABEL: .toc
+; LARGE64-LABEL: L..C0:
+; LARGE64-NEXT: .tc .TGUninit[TE],TGUninit[TL]@m
+; LARGE64-LABEL: L..C1:
+; LARGE64-NEXT: .tc TGUninit[TE],TGUninit[TL]
+; LARGE64-LABEL: L..C2:
+; LARGE64-NEXT: .tc .TGInit[TE],TGInit[TL]@m
+; LARGE64-LABEL: L..C3:
+; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]
+; LARGE64-LABEL: L..C4:
+; LARGE64-NEXT: .tc .TIInit[TE],TIInit[TL]@m
+; LARGE64-LABEL: L..C5:
+; LARGE64-NEXT: .tc TIInit[TE],TIInit[TL]
+; LARGE64-LABEL: L..C6:
+; LARGE64-NEXT: .tc .TWInit[TE],TWInit[TL]@m
+; LARGE64-LABEL: L..C7:
+; LARGE64-NEXT: .tc TWInit[TE],TWInit[TL]
+; LARGE64-LABEL: L..C8:
+; LARGE64-NEXT: .tc GInit[TE],GInit[RW]
+
+
attributes #0 = { nofree norecurse nounwind willreturn writeonly "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-rop-protection,-spe,-vsx" }
attributes #1 = { norecurse nounwind readonly willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-rop-protection,-spe,-vsx" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
index 46b317ce34ef..300e9b2463c9 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
@@ -4,6 +4,12 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
; RUN: -mtriple powerpc-ibm-aix-xcoff --code-model=large < %s \
; RUN: | FileCheck %s --check-prefix=LARGE32
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large < %s \
+; RUN: | FileCheck %s --check-prefix=LARGE64
@TGInit = thread_local global i32 1, align 4
@GInit = global i32 1, align 4
@@ -44,6 +50,38 @@ define void @storesTGUninit(i32 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTGUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C0(2)
+; SMALL64-NEXT: ld 4, L..C1(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stw 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTGUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C0@u(2)
+; LARGE64-NEXT: addis 4, L..C1@u(2)
+; LARGE64-NEXT: ld 3, L..C0@l(3)
+; LARGE64-NEXT: ld 4, L..C1@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stw 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i32 %Val, i32* @TGUninit, align 4
ret void
@@ -82,6 +120,38 @@ define void @storesTGInit(i32 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTGInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C2(2)
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stw 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTGInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C2@u(2)
+; LARGE64-NEXT: addis 4, L..C3@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: ld 4, L..C3@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stw 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i32 %Val, i32* @TGInit, align 4
ret void
@@ -120,6 +190,38 @@ define void @storesTIUninit(i32 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTIUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C4(2)
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stw 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTIUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C4@u(2)
+; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: ld 3, L..C4@l(3)
+; LARGE64-NEXT: ld 4, L..C5@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stw 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i32 %Val, i32* @TIUninit, align 4
ret void
@@ -158,6 +260,38 @@ define void @storesTWUninit(i32 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTWUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C6(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: stw 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTWUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C6@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: ld 3, L..C6@l(3)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: stw 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i32 %Val, i32* @TWUninit, align 4
ret void
@@ -201,6 +335,43 @@ define i32 @loadsTGUninit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTGUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C0(2)
+; SMALL64-NEXT: ld 4, L..C1(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lwz 3, 0(3)
+; SMALL64-NEXT: lwz 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTGUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C0@u(2)
+; LARGE64-NEXT: addis 4, L..C1@u(2)
+; LARGE64-NEXT: ld 3, L..C0@l(3)
+; LARGE64-NEXT: ld 4, L..C1@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lwz 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: lwz 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i32, i32* @TGUninit, align 4
%1 = load i32, i32* @GInit, align 4
@@ -246,6 +417,43 @@ define i32 @loadsTGInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTGInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C2(2)
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lwz 3, 0(3)
+; SMALL64-NEXT: lwz 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTGInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C2@u(2)
+; LARGE64-NEXT: addis 4, L..C3@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: ld 4, L..C3@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lwz 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: lwz 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i32, i32* @TGInit, align 4
%1 = load i32, i32* @GInit, align 4
@@ -291,6 +499,43 @@ define i32 @loadsTIUninit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTIUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C4(2)
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lwz 3, 0(3)
+; SMALL64-NEXT: lwz 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTIUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C4@u(2)
+; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: ld 3, L..C4@l(3)
+; LARGE64-NEXT: ld 4, L..C5@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lwz 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: lwz 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i32, i32* @TIUninit, align 4
%1 = load i32, i32* @GInit, align 4
@@ -336,6 +581,43 @@ define i32 @loadsTWUninit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTWUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C6(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: lwz 3, 0(3)
+; SMALL64-NEXT: lwz 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTWUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C6@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: ld 3, L..C6@l(3)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: lwz 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: lwz 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i32, i32* @TWUninit, align 4
%1 = load i32, i32* @GInit, align 4
@@ -385,5 +667,45 @@ entry:
; LARGE32-LABEL: L..C8:
; LARGE32-NEXT: .tc GInit[TE],GInit[RW]
+; SMALL64-LABEL: .toc
+; SMALL64-LABEL: L..C0:
+; SMALL64-NEXT: .tc .TGUninit[TC],TGUninit[TL]@m
+; SMALL64-LABEL: L..C1:
+; SMALL64-NEXT: .tc TGUninit[TC],TGUninit[TL]
+; SMALL64-LABEL: L..C2:
+; SMALL64-NEXT: .tc .TGInit[TC],TGInit[TL]@m
+; SMALL64-LABEL: L..C3:
+; SMALL64-NEXT: .tc TGInit[TC],TGInit[TL]
+; SMALL64-LABEL: L..C4:
+; SMALL64-NEXT: .tc .TIUninit[TC],TIUninit[UL]@m
+; SMALL64-LABEL: L..C5:
+; SMALL64-NEXT: .tc TIUninit[TC],TIUninit[UL]
+; SMALL64-LABEL: L..C6:
+; SMALL64-NEXT: .tc .TWUninit[TC],TWUninit[TL]@m
+; SMALL64-LABEL: L..C7:
+; SMALL64-NEXT: .tc TWUninit[TC],TWUninit[TL]
+; SMALL64-LABEL: L..C8:
+; SMALL64-NEXT: .tc GInit[TC],GInit[RW]
+
+; LARGE64-LABEL: .toc
+; LARGE64-LABEL: L..C0:
+; LARGE64-NEXT: .tc .TGUninit[TE],TGUninit[TL]@m
+; LARGE64-LABEL: L..C1:
+; LARGE64-NEXT: .tc TGUninit[TE],TGUninit[TL]
+; LARGE64-LABEL: L..C2:
+; LARGE64-NEXT: .tc .TGInit[TE],TGInit[TL]@m
+; LARGE64-LABEL: L..C3:
+; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]
+; LARGE64-LABEL: L..C4:
+; LARGE64-NEXT: .tc .TIUninit[TE],TIUninit[UL]@m
+; LARGE64-LABEL: L..C5:
+; LARGE64-NEXT: .tc TIUninit[TE],TIUninit[UL]
+; LARGE64-LABEL: L..C6:
+; LARGE64-NEXT: .tc .TWUninit[TE],TWUninit[TL]@m
+; LARGE64-LABEL: L..C7:
+; LARGE64-NEXT: .tc TWUninit[TE],TWUninit[TL]
+; LARGE64-LABEL: L..C8:
+; LARGE64-NEXT: .tc GInit[TE],GInit[RW]
+
attributes #0 = { nofree norecurse nounwind willreturn writeonly "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-rop-protection,-spe,-vsx" }
attributes #1 = { norecurse nounwind readonly willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-rop-protection,-spe,-vsx" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
index df333329d5a5..0fdc51216b0c 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
@@ -4,6 +4,12 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
; RUN: -mtriple powerpc-ibm-aix-xcoff --code-model=large < %s \
; RUN: | FileCheck %s --check-prefix=LARGE32
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large < %s \
+; RUN: | FileCheck %s --check-prefix=LARGE64
@TGInit = thread_local global i64 1, align 8
@TWInit = weak thread_local global i64 1, align 8
@@ -48,6 +54,38 @@ define void @storesTGInit(i64 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTGInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C0(2)
+; SMALL64-NEXT: ld 4, L..C1(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: std 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTGInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C0@u(2)
+; LARGE64-NEXT: addis 4, L..C1@u(2)
+; LARGE64-NEXT: ld 3, L..C0@l(3)
+; LARGE64-NEXT: ld 4, L..C1@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: std 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i64 %Val, i64* @TGInit, align 8
ret void
@@ -90,6 +128,38 @@ define void @storesTIUninit(i64 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTIUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C2(2)
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: std 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTIUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C2@u(2)
+; LARGE64-NEXT: addis 4, L..C3@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: ld 4, L..C3@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: std 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i64 %Val, i64* @TIUninit, align 8
ret void
@@ -132,6 +202,38 @@ define void @storesTIInit(i64 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTIInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C4(2)
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: std 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTIInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C4@u(2)
+; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: ld 3, L..C4@l(3)
+; LARGE64-NEXT: ld 4, L..C5@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: std 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i64 %Val, i64* @TIInit, align 8
ret void
@@ -174,6 +276,38 @@ define void @storesTWInit(i64 %Val) #0 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: storesTWInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: mr 6, 3
+; SMALL64-NEXT: ld 3, L..C6(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: std 6, 0(3)
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: storesTWInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: mr 6, 3
+; LARGE64-NEXT: addis 3, L..C6@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: ld 3, L..C6@l(3)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: std 6, 0(3)
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
store i64 %Val, i64* @TWInit, align 8
ret void
@@ -223,6 +357,43 @@ define i64 @loadsTGInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTGInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C0(2)
+; SMALL64-NEXT: ld 4, L..C1(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: ld 3, 0(3)
+; SMALL64-NEXT: ld 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTGInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C0@u(2)
+; LARGE64-NEXT: addis 4, L..C1@u(2)
+; LARGE64-NEXT: ld 3, L..C0@l(3)
+; LARGE64-NEXT: ld 4, L..C1@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: ld 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: ld 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i64, i64* @TGInit, align 8
%1 = load i64, i64* @GInit, align 8
@@ -274,6 +445,43 @@ define i64 @loadsTIUninit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTIUninit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C2(2)
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: ld 3, 0(3)
+; SMALL64-NEXT: ld 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTIUninit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C2@u(2)
+; LARGE64-NEXT: addis 4, L..C3@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: ld 4, L..C3@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: ld 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: ld 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i64, i64* @TIUninit, align 8
%1 = load i64, i64* @GInit, align 8
@@ -325,6 +533,43 @@ define i64 @loadsTIInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTIInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C4(2)
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: ld 3, 0(3)
+; SMALL64-NEXT: ld 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTIInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C4@u(2)
+; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: ld 3, L..C4@l(3)
+; LARGE64-NEXT: ld 4, L..C5@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: ld 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: ld 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i64, i64* @TIInit, align 8
%1 = load i64, i64* @GInit, align 8
@@ -376,6 +621,43 @@ define i64 @loadsTWInit() #1 {
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
; LARGE32-NEXT: blr
+;
+; SMALL64-LABEL: loadsTWInit:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: mflr 0
+; SMALL64-NEXT: std 0, 16(1)
+; SMALL64-NEXT: stdu 1, -48(1)
+; SMALL64-NEXT: ld 3, L..C6(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: bla .__tls_get_addr
+; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: ld 3, 0(3)
+; SMALL64-NEXT: ld 4, 0(4)
+; SMALL64-NEXT: add 3, 4, 3
+; SMALL64-NEXT: addi 1, 1, 48
+; SMALL64-NEXT: ld 0, 16(1)
+; SMALL64-NEXT: mtlr 0
+; SMALL64-NEXT: blr
+;
+; LARGE64-LABEL: loadsTWInit:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: mflr 0
+; LARGE64-NEXT: std 0, 16(1)
+; LARGE64-NEXT: stdu 1, -48(1)
+; LARGE64-NEXT: addis 3, L..C6@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: ld 3, L..C6@l(3)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: bla .__tls_get_addr
+; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: ld 3, 0(3)
+; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: ld 4, 0(4)
+; LARGE64-NEXT: add 3, 4, 3
+; LARGE64-NEXT: addi 1, 1, 48
+; LARGE64-NEXT: ld 0, 16(1)
+; LARGE64-NEXT: mtlr 0
+; LARGE64-NEXT: blr
entry:
%0 = load i64, i64* @TWInit, align 8
%1 = load i64, i64* @GInit, align 8
@@ -424,5 +706,46 @@ entry:
; LARGE32-NEXT: .tc TWInit[TE],TWInit[TL]
; LARGE32-LABEL: L..C8:
; LARGE32-NEXT: .tc GInit[TE],GInit[RW]
+
+; SMALL64-LABEL: .toc
+; SMALL64-LABEL: L..C0:
+; SMALL64-NEXT: .tc .TGInit[TC],TGInit[TL]@m
+; SMALL64-LABEL: L..C1:
+; SMALL64-NEXT: .tc TGInit[TC],TGInit[TL]
+; SMALL64-LABEL: L..C2:
+; SMALL64-NEXT: .tc .TIUninit[TC],TIUninit[UL]@m
+; SMALL64-LABEL: L..C3:
+; SMALL64-NEXT: .tc TIUninit[TC],TIUninit[UL]
+; SMALL64-LABEL: L..C4:
+; SMALL64-NEXT: .tc .TIInit[TC],TIInit[TL]@m
+; SMALL64-LABEL: L..C5:
+; SMALL64-NEXT: .tc TIInit[TC],TIInit[TL]
+; SMALL64-LABEL: L..C6:
+; SMALL64-NEXT: .tc .TWInit[TC],TWInit[TL]@m
+; SMALL64-LABEL: L..C7:
+; SMALL64-NEXT: .tc TWInit[TC],TWInit[TL]
+; SMALL64-LABEL: L..C8:
+; SMALL64-NEXT: .tc GInit[TC],GInit[RW]
+
+; LARGE64-LABEL: .toc
+; LARGE64-LABEL: L..C0:
+; LARGE64-NEXT: .tc .TGInit[TE],TGInit[TL]@m
+; LARGE64-LABEL: L..C1:
+; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]
+; LARGE64-LABEL: L..C2:
+; LARGE64-NEXT: .tc .TIUninit[TE],TIUninit[UL]@m
+; LARGE64-LABEL: L..C3:
+; LARGE64-NEXT: .tc TIUninit[TE],TIUninit[UL]
+; LARGE64-LABEL: L..C4:
+; LARGE64-NEXT: .tc .TIInit[TE],TIInit[TL]@m
+; LARGE64-LABEL: L..C5:
+; LARGE64-NEXT: .tc TIInit[TE],TIInit[TL]
+; LARGE64-LABEL: L..C6:
+; LARGE64-NEXT: .tc .TWInit[TE],TWInit[TL]@m
+; LARGE64-LABEL: L..C7:
+; LARGE64-NEXT: .tc TWInit[TE],TWInit[TL]
+; LARGE64-LABEL: L..C8:
+; LARGE64-NEXT: .tc GInit[TE],GInit[RW]
+
attributes #0 = { nofree norecurse nounwind willreturn writeonly "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-rop-protection,-spe,-vsx" }
attributes #1 = { norecurse nounwind readonly willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-rop-protection,-spe,-vsx" }
More information about the llvm-commits
mailing list