[llvm] [PowerPC] Utilize getReservedRegs to find asm clobberable registers. (PR #107863)
zhijian lin via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 31 10:22:14 PDT 2024
https://github.com/diggerlin updated https://github.com/llvm/llvm-project/pull/107863
>From 153d1d9427ceffa6aba5a81fa0f6207149f03560 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Fri, 13 Sep 2024 14:42:29 -0400
Subject: [PATCH 1/3] Implement isAsmClobberable() and make r2 allocatable on
AIX64 for some leaf functions.
---
llvm/lib/Target/PowerPC/PPCFrameLowering.cpp | 8 ++
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 2 +
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 29 ++--
llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll | 14 +-
llvm/test/CodeGen/PowerPC/aix-cc-abi.ll | 8 +-
.../PowerPC/aix-inline-asm-clobber-warning.ll | 13 ++
llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir | 5 +-
llvm/test/CodeGen/PowerPC/inc-of-add.ll | 126 +++++++++---------
.../PowerPC/inline-asm-clobber-warning.ll | 25 +++-
llvm/test/CodeGen/PowerPC/ldst-16-byte.mir | 70 +++++-----
llvm/test/CodeGen/PowerPC/mflr-store.mir | 4 +-
...ole-replaceInstr-after-eliminate-extsw.mir | 11 +-
.../PowerPC/tocdata-non-zero-addend.mir | 2 +
13 files changed, 180 insertions(+), 137 deletions(-)
create mode 100644 llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index a57ed33bda9c77..2af557db301e5c 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2772,6 +2772,14 @@ void PPCFrameLowering::updateCalleeSaves(const MachineFunction &MF,
MCPhysReg Cand = CSRegs[i];
if (!SavedRegs.test(Cand))
continue;
+ // When R2/X2 is a CSR and not used for passing arguments, it is allocated
+ // earlier than other volatile registers. R2/X2 is not contiguous with
+ // R13/X13 to R31/X31.
+ if (Cand == PPC::X2 || Cand == PPC::R2) {
+ SavedRegs.set(Cand);
+ continue;
+ }
+
if (PPC::GPRCRegClass.contains(Cand) && Cand < LowestGPR)
LowestGPR = Cand;
else if (PPC::G8RCRegClass.contains(Cand) && Cand < LowestG8R)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 459a96eca1ff20..4ee9f3301e3bc1 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3434,6 +3434,8 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
if (Subtarget.hasAIXShLibTLSModelOpt())
updateForAIXShLibTLSModelOpt(Model, DAG, getTargetMachine());
+ setUsesTOCBasePtr(DAG);
+
bool IsTLSLocalExecModel = Model == TLSModel::LocalExec;
if (IsTLSLocalExecModel || Model == TLSModel::InitialExec) {
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 9e8da59615dfb3..e2b415cf728a2f 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -380,6 +380,8 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
markSuperRegs(Reserved, PPC::VRSAVE);
+ const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ bool UsesTOCBasePtr = FuncInfo->usesTOCBasePtr();
// The SVR4 ABI reserves r2 and r13
if (Subtarget.isSVR4ABI()) {
// We only reserve r2 if we need to use the TOC pointer. If we have no
@@ -387,16 +389,15 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// no constant-pool loads, etc.) and we have no potential uses inside an
// inline asm block, then we can treat r2 as an ordinary callee-saved
// register.
- const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
- markSuperRegs(Reserved, PPC::R2); // System-reserved register
- markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register
+ if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())
+ markSuperRegs(Reserved, PPC::R2); // System-reserved register.
+ markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register.
}
- // Always reserve r2 on AIX for now.
- // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
if (Subtarget.isAIXABI())
- markSuperRegs(Reserved, PPC::R2); // System-reserved register
+ // We only reserve r2 if we need to use the TOC pointer on AIX.
+ if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())
+ markSuperRegs(Reserved, PPC::R2); // System-reserved register.
// On PPC64, r13 is the thread pointer. Never allocate this register.
if (TM.isPPC64())
@@ -441,14 +442,12 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
bool PPCRegisterInfo::isAsmClobberable(const MachineFunction &MF,
MCRegister PhysReg) const {
- // We cannot use getReservedRegs() to find the registers that are not asm
- // clobberable because there are some reserved registers which can be
- // clobbered by inline asm. For example, when LR is clobbered, the register is
- // saved and restored. We will hardcode the registers that are not asm
- // cloberable in this function.
-
- // The stack pointer (R1/X1) is not clobberable by inline asm
- return PhysReg != PPC::R1 && PhysReg != PPC::X1;
+ // CTR and LR registers are always reserved, but they are asm clobberable.
+ if (PhysReg == PPC::CTR || PhysReg == PPC::CTR8 || PhysReg == PPC::LR ||
+ PhysReg == PPC::LR8)
+ return true;
+
+ return !getReservedRegs(MF).test(PhysReg);
}
bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
index ccc36530c7957b..03478150ed3152 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
@@ -1146,11 +1146,11 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; 64BIT-NEXT: renamable $r11 = LWZ 0, %fixed-stack.1, implicit-def $x11 :: (load (s32) from %fixed-stack.1)
; 64BIT-NEXT: renamable $x12 = LWZ8 0, %fixed-stack.4 :: (load (s32) from %fixed-stack.4)
; 64BIT-NEXT: renamable $x0 = LWA 0, %fixed-stack.0 :: (load (s32) from %fixed-stack.0)
- ; 64BIT-NEXT: renamable $x31 = LD 0, %fixed-stack.2 :: (load (s64) from %fixed-stack.2)
- ; 64BIT-NEXT: renamable $x30 = LWA 0, %fixed-stack.3 :: (load (s32) from %fixed-stack.3)
- ; 64BIT-NEXT: renamable $r29 = LWZ 0, %fixed-stack.5, implicit-def $x29 :: (load (s32) from %fixed-stack.5)
- ; 64BIT-NEXT: renamable $x28 = LWA 0, %fixed-stack.6 :: (load (s32) from %fixed-stack.6)
- ; 64BIT-NEXT: renamable $x27 = LD 0, %fixed-stack.7 :: (load (s64) from %fixed-stack.7, align 16)
+ ; 64BIT-NEXT: renamable $x2 = LD 0, %fixed-stack.2 :: (load (s64) from %fixed-stack.2)
+ ; 64BIT-NEXT: renamable $x31 = LWA 0, %fixed-stack.3 :: (load (s32) from %fixed-stack.3)
+ ; 64BIT-NEXT: renamable $r30 = LWZ 0, %fixed-stack.5, implicit-def $x30 :: (load (s32) from %fixed-stack.5)
+ ; 64BIT-NEXT: renamable $x29 = LWA 0, %fixed-stack.6 :: (load (s32) from %fixed-stack.6)
+ ; 64BIT-NEXT: renamable $x28 = LD 0, %fixed-stack.7 :: (load (s64) from %fixed-stack.7, align 16)
; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6
@@ -1159,12 +1159,12 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r9, implicit killed $x9
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r10, implicit killed $x10
; 64BIT-NEXT: renamable $x3 = EXTSW_32_64 killed renamable $r3
- ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x27
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x28
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x29
- ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x12
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x30
+ ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x12
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x31
+ ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x2
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x11
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x0
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
index 78d60f06c06786..433d4273444660 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
@@ -1240,11 +1240,11 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; ASM64PWR4-NEXT: lwz 5, 132(1)
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 12
-; ASM64PWR4-NEXT: std 31, -8(1) # 8-byte Folded Spill
+; ASM64PWR4-NEXT: std 2, -8(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: add 3, 3, 5
-; ASM64PWR4-NEXT: lwz 31, 140(1)
+; ASM64PWR4-NEXT: lwz 2, 140(1)
; ASM64PWR4-NEXT: lwa 11, 148(1)
-; ASM64PWR4-NEXT: add 3, 3, 31
+; ASM64PWR4-NEXT: add 3, 3, 2
; ASM64PWR4-NEXT: add 3, 3, 11
; ASM64PWR4-NEXT: ld 4, 152(1)
; ASM64PWR4-NEXT: lwz 0, 164(1)
@@ -1252,7 +1252,7 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; ASM64PWR4-NEXT: lwa 5, 172(1)
; ASM64PWR4-NEXT: add 3, 3, 0
; ASM64PWR4-NEXT: add 3, 3, 5
-; ASM64PWR4-NEXT: ld 31, -8(1) # 8-byte Folded Reload
+; ASM64PWR4-NEXT: ld 2, -8(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: blr
entry:
%add = add nsw i32 %i1, %i2
diff --git a/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll b/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
new file mode 100644
index 00000000000000..0eb8cb207fd348
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=powerpc-unknown-aix-xcoff -verify-machineinstrs \
+; RUN: -mcpu=pwr7 -mattr=+altivec -O0 2>&1 | FileCheck %s
+
+; CHECK: warning: inline asm clobber list contains reserved registers: R2
+; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
+
+ at a = external global i32, align 4
+
+define void @bar() {
+ store i32 0, ptr @a, align 4
+ call void asm sideeffect "li 2, 1", "~{r2}"()
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir b/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
index 7d96f7feabe2be..407018c3e847e9 100644
--- a/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
+++ b/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
@@ -17,6 +17,5 @@ body: |
BLR8 implicit $lr8, implicit undef $rm, implicit $x3, implicit $f1
...
# CHECK-DAG: AllocationOrder(VFRC) = [ $vf2 $vf3 $vf4 $vf5 $vf0 $vf1 $vf6 $vf7 $vf8 $vf9 $vf10 $vf11 $vf12 $vf13 $vf14 $vf15 $vf16 $vf17 $vf18 $vf19 $vf31 $vf30 $vf29 $vf28 $vf27 $vf26 $vf25 $vf24 $vf23 $vf22 $vf21 $vf20 ]
-# CHECK-DAG: AllocationOrder(G8RC_and_G8RC_NOX0) = [ $x3 $x4 $x5 $x6 $x7 $x8 $x9 $x10 $x11 $x12 $x31 $x30 $x29 $x28 $x27 $x26 $x25 $x24 $x23 $x22 $x21 $x20 $x19 $x18 $x17 $x16 $x15 $x1
-# CHECK-DAG: 4 ]
-# CHECK-DAG: AllocationOrder(F8RC) = [ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f31 $f30 $f29 $f28 $f27 $f26 $f25 $f24 $f23 $f22 $f21 $f20 $f19 $f18 $f17 $f16 $f15 $f14 ]
\ No newline at end of file
+# CHECK-DAG: AllocationOrder(G8RC_and_G8RC_NOX0) = [ $x3 $x4 $x5 $x6 $x7 $x8 $x9 $x10 $x11 $x12 $x2 $x31 $x30 $x29 $x28 $x27 $x26 $x25 $x24 $x23 $x22 $x21 $x20 $x19 $x18 $x17 $x16 $x15 $x14 ]
+# CHECK-DAG: AllocationOrder(F8RC) = [ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f31 $f30 $f29 $f28 $f27 $f26 $f25 $f24 $f23 $f22 $f21 $f20 $f19 $f18 $f17 $f16 $f15 $f14 ]
diff --git a/llvm/test/CodeGen/PowerPC/inc-of-add.ll b/llvm/test/CodeGen/PowerPC/inc-of-add.ll
index c6d6f6a17b1b50..7bd748377f589c 100644
--- a/llvm/test/CodeGen/PowerPC/inc-of-add.ll
+++ b/llvm/test/CodeGen/PowerPC/inc-of-add.ll
@@ -166,81 +166,81 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
;
; AIX-PPC64-LABEL: vector_i128_i8:
; AIX-PPC64: # %bb.0:
-; AIX-PPC64-NEXT: std 22, -80(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: lbz 22, 207(1)
; AIX-PPC64-NEXT: std 23, -72(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: lbz 23, 207(1)
; AIX-PPC64-NEXT: std 24, -64(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: std 26, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 25, -56(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 27, -40(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 26, -48(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 30, -16(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 29, -24(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 28, -32(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: std 27, -40(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 2, -80(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 31, -8(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: std 30, -16(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: lbz 23, 199(1)
-; AIX-PPC64-NEXT: lbz 24, 191(1)
-; AIX-PPC64-NEXT: add 6, 22, 6
-; AIX-PPC64-NEXT: lbz 22, 231(1)
-; AIX-PPC64-NEXT: add 5, 23, 5
-; AIX-PPC64-NEXT: lbz 23, 223(1)
-; AIX-PPC64-NEXT: add 4, 24, 4
-; AIX-PPC64-NEXT: lbz 24, 215(1)
-; AIX-PPC64-NEXT: add 9, 22, 9
-; AIX-PPC64-NEXT: lbz 26, 127(1)
-; AIX-PPC64-NEXT: add 8, 23, 8
-; AIX-PPC64-NEXT: lbz 22, 255(1)
-; AIX-PPC64-NEXT: add 7, 24, 7
-; AIX-PPC64-NEXT: lbz 25, 119(1)
+; AIX-PPC64-NEXT: lbz 24, 199(1)
+; AIX-PPC64-NEXT: lbz 25, 191(1)
+; AIX-PPC64-NEXT: add 6, 23, 6
+; AIX-PPC64-NEXT: lbz 23, 231(1)
+; AIX-PPC64-NEXT: add 5, 24, 5
+; AIX-PPC64-NEXT: lbz 24, 223(1)
+; AIX-PPC64-NEXT: add 4, 25, 4
+; AIX-PPC64-NEXT: lbz 25, 215(1)
+; AIX-PPC64-NEXT: add 9, 23, 9
+; AIX-PPC64-NEXT: lbz 27, 127(1)
+; AIX-PPC64-NEXT: add 8, 24, 8
+; AIX-PPC64-NEXT: lbz 23, 255(1)
+; AIX-PPC64-NEXT: add 7, 25, 7
+; AIX-PPC64-NEXT: lbz 26, 119(1)
; AIX-PPC64-NEXT: addi 9, 9, 1
-; AIX-PPC64-NEXT: lbz 23, 247(1)
-; AIX-PPC64-NEXT: add 26, 22, 26
-; AIX-PPC64-NEXT: lbz 24, 239(1)
+; AIX-PPC64-NEXT: lbz 24, 247(1)
+; AIX-PPC64-NEXT: add 27, 23, 27
+; AIX-PPC64-NEXT: lbz 25, 239(1)
; AIX-PPC64-NEXT: addi 8, 8, 1
-; AIX-PPC64-NEXT: lbz 29, 151(1)
-; AIX-PPC64-NEXT: add 25, 23, 25
-; AIX-PPC64-NEXT: lbz 22, 279(1)
-; AIX-PPC64-NEXT: add 10, 24, 10
-; AIX-PPC64-NEXT: lbz 28, 143(1)
+; AIX-PPC64-NEXT: lbz 30, 151(1)
+; AIX-PPC64-NEXT: add 26, 24, 26
+; AIX-PPC64-NEXT: lbz 23, 279(1)
+; AIX-PPC64-NEXT: add 10, 25, 10
+; AIX-PPC64-NEXT: lbz 29, 143(1)
; AIX-PPC64-NEXT: addi 10, 10, 1
-; AIX-PPC64-NEXT: lbz 23, 271(1)
-; AIX-PPC64-NEXT: add 29, 22, 29
-; AIX-PPC64-NEXT: lbz 27, 135(1)
+; AIX-PPC64-NEXT: lbz 24, 271(1)
+; AIX-PPC64-NEXT: add 30, 23, 30
+; AIX-PPC64-NEXT: lbz 28, 135(1)
; AIX-PPC64-NEXT: addi 7, 7, 1
-; AIX-PPC64-NEXT: lbz 24, 263(1)
-; AIX-PPC64-NEXT: add 28, 23, 28
+; AIX-PPC64-NEXT: lbz 25, 263(1)
+; AIX-PPC64-NEXT: add 29, 24, 29
; AIX-PPC64-NEXT: lbz 11, 183(1)
; AIX-PPC64-NEXT: addi 6, 6, 1
-; AIX-PPC64-NEXT: lbz 22, 311(1)
-; AIX-PPC64-NEXT: add 27, 24, 27
+; AIX-PPC64-NEXT: lbz 23, 311(1)
+; AIX-PPC64-NEXT: add 28, 25, 28
; AIX-PPC64-NEXT: lbz 12, 175(1)
; AIX-PPC64-NEXT: addi 5, 5, 1
; AIX-PPC64-NEXT: lbz 0, 303(1)
-; AIX-PPC64-NEXT: add 11, 22, 11
-; AIX-PPC64-NEXT: lbz 31, 167(1)
+; AIX-PPC64-NEXT: add 11, 23, 11
+; AIX-PPC64-NEXT: lbz 2, 167(1)
; AIX-PPC64-NEXT: addi 11, 11, 1
-; AIX-PPC64-NEXT: lbz 23, 295(1)
+; AIX-PPC64-NEXT: lbz 24, 295(1)
; AIX-PPC64-NEXT: add 12, 0, 12
-; AIX-PPC64-NEXT: lbz 30, 159(1)
+; AIX-PPC64-NEXT: lbz 31, 159(1)
; AIX-PPC64-NEXT: addi 4, 4, 1
-; AIX-PPC64-NEXT: lbz 24, 287(1)
-; AIX-PPC64-NEXT: add 31, 23, 31
+; AIX-PPC64-NEXT: lbz 25, 287(1)
+; AIX-PPC64-NEXT: add 2, 24, 2
; AIX-PPC64-NEXT: stb 11, 15(3)
; AIX-PPC64-NEXT: addi 11, 12, 1
-; AIX-PPC64-NEXT: add 30, 24, 30
+; AIX-PPC64-NEXT: add 31, 25, 31
; AIX-PPC64-NEXT: stb 11, 14(3)
-; AIX-PPC64-NEXT: addi 11, 31, 1
+; AIX-PPC64-NEXT: addi 11, 2, 1
; AIX-PPC64-NEXT: stb 11, 13(3)
-; AIX-PPC64-NEXT: addi 11, 30, 1
+; AIX-PPC64-NEXT: addi 11, 31, 1
; AIX-PPC64-NEXT: stb 11, 12(3)
-; AIX-PPC64-NEXT: addi 11, 29, 1
+; AIX-PPC64-NEXT: addi 11, 30, 1
; AIX-PPC64-NEXT: stb 11, 11(3)
-; AIX-PPC64-NEXT: addi 11, 28, 1
+; AIX-PPC64-NEXT: addi 11, 29, 1
; AIX-PPC64-NEXT: stb 11, 10(3)
-; AIX-PPC64-NEXT: addi 11, 27, 1
+; AIX-PPC64-NEXT: addi 11, 28, 1
; AIX-PPC64-NEXT: stb 11, 9(3)
-; AIX-PPC64-NEXT: addi 11, 26, 1
+; AIX-PPC64-NEXT: addi 11, 27, 1
; AIX-PPC64-NEXT: stb 11, 8(3)
-; AIX-PPC64-NEXT: addi 11, 25, 1
+; AIX-PPC64-NEXT: addi 11, 26, 1
; AIX-PPC64-NEXT: stb 11, 7(3)
; AIX-PPC64-NEXT: stb 10, 6(3)
; AIX-PPC64-NEXT: stb 9, 5(3)
@@ -249,6 +249,7 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; AIX-PPC64-NEXT: stb 6, 2(3)
; AIX-PPC64-NEXT: stb 5, 1(3)
; AIX-PPC64-NEXT: stb 4, 0(3)
+; AIX-PPC64-NEXT: ld 2, -80(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 31, -8(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 30, -16(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 29, -24(1) # 8-byte Folded Reload
@@ -258,7 +259,6 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; AIX-PPC64-NEXT: ld 25, -56(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 24, -64(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 23, -72(1) # 8-byte Folded Reload
-; AIX-PPC64-NEXT: ld 22, -80(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: blr
;
; PPC64LE-LABEL: vector_i128_i8:
@@ -314,30 +314,30 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {
;
; AIX-PPC64-LABEL: vector_i128_i16:
; AIX-PPC64: # %bb.0:
-; AIX-PPC64-NEXT: std 26, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 27, -40(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 28, -32(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 29, -24(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 30, -16(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 31, -8(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 2, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: lhz 11, 118(1)
; AIX-PPC64-NEXT: lhz 12, 182(1)
; AIX-PPC64-NEXT: lhz 0, 174(1)
-; AIX-PPC64-NEXT: lhz 31, 166(1)
+; AIX-PPC64-NEXT: lhz 2, 166(1)
; AIX-PPC64-NEXT: add 11, 12, 11
-; AIX-PPC64-NEXT: lhz 30, 158(1)
+; AIX-PPC64-NEXT: lhz 31, 158(1)
; AIX-PPC64-NEXT: add 10, 0, 10
-; AIX-PPC64-NEXT: lhz 29, 142(1)
-; AIX-PPC64-NEXT: add 9, 31, 9
-; AIX-PPC64-NEXT: lhz 28, 126(1)
-; AIX-PPC64-NEXT: add 8, 30, 8
-; AIX-PPC64-NEXT: lhz 27, 134(1)
-; AIX-PPC64-NEXT: add 6, 29, 6
-; AIX-PPC64-NEXT: lhz 26, 150(1)
-; AIX-PPC64-NEXT: add 4, 28, 4
-; AIX-PPC64-NEXT: add 5, 27, 5
+; AIX-PPC64-NEXT: lhz 30, 142(1)
+; AIX-PPC64-NEXT: add 9, 2, 9
+; AIX-PPC64-NEXT: lhz 29, 126(1)
+; AIX-PPC64-NEXT: add 8, 31, 8
+; AIX-PPC64-NEXT: lhz 28, 134(1)
+; AIX-PPC64-NEXT: add 6, 30, 6
+; AIX-PPC64-NEXT: lhz 27, 150(1)
+; AIX-PPC64-NEXT: add 4, 29, 4
+; AIX-PPC64-NEXT: add 5, 28, 5
; AIX-PPC64-NEXT: addi 11, 11, 1
-; AIX-PPC64-NEXT: add 7, 26, 7
+; AIX-PPC64-NEXT: add 7, 27, 7
; AIX-PPC64-NEXT: addi 10, 10, 1
; AIX-PPC64-NEXT: addi 9, 9, 1
; AIX-PPC64-NEXT: addi 8, 8, 1
@@ -353,12 +353,12 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; AIX-PPC64-NEXT: sth 6, 4(3)
; AIX-PPC64-NEXT: sth 5, 2(3)
; AIX-PPC64-NEXT: sth 4, 0(3)
+; AIX-PPC64-NEXT: ld 2, -48(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 31, -8(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 30, -16(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 29, -24(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 28, -32(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 27, -40(1) # 8-byte Folded Reload
-; AIX-PPC64-NEXT: ld 26, -48(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: blr
;
; PPC64LE-LABEL: vector_i128_i16:
diff --git a/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll b/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
index 7f13f5072d97f1..ec91566e63864a 100644
--- a/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
+++ b/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc-unknown-unkown \
-; RUN: -mcpu=pwr7 2>&1 | FileCheck %s
+; RUN: -mcpu=pwr7 -O0 2>&1 | FileCheck %s
; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc64-unknown-unkown \
-; RUN: -mcpu=pwr7 2>&1 | FileCheck %s
+; RUN: -mcpu=pwr7 -O0 2>&1 | FileCheck %s
define void @test_r1_clobber() {
entry:
@@ -20,3 +20,24 @@ entry:
; CHECK: warning: inline asm clobber list contains reserved registers: X1
; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
+
+; CHECK: warning: inline asm clobber list contains reserved registers: R31
+; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
+
+ at a = dso_local global i32 100, align 4
+define dso_local signext i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %old = alloca i64, align 8
+ store i32 0, ptr %retval, align 4
+ call void asm sideeffect "li 31, 1", "~{r31}"()
+ call void asm sideeffect "li 30, 1", "~{r30}"()
+ %0 = call i64 asm sideeffect "mr $0, 31", "=r"()
+ store i64 %0, ptr %old, align 8
+ %1 = load i32, ptr @a, align 4
+ %conv = sext i32 %1 to i64
+ %2 = alloca i8, i64 %conv, align 16
+ %3 = load i64, ptr %old, align 8
+ %conv1 = trunc i64 %3 to i32
+ ret i32 %conv1
+}
diff --git a/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir b/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
index b9c541feae5acf..08f242ecbac2ff 100644
--- a/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
+++ b/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
@@ -8,18 +8,18 @@ alignment: 8
tracksRegLiveness: true
body: |
bb.0.entry:
- liveins: $x3, $x4
+ liveins: $x5, $x4
; CHECK-LABEL: name: foo
- ; CHECK: liveins: $x3, $x4
+ ; CHECK: liveins: $x4, $x5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber renamable $g8p3 = LQ 128, $x4
- ; CHECK-NEXT: $x3 = OR8 $x7, $x7
- ; CHECK-NEXT: STQ killed renamable $g8p3, 160, $x3
- ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit $x3
+ ; CHECK-NEXT: $x5 = OR8 $x7, $x7
+ ; CHECK-NEXT: STQ killed renamable $g8p3, 160, $x5
+ ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit $x5
%0:g8prc = LQ 128, $x4
- $x3 = COPY %0.sub_gp8_x1:g8prc
- STQ %0, 160, $x3
- BLR8 implicit $lr8, implicit undef $rm, implicit $x3
+ $x5 = COPY %0.sub_gp8_x1:g8prc
+ STQ %0, 160, $x5
+ BLR8 implicit $lr8, implicit undef $rm, implicit $x5
...
---
@@ -73,7 +73,7 @@ body: |
bb.0.entry:
liveins: $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12
; CHECK-LABEL: name: spill_g8prc
- ; CHECK: liveins: $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
+ ; CHECK: liveins: $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: STD killed $x14, -144, $x1 :: (store (s64) into %fixed-stack.17, align 16)
; CHECK-NEXT: STD killed $x15, -136, $x1 :: (store (s64) into %fixed-stack.16)
@@ -93,44 +93,43 @@ body: |
; CHECK-NEXT: STD killed $x29, -24, $x1 :: (store (s64) into %fixed-stack.2)
; CHECK-NEXT: STD killed $x30, -16, $x1 :: (store (s64) into %fixed-stack.1, align 16)
; CHECK-NEXT: STD killed $x31, -8, $x1 :: (store (s64) into %fixed-stack.0)
+ ; CHECK-NEXT: STD killed $x2, -152, $x1 :: (store (s64) into %stack.4)
; CHECK-NEXT: $x7 = OR8 $x3, $x3
; CHECK-NEXT: renamable $g8p4 = LQARX $x5, $x6
- ; CHECK-NEXT: STD killed $x8, -160, $x1
- ; CHECK-NEXT: STD killed $x9, -152, $x1
- ; CHECK-NEXT: renamable $g8p13 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
; CHECK-NEXT: STD killed $x8, -176, $x1
; CHECK-NEXT: STD killed $x9, -168, $x1
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p1 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STD killed $x8, -192, $x1
; CHECK-NEXT: STD killed $x9, -184, $x1
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STD killed $x8, -208, $x1
; CHECK-NEXT: STD killed $x9, -200, $x1
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STD killed $x8, -224, $x1
; CHECK-NEXT: STD killed $x9, -216, $x1
- ; CHECK-NEXT: renamable $g8p10 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p9 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p8 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p7 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p15 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p11 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p12 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p14 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p5 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: $x3 = OR8 $x27, $x27
+ ; CHECK-NEXT: renamable $g8p12 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p11 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p10 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p9 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p8 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p7 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p15 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p13 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p14 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p5 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p5, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p14, renamable $x7, renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: STQCX killed renamable $g8p12, renamable $x7, renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: STQCX killed renamable $g8p11, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX killed renamable $g8p13, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p15, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p7, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p8, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p9, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p10, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX killed renamable $g8p11, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX killed renamable $g8p12, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: $x8 = LD -224, $x1
; CHECK-NEXT: $x9 = LD -216, $x1
; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
@@ -140,13 +139,11 @@ body: |
; CHECK-NEXT: $x8 = LD -192, $x1
; CHECK-NEXT: $x9 = LD -184, $x1
; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX renamable $g8p1, killed renamable $x7, killed renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: $x8 = LD -176, $x1
; CHECK-NEXT: $x9 = LD -168, $x1
- ; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: STQCX killed renamable $g8p13, killed renamable $x7, killed renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: $x8 = LD -160, $x1
- ; CHECK-NEXT: $x9 = LD -152, $x1
; CHECK-NEXT: STQCX killed renamable $g8p4, $x5, $x6, implicit-def dead $cr0
+ ; CHECK-NEXT: $x2 = LD -152, $x1 :: (load (s64) from %stack.4)
; CHECK-NEXT: $x31 = LD -8, $x1 :: (load (s64) from %fixed-stack.0)
; CHECK-NEXT: $x30 = LD -16, $x1 :: (load (s64) from %fixed-stack.1, align 16)
; CHECK-NEXT: $x29 = LD -24, $x1 :: (load (s64) from %fixed-stack.2)
@@ -216,10 +213,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x4 = OR8 $x16, $x16
; CHECK-NEXT: $x5 = OR8 $x17, $x17
- ; CHECK-NEXT: $x3 = OR8 $x5, $x5
- ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit killed $x3, implicit $x4
+ ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit $x5, implicit $x4
%0:g8prc = COPY $g8p8
- $x3 = COPY %0.sub_gp8_x1:g8prc
+ $x5 = COPY %0.sub_gp8_x1:g8prc
$x4 = COPY %0.sub_gp8_x0:g8prc
- BLR8 implicit $lr8, implicit undef $rm, implicit $x3, implicit $x4
+ BLR8 implicit $lr8, implicit undef $rm, implicit $x5, implicit $x4
...
diff --git a/llvm/test/CodeGen/PowerPC/mflr-store.mir b/llvm/test/CodeGen/PowerPC/mflr-store.mir
index 75c313617c8839..34874151f5fb92 100644
--- a/llvm/test/CodeGen/PowerPC/mflr-store.mir
+++ b/llvm/test/CodeGen/PowerPC/mflr-store.mir
@@ -27,12 +27,12 @@ body: |
; CHECK: $x0 = MFLR8 implicit $lr8
; CHECK-NEXT: STD killed $x0, 16, $x1
; CHECK-NEXT: $x1 = STDU $x1, -32752, $x1
- ; CHECK-NEXT: BL8 @test_callee, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x2, implicit-def $r1, implicit-def $x3
+ ; CHECK-NEXT: BL8 @test_callee, csr_ppc64_r2, implicit-def dead $lr8, implicit $rm, implicit-def $r1, implicit-def $x3
; CHECK-NEXT: $x1 = ADDI8 $x1, 32752
; CHECK-NEXT: $x0 = LD 16, $x1
; CHECK-NEXT: MTLR8 $x0, implicit-def $lr8
; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit killed $x3
- BL8 @test_callee, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x2, implicit-def $r1, implicit-def $x3
+ BL8 @test_callee, csr_ppc64_r2, implicit-def dead $lr8, implicit $rm, implicit-def $r1, implicit-def $x3
BLR8 implicit $lr8, implicit $rm, implicit $x3
...
diff --git a/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir b/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
index 71b1ad53681040..e85e778eb42417 100644
--- a/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
+++ b/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
@@ -458,6 +458,7 @@ constants: []
machineFunctionInfo: {}
body: |
bb.0.entry:
+ liveins: $x2, $x3, $x4
successors: %bb.2(0x80000000)
%64:g8rc_and_g8rc_nox0 = LDtoc @shortArray, $x2 :: (load (s64) from got)
@@ -472,7 +473,7 @@ body: |
%71:g8rc = EXTSW_32_64 killed %68
$x3 = COPY %70
$x4 = COPY %71
- BL8_NOP <mcsymbol .printf>, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $x2, implicit-def $r1, implicit-def $x3
+ BL8_NOP <mcsymbol .printf>, csr_ppc64_r2, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit-def $r1, implicit-def $x3
ADJCALLSTACKUP 112, 0, implicit-def dead $r1, implicit $r1
%73:g8rc_and_g8rc_nox0 = LDtoc @globalShortValue, $x2 :: (load (s64) from got)
%0:gprc = LHZ 0, killed %73 :: (dereferenceable load (s16) from @globalShortValue, !tbaa !3)
@@ -540,7 +541,8 @@ body: |
%126:crrc = CMPLD %11, %12
B %bb.2
- bb.1.for.cond.cleanup15:
+ bb.1.for.cond.cleanup15 (landing-pad):
+ liveins: $x2
%150:gprc_and_gprc_nor0 = EXTSH %7
%151:gprc_and_gprc_nor0 = EXTSH %0
@@ -646,9 +648,10 @@ body: |
MTCTR8loop killed %139, implicit-def dead $ctr8
B %bb.10
- bb.8.for.cond21.for.cond.cleanup25_crit_edge:
+ bb.8.for.cond21.for.cond.cleanup25_crit_edge (landing-pad):
successors: %bb.9(0x80000000)
-
+ liveins: $x2
+
%46:gprc = PHI %37, %bb.6, %60, %bb.10
%47:gprc = PHI %38, %bb.6, %61, %bb.10
%48:gprc = PHI %39, %bb.6, %62, %bb.10
diff --git a/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir b/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
index eff10d24a62f3c..347712199711e0 100644
--- a/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
+++ b/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
@@ -46,6 +46,7 @@ frameInfo:
machineFunctionInfo: {}
body: |
bb.0.entry:
+ liveins: $x3, $x2
%0:g8rc = LBZ8 @x + 2, $x2 :: (dereferenceable load (s8) from @x + 2, align 2, basealign 4)
$x3 = COPY %0
BLR8 implicit $lr8, implicit $rm, implicit $x3
@@ -62,6 +63,7 @@ frameInfo:
machineFunctionInfo: {}
body: |
bb.0.entry:
+ liveins: $x3, $x2
%0:g8rc = LBZ8 @y + 1, $x2 :: (dereferenceable load (s8) from @y + 1, basealign 4)
$x3 = COPY %0
BLR8 implicit $lr8, implicit $rm, implicit $x3
>From c2bd36465af5a9b9a5620f063335af1daad8841d Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 16 Oct 2024 18:33:32 +0000
Subject: [PATCH 2/3] add comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 4ee9f3301e3bc1..cb3ebef6871daa 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3434,6 +3434,8 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
if (Subtarget.hasAIXShLibTLSModelOpt())
updateForAIXShLibTLSModelOpt(Model, DAG, getTargetMachine());
+ // Whenever accessing the TLS variable, it is done through the TC entries.
+ // Therefore, we set the DAG to use the TOC base.
setUsesTOCBasePtr(DAG);
bool IsTLSLocalExecModel = Model == TLSModel::LocalExec;
>From ee1ce9d0128a821f1bb13216151e6571ad99c18f Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Thu, 31 Oct 2024 17:34:53 +0000
Subject: [PATCH 3/3] address comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 4 ++--
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 11 ++++-------
.../CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll | 2 ++
3 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index cb3ebef6871daa..affc900f2aa668 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3434,8 +3434,8 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
if (Subtarget.hasAIXShLibTLSModelOpt())
updateForAIXShLibTLSModelOpt(Model, DAG, getTargetMachine());
- // Whenever accessing the TLS variable, it is done through the TC entries.
- // Therefore, we set the DAG to use the TOC base.
+ // TLS variables are accessed through TOC entries.
+ // To support this, set the DAG to use the TOC base pointer.
setUsesTOCBasePtr(DAG);
bool IsTLSLocalExecModel = Model == TLSModel::LocalExec;
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index e2b415cf728a2f..5d416c9a0446b1 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -383,7 +383,7 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
bool UsesTOCBasePtr = FuncInfo->usesTOCBasePtr();
// The SVR4 ABI reserves r2 and r13
- if (Subtarget.isSVR4ABI()) {
+ if (Subtarget.isSVR4ABI() || Subtarget.isAIXABI()) {
// We only reserve r2 if we need to use the TOC pointer. If we have no
// explicit uses of the TOC pointer (meaning we're a leaf function with
// no constant-pool loads, etc.) and we have no potential uses inside an
@@ -391,13 +391,10 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// register.
if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())
markSuperRegs(Reserved, PPC::R2); // System-reserved register.
- markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register.
- }
- if (Subtarget.isAIXABI())
- // We only reserve r2 if we need to use the TOC pointer on AIX.
- if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())
- markSuperRegs(Reserved, PPC::R2); // System-reserved register.
+ if (Subtarget.isSVR4ABI())
+ markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register.
+ }
// On PPC64, r13 is the thread pointer. Never allocate this register.
if (TM.isPPC64())
diff --git a/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll b/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
index 0eb8cb207fd348..f695ee15cd3556 100644
--- a/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=powerpc-unknown-aix-xcoff -verify-machineinstrs \
; RUN: -mcpu=pwr7 -mattr=+altivec -O0 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-aix-xcoff -verify-machineinstrs \
+; RUN: -mcpu=pwr7 -mattr=+altivec -O0 2>&1 | FileCheck %s
; CHECK: warning: inline asm clobber list contains reserved registers: R2
; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
More information about the llvm-commits
mailing list