[llvm] 2cd3213 - [PowerPC] Utilize getReservedRegs to find asm clobberable registers. (#107863)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 4 09:57:30 PST 2024
Author: zhijian lin
Date: 2024-11-04T12:57:26-05:00
New Revision: 2cd32132dbf5ec4a0e62f8fea0cd48420561e970
URL: https://github.com/llvm/llvm-project/commit/2cd32132dbf5ec4a0e62f8fea0cd48420561e970
DIFF: https://github.com/llvm/llvm-project/commit/2cd32132dbf5ec4a0e62f8fea0cd48420561e970.diff
LOG: [PowerPC] Utilize getReservedRegs to find asm clobberable registers. (#107863)
This patch utilizes getReservedRegs() to find asm-clobberable registers.
To make the result of getReservedRegs() accurate, this patch also
implements the existing TODO: making r2 allocatable on AIX for some
leaf functions.
Added:
llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
Modified:
llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
llvm/test/CodeGen/PowerPC/inc-of-add.ll
llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
llvm/test/CodeGen/PowerPC/mflr-store.mir
llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 1083febc5f8520..b118976b4731c0 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2771,6 +2771,14 @@ void PPCFrameLowering::updateCalleeSaves(const MachineFunction &MF,
MCPhysReg Cand = CSRegs[i];
if (!SavedRegs.test(Cand))
continue;
+ // When R2/X2 is a CSR and not used for passing arguments, it is allocated
+ // earlier than other volatile registers. R2/X2 is not contiguous with
+ // R13/X13 to R31/X31.
+ if (Cand == PPC::X2 || Cand == PPC::R2) {
+ SavedRegs.set(Cand);
+ continue;
+ }
+
if (PPC::GPRCRegClass.contains(Cand) && Cand < LowestGPR)
LowestGPR = Cand;
else if (PPC::G8RCRegClass.contains(Cand) && Cand < LowestG8R)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index ab31898e262e7e..ec4f8f4be425ed 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3437,6 +3437,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
if (Subtarget.hasAIXShLibTLSModelOpt())
updateForAIXShLibTLSModelOpt(Model, DAG, getTargetMachine());
+ // TLS variables are accessed through TOC entries.
+ // To support this, set the DAG to use the TOC base pointer.
+ setUsesTOCBasePtr(DAG);
+
bool IsTLSLocalExecModel = Model == TLSModel::LocalExec;
if (IsTLSLocalExecModel || Model == TLSModel::InitialExec) {
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 9e8da59615dfb3..5d416c9a0446b1 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -380,23 +380,21 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
markSuperRegs(Reserved, PPC::VRSAVE);
+ const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ bool UsesTOCBasePtr = FuncInfo->usesTOCBasePtr();
// The SVR4 ABI reserves r2 and r13
- if (Subtarget.isSVR4ABI()) {
+ if (Subtarget.isSVR4ABI() || Subtarget.isAIXABI()) {
// We only reserve r2 if we need to use the TOC pointer. If we have no
// explicit uses of the TOC pointer (meaning we're a leaf function with
// no constant-pool loads, etc.) and we have no potential uses inside an
// inline asm block, then we can treat r2 has an ordinary callee-saved
// register.
- const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
- markSuperRegs(Reserved, PPC::R2); // System-reserved register
- markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register
- }
+ if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())
+ markSuperRegs(Reserved, PPC::R2); // System-reserved register.
- // Always reserve r2 on AIX for now.
- // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
- if (Subtarget.isAIXABI())
- markSuperRegs(Reserved, PPC::R2); // System-reserved register
+ if (Subtarget.isSVR4ABI())
+ markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register.
+ }
// On PPC64, r13 is the thread pointer. Never allocate this register.
if (TM.isPPC64())
@@ -441,14 +439,12 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
bool PPCRegisterInfo::isAsmClobberable(const MachineFunction &MF,
MCRegister PhysReg) const {
- // We cannot use getReservedRegs() to find the registers that are not asm
- // clobberable because there are some reserved registers which can be
- // clobbered by inline asm. For example, when LR is clobbered, the register is
- // saved and restored. We will hardcode the registers that are not asm
- // cloberable in this function.
-
- // The stack pointer (R1/X1) is not clobberable by inline asm
- return PhysReg != PPC::R1 && PhysReg != PPC::X1;
+ // CTR and LR registers are always reserved, but they are asm clobberable.
+ if (PhysReg == PPC::CTR || PhysReg == PPC::CTR8 || PhysReg == PPC::LR ||
+ PhysReg == PPC::LR8)
+ return true;
+
+ return !getReservedRegs(MF).test(PhysReg);
}
bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
index 267b4e52b7dac6..00d1c471c2fa7c 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
@@ -1146,11 +1146,11 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; 64BIT-NEXT: renamable $r11 = LWZ 0, %fixed-stack.1, implicit-def $x11 :: (load (s32) from %fixed-stack.1)
; 64BIT-NEXT: renamable $x12 = LWZ8 0, %fixed-stack.4 :: (load (s32) from %fixed-stack.4)
; 64BIT-NEXT: renamable $x0 = LWA 0, %fixed-stack.0 :: (load (s32) from %fixed-stack.0)
- ; 64BIT-NEXT: renamable $x31 = LD 0, %fixed-stack.2 :: (load (s64) from %fixed-stack.2)
- ; 64BIT-NEXT: renamable $x30 = LWA 0, %fixed-stack.3 :: (load (s32) from %fixed-stack.3)
- ; 64BIT-NEXT: renamable $r29 = LWZ 0, %fixed-stack.5, implicit-def $x29 :: (load (s32) from %fixed-stack.5)
- ; 64BIT-NEXT: renamable $x28 = LWA 0, %fixed-stack.6 :: (load (s32) from %fixed-stack.6)
- ; 64BIT-NEXT: renamable $x27 = LD 0, %fixed-stack.7 :: (load (s64) from %fixed-stack.7, align 16)
+ ; 64BIT-NEXT: renamable $x2 = LD 0, %fixed-stack.2 :: (load (s64) from %fixed-stack.2)
+ ; 64BIT-NEXT: renamable $x31 = LWA 0, %fixed-stack.3 :: (load (s32) from %fixed-stack.3)
+ ; 64BIT-NEXT: renamable $r30 = LWZ 0, %fixed-stack.5, implicit-def $x30 :: (load (s32) from %fixed-stack.5)
+ ; 64BIT-NEXT: renamable $x29 = LWA 0, %fixed-stack.6 :: (load (s32) from %fixed-stack.6)
+ ; 64BIT-NEXT: renamable $x28 = LD 0, %fixed-stack.7 :: (load (s64) from %fixed-stack.7, align 16)
; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6
@@ -1159,12 +1159,12 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r9, implicit killed $x9
; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r10, implicit killed $x10
; 64BIT-NEXT: renamable $x3 = EXTSW_32_64 killed renamable $r3
- ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x27
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x28
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x29
- ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x12
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x30
+ ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x12
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x31
+ ; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x2
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x11
; 64BIT-NEXT: renamable $x3 = nsw ADD8 killed renamable $x3, killed renamable $x0
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
index 78d60f06c06786..433d4273444660 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
@@ -1240,11 +1240,11 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; ASM64PWR4-NEXT: lwz 5, 132(1)
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 12
-; ASM64PWR4-NEXT: std 31, -8(1) # 8-byte Folded Spill
+; ASM64PWR4-NEXT: std 2, -8(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: add 3, 3, 5
-; ASM64PWR4-NEXT: lwz 31, 140(1)
+; ASM64PWR4-NEXT: lwz 2, 140(1)
; ASM64PWR4-NEXT: lwa 11, 148(1)
-; ASM64PWR4-NEXT: add 3, 3, 31
+; ASM64PWR4-NEXT: add 3, 3, 2
; ASM64PWR4-NEXT: add 3, 3, 11
; ASM64PWR4-NEXT: ld 4, 152(1)
; ASM64PWR4-NEXT: lwz 0, 164(1)
@@ -1252,7 +1252,7 @@ define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6
; ASM64PWR4-NEXT: lwa 5, 172(1)
; ASM64PWR4-NEXT: add 3, 3, 0
; ASM64PWR4-NEXT: add 3, 3, 5
-; ASM64PWR4-NEXT: ld 31, -8(1) # 8-byte Folded Reload
+; ASM64PWR4-NEXT: ld 2, -8(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: blr
entry:
%add = add nsw i32 %i1, %i2
diff --git a/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll b/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
new file mode 100644
index 00000000000000..f695ee15cd3556
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-inline-asm-clobber-warning.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=powerpc-unknown-aix-xcoff -verify-machineinstrs \
+; RUN: -mcpu=pwr7 -mattr=+altivec -O0 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-aix-xcoff -verify-machineinstrs \
+; RUN: -mcpu=pwr7 -mattr=+altivec -O0 2>&1 | FileCheck %s
+
+; CHECK: warning: inline asm clobber list contains reserved registers: R2
+; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
+
+ at a = external global i32, align 4
+
+define void @bar() {
+ store i32 0, ptr @a, align 4
+ call void asm sideeffect "li 2, 1", "~{r2}"()
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir b/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
index c295add57c9c0a..584b6b0ad46dd9 100644
--- a/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
+++ b/llvm/test/CodeGen/PowerPC/aix64-csr-alloc.mir
@@ -17,6 +17,5 @@ body: |
BLR8 implicit $lr8, implicit undef $rm, implicit $x3, implicit $f1
...
# CHECK-DAG: AllocationOrder(VFRC) = [ $vf2 $vf3 $vf4 $vf5 $vf0 $vf1 $vf6 $vf7 $vf8 $vf9 $vf10 $vf11 $vf12 $vf13 $vf14 $vf15 $vf16 $vf17 $vf18 $vf19 $vf31 $vf30 $vf29 $vf28 $vf27 $vf26 $vf25 $vf24 $vf23 $vf22 $vf21 $vf20 ]
-# CHECK-DAG: AllocationOrder(G8RC_and_G8RC_NOX0) = [ $x3 $x4 $x5 $x6 $x7 $x8 $x9 $x10 $x11 $x12 $x31 $x30 $x29 $x28 $x27 $x26 $x25 $x24 $x23 $x22 $x21 $x20 $x19 $x18 $x17 $x16 $x15 $x1
-# CHECK-DAG: 4 ]
+# CHECK-DAG: AllocationOrder(G8RC_and_G8RC_NOX0) = [ $x3 $x4 $x5 $x6 $x7 $x8 $x9 $x10 $x11 $x12 $x2 $x31 $x30 $x29 $x28 $x27 $x26 $x25 $x24 $x23 $x22 $x21 $x20 $x19 $x18 $x17 $x16 $x15 $x14 ]
# CHECK-DAG: AllocationOrder(F8RC) = [ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f31 $f30 $f29 $f28 $f27 $f26 $f25 $f24 $f23 $f22 $f21 $f20 $f19 $f18 $f17 $f16 $f15 $f14 ]
diff --git a/llvm/test/CodeGen/PowerPC/inc-of-add.ll b/llvm/test/CodeGen/PowerPC/inc-of-add.ll
index bff6fd6f451efc..98b812e7845a5c 100644
--- a/llvm/test/CodeGen/PowerPC/inc-of-add.ll
+++ b/llvm/test/CodeGen/PowerPC/inc-of-add.ll
@@ -166,81 +166,81 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
;
; AIX-PPC64-LABEL: vector_i128_i8:
; AIX-PPC64: # %bb.0:
-; AIX-PPC64-NEXT: std 22, -80(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: lbz 22, 207(1)
; AIX-PPC64-NEXT: std 23, -72(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: lbz 23, 207(1)
; AIX-PPC64-NEXT: std 24, -64(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: std 26, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 25, -56(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 27, -40(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 26, -48(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 30, -16(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 29, -24(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 28, -32(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: std 27, -40(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 2, -80(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 31, -8(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: std 30, -16(1) # 8-byte Folded Spill
-; AIX-PPC64-NEXT: lbz 23, 199(1)
-; AIX-PPC64-NEXT: lbz 24, 191(1)
-; AIX-PPC64-NEXT: add 6, 22, 6
-; AIX-PPC64-NEXT: lbz 22, 231(1)
-; AIX-PPC64-NEXT: add 5, 23, 5
-; AIX-PPC64-NEXT: lbz 23, 223(1)
-; AIX-PPC64-NEXT: add 4, 24, 4
-; AIX-PPC64-NEXT: lbz 24, 215(1)
-; AIX-PPC64-NEXT: add 9, 22, 9
-; AIX-PPC64-NEXT: lbz 26, 127(1)
-; AIX-PPC64-NEXT: add 8, 23, 8
-; AIX-PPC64-NEXT: lbz 22, 255(1)
-; AIX-PPC64-NEXT: add 7, 24, 7
-; AIX-PPC64-NEXT: lbz 25, 119(1)
+; AIX-PPC64-NEXT: lbz 24, 199(1)
+; AIX-PPC64-NEXT: lbz 25, 191(1)
+; AIX-PPC64-NEXT: add 6, 23, 6
+; AIX-PPC64-NEXT: lbz 23, 231(1)
+; AIX-PPC64-NEXT: add 5, 24, 5
+; AIX-PPC64-NEXT: lbz 24, 223(1)
+; AIX-PPC64-NEXT: add 4, 25, 4
+; AIX-PPC64-NEXT: lbz 25, 215(1)
+; AIX-PPC64-NEXT: add 9, 23, 9
+; AIX-PPC64-NEXT: lbz 27, 127(1)
+; AIX-PPC64-NEXT: add 8, 24, 8
+; AIX-PPC64-NEXT: lbz 23, 255(1)
+; AIX-PPC64-NEXT: add 7, 25, 7
+; AIX-PPC64-NEXT: lbz 26, 119(1)
; AIX-PPC64-NEXT: addi 9, 9, 1
-; AIX-PPC64-NEXT: lbz 23, 247(1)
-; AIX-PPC64-NEXT: add 26, 22, 26
-; AIX-PPC64-NEXT: lbz 24, 239(1)
+; AIX-PPC64-NEXT: lbz 24, 247(1)
+; AIX-PPC64-NEXT: add 27, 23, 27
+; AIX-PPC64-NEXT: lbz 25, 239(1)
; AIX-PPC64-NEXT: addi 8, 8, 1
-; AIX-PPC64-NEXT: lbz 29, 151(1)
-; AIX-PPC64-NEXT: add 25, 23, 25
-; AIX-PPC64-NEXT: lbz 22, 279(1)
-; AIX-PPC64-NEXT: add 10, 24, 10
-; AIX-PPC64-NEXT: lbz 28, 143(1)
+; AIX-PPC64-NEXT: lbz 30, 151(1)
+; AIX-PPC64-NEXT: add 26, 24, 26
+; AIX-PPC64-NEXT: lbz 23, 279(1)
+; AIX-PPC64-NEXT: add 10, 25, 10
+; AIX-PPC64-NEXT: lbz 29, 143(1)
; AIX-PPC64-NEXT: addi 10, 10, 1
-; AIX-PPC64-NEXT: lbz 23, 271(1)
-; AIX-PPC64-NEXT: add 29, 22, 29
-; AIX-PPC64-NEXT: lbz 27, 135(1)
+; AIX-PPC64-NEXT: lbz 24, 271(1)
+; AIX-PPC64-NEXT: add 30, 23, 30
+; AIX-PPC64-NEXT: lbz 28, 135(1)
; AIX-PPC64-NEXT: addi 7, 7, 1
-; AIX-PPC64-NEXT: lbz 24, 263(1)
-; AIX-PPC64-NEXT: add 28, 23, 28
+; AIX-PPC64-NEXT: lbz 25, 263(1)
+; AIX-PPC64-NEXT: add 29, 24, 29
; AIX-PPC64-NEXT: lbz 11, 183(1)
; AIX-PPC64-NEXT: addi 6, 6, 1
-; AIX-PPC64-NEXT: lbz 22, 311(1)
-; AIX-PPC64-NEXT: add 27, 24, 27
+; AIX-PPC64-NEXT: lbz 23, 311(1)
+; AIX-PPC64-NEXT: add 28, 25, 28
; AIX-PPC64-NEXT: lbz 12, 175(1)
; AIX-PPC64-NEXT: addi 5, 5, 1
; AIX-PPC64-NEXT: lbz 0, 303(1)
-; AIX-PPC64-NEXT: add 11, 22, 11
-; AIX-PPC64-NEXT: lbz 31, 167(1)
+; AIX-PPC64-NEXT: add 11, 23, 11
+; AIX-PPC64-NEXT: lbz 2, 167(1)
; AIX-PPC64-NEXT: addi 11, 11, 1
-; AIX-PPC64-NEXT: lbz 23, 295(1)
+; AIX-PPC64-NEXT: lbz 24, 295(1)
; AIX-PPC64-NEXT: add 12, 0, 12
-; AIX-PPC64-NEXT: lbz 30, 159(1)
+; AIX-PPC64-NEXT: lbz 31, 159(1)
; AIX-PPC64-NEXT: addi 4, 4, 1
-; AIX-PPC64-NEXT: lbz 24, 287(1)
-; AIX-PPC64-NEXT: add 31, 23, 31
+; AIX-PPC64-NEXT: lbz 25, 287(1)
+; AIX-PPC64-NEXT: add 2, 24, 2
; AIX-PPC64-NEXT: stb 11, 15(3)
; AIX-PPC64-NEXT: addi 11, 12, 1
-; AIX-PPC64-NEXT: add 30, 24, 30
+; AIX-PPC64-NEXT: add 31, 25, 31
; AIX-PPC64-NEXT: stb 11, 14(3)
-; AIX-PPC64-NEXT: addi 11, 31, 1
+; AIX-PPC64-NEXT: addi 11, 2, 1
; AIX-PPC64-NEXT: stb 11, 13(3)
-; AIX-PPC64-NEXT: addi 11, 30, 1
+; AIX-PPC64-NEXT: addi 11, 31, 1
; AIX-PPC64-NEXT: stb 11, 12(3)
-; AIX-PPC64-NEXT: addi 11, 29, 1
+; AIX-PPC64-NEXT: addi 11, 30, 1
; AIX-PPC64-NEXT: stb 11, 11(3)
-; AIX-PPC64-NEXT: addi 11, 28, 1
+; AIX-PPC64-NEXT: addi 11, 29, 1
; AIX-PPC64-NEXT: stb 11, 10(3)
-; AIX-PPC64-NEXT: addi 11, 27, 1
+; AIX-PPC64-NEXT: addi 11, 28, 1
; AIX-PPC64-NEXT: stb 11, 9(3)
-; AIX-PPC64-NEXT: addi 11, 26, 1
+; AIX-PPC64-NEXT: addi 11, 27, 1
; AIX-PPC64-NEXT: stb 11, 8(3)
-; AIX-PPC64-NEXT: addi 11, 25, 1
+; AIX-PPC64-NEXT: addi 11, 26, 1
; AIX-PPC64-NEXT: stb 11, 7(3)
; AIX-PPC64-NEXT: stb 10, 6(3)
; AIX-PPC64-NEXT: stb 9, 5(3)
@@ -249,6 +249,7 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; AIX-PPC64-NEXT: stb 6, 2(3)
; AIX-PPC64-NEXT: stb 5, 1(3)
; AIX-PPC64-NEXT: stb 4, 0(3)
+; AIX-PPC64-NEXT: ld 2, -80(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 31, -8(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 30, -16(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 29, -24(1) # 8-byte Folded Reload
@@ -258,7 +259,6 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; AIX-PPC64-NEXT: ld 25, -56(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 24, -64(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 23, -72(1) # 8-byte Folded Reload
-; AIX-PPC64-NEXT: ld 22, -80(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: blr
;
; PPC64LE-LABEL: vector_i128_i8:
@@ -314,30 +314,30 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {
;
; AIX-PPC64-LABEL: vector_i128_i16:
; AIX-PPC64: # %bb.0:
-; AIX-PPC64-NEXT: std 26, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 27, -40(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 28, -32(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 29, -24(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 30, -16(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: std 31, -8(1) # 8-byte Folded Spill
+; AIX-PPC64-NEXT: std 2, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT: lhz 11, 118(1)
; AIX-PPC64-NEXT: lhz 12, 182(1)
; AIX-PPC64-NEXT: lhz 0, 174(1)
-; AIX-PPC64-NEXT: lhz 31, 166(1)
+; AIX-PPC64-NEXT: lhz 2, 166(1)
; AIX-PPC64-NEXT: add 11, 12, 11
-; AIX-PPC64-NEXT: lhz 30, 158(1)
+; AIX-PPC64-NEXT: lhz 31, 158(1)
; AIX-PPC64-NEXT: add 10, 0, 10
-; AIX-PPC64-NEXT: lhz 29, 142(1)
-; AIX-PPC64-NEXT: add 9, 31, 9
-; AIX-PPC64-NEXT: lhz 28, 126(1)
-; AIX-PPC64-NEXT: add 8, 30, 8
-; AIX-PPC64-NEXT: lhz 27, 134(1)
-; AIX-PPC64-NEXT: add 6, 29, 6
-; AIX-PPC64-NEXT: lhz 26, 150(1)
-; AIX-PPC64-NEXT: add 4, 28, 4
-; AIX-PPC64-NEXT: add 5, 27, 5
+; AIX-PPC64-NEXT: lhz 30, 142(1)
+; AIX-PPC64-NEXT: add 9, 2, 9
+; AIX-PPC64-NEXT: lhz 29, 126(1)
+; AIX-PPC64-NEXT: add 8, 31, 8
+; AIX-PPC64-NEXT: lhz 28, 134(1)
+; AIX-PPC64-NEXT: add 6, 30, 6
+; AIX-PPC64-NEXT: lhz 27, 150(1)
+; AIX-PPC64-NEXT: add 4, 29, 4
+; AIX-PPC64-NEXT: add 5, 28, 5
; AIX-PPC64-NEXT: addi 11, 11, 1
-; AIX-PPC64-NEXT: add 7, 26, 7
+; AIX-PPC64-NEXT: add 7, 27, 7
; AIX-PPC64-NEXT: addi 10, 10, 1
; AIX-PPC64-NEXT: addi 9, 9, 1
; AIX-PPC64-NEXT: addi 8, 8, 1
@@ -353,12 +353,12 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; AIX-PPC64-NEXT: sth 6, 4(3)
; AIX-PPC64-NEXT: sth 5, 2(3)
; AIX-PPC64-NEXT: sth 4, 0(3)
+; AIX-PPC64-NEXT: ld 2, -48(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 31, -8(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 30, -16(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 29, -24(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 28, -32(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: ld 27, -40(1) # 8-byte Folded Reload
-; AIX-PPC64-NEXT: ld 26, -48(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT: blr
;
; PPC64LE-LABEL: vector_i128_i16:
diff --git a/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll b/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
index 7f13f5072d97f1..ec91566e63864a 100644
--- a/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
+++ b/llvm/test/CodeGen/PowerPC/inline-asm-clobber-warning.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc-unknown-unkown \
-; RUN: -mcpu=pwr7 2>&1 | FileCheck %s
+; RUN: -mcpu=pwr7 -O0 2>&1 | FileCheck %s
; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc64-unknown-unkown \
-; RUN: -mcpu=pwr7 2>&1 | FileCheck %s
+; RUN: -mcpu=pwr7 -O0 2>&1 | FileCheck %s
define void @test_r1_clobber() {
entry:
@@ -20,3 +20,24 @@ entry:
; CHECK: warning: inline asm clobber list contains reserved registers: X1
; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
+
+; CHECK: warning: inline asm clobber list contains reserved registers: R31
+; CHECK-NEXT: note: Reserved registers on the clobber list may not be preserved across the asm statement, and clobbering them may lead to undefined behaviour.
+
+ at a = dso_local global i32 100, align 4
+define dso_local signext i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %old = alloca i64, align 8
+ store i32 0, ptr %retval, align 4
+ call void asm sideeffect "li 31, 1", "~{r31}"()
+ call void asm sideeffect "li 30, 1", "~{r30}"()
+ %0 = call i64 asm sideeffect "mr $0, 31", "=r"()
+ store i64 %0, ptr %old, align 8
+ %1 = load i32, ptr @a, align 4
+ %conv = sext i32 %1 to i64
+ %2 = alloca i8, i64 %conv, align 16
+ %3 = load i64, ptr %old, align 8
+ %conv1 = trunc i64 %3 to i32
+ ret i32 %conv1
+}
diff --git a/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir b/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
index 6b127c988c2079..e000fece687b86 100644
--- a/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
+++ b/llvm/test/CodeGen/PowerPC/ldst-16-byte.mir
@@ -8,18 +8,18 @@ alignment: 8
tracksRegLiveness: true
body: |
bb.0.entry:
- liveins: $x3, $x4
+ liveins: $x5, $x4
; CHECK-LABEL: name: foo
- ; CHECK: liveins: $x3, $x4
+ ; CHECK: liveins: $x4, $x5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber renamable $g8p3 = LQ 128, $x4
- ; CHECK-NEXT: $x3 = OR8 $x7, $x7
- ; CHECK-NEXT: STQ killed renamable $g8p3, 160, $x3
- ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit $x3
+ ; CHECK-NEXT: $x5 = OR8 $x7, $x7
+ ; CHECK-NEXT: STQ killed renamable $g8p3, 160, $x5
+ ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit $x5
%0:g8prc = LQ 128, $x4
- $x3 = COPY %0.sub_gp8_x1:g8prc
- STQ %0, 160, $x3
- BLR8 implicit $lr8, implicit undef $rm, implicit $x3
+ $x5 = COPY %0.sub_gp8_x1:g8prc
+ STQ %0, 160, $x5
+ BLR8 implicit $lr8, implicit undef $rm, implicit $x5
...
---
@@ -73,7 +73,7 @@ body: |
bb.0.entry:
liveins: $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12
; CHECK-LABEL: name: spill_g8prc
- ; CHECK: liveins: $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
+ ; CHECK: liveins: $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: STD killed $x14, -144, $x1 :: (store (s64) into %fixed-stack.17, align 16)
; CHECK-NEXT: STD killed $x15, -136, $x1 :: (store (s64) into %fixed-stack.16)
@@ -93,44 +93,43 @@ body: |
; CHECK-NEXT: STD killed $x29, -24, $x1 :: (store (s64) into %fixed-stack.2)
; CHECK-NEXT: STD killed $x30, -16, $x1 :: (store (s64) into %fixed-stack.1, align 16)
; CHECK-NEXT: STD killed $x31, -8, $x1 :: (store (s64) into %fixed-stack.0)
+ ; CHECK-NEXT: STD killed $x2, -152, $x1 :: (store (s64) into %stack.4)
; CHECK-NEXT: $x7 = OR8 $x3, $x3
; CHECK-NEXT: renamable $g8p4 = LQARX $x5, $x6
- ; CHECK-NEXT: STD killed $x8, -160, $x1
- ; CHECK-NEXT: STD killed $x9, -152, $x1
- ; CHECK-NEXT: renamable $g8p13 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
; CHECK-NEXT: STD killed $x8, -176, $x1
; CHECK-NEXT: STD killed $x9, -168, $x1
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p1 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STD killed $x8, -192, $x1
; CHECK-NEXT: STD killed $x9, -184, $x1
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STD killed $x8, -208, $x1
; CHECK-NEXT: STD killed $x9, -200, $x1
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STD killed $x8, -224, $x1
; CHECK-NEXT: STD killed $x9, -216, $x1
- ; CHECK-NEXT: renamable $g8p10 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p9 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p8 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p7 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p15 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p11 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p12 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p14 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p5 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: renamable $g8p4 = LQARX $x3, renamable $x4
- ; CHECK-NEXT: $x3 = OR8 $x27, $x27
+ ; CHECK-NEXT: renamable $g8p12 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p11 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p10 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p9 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p8 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p7 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p15 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p13 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p14 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p5 = LQARX renamable $x7, renamable $x4
+ ; CHECK-NEXT: renamable $g8p4 = LQARX renamable $x7, renamable $x4
; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p5, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p14, renamable $x7, renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: STQCX killed renamable $g8p12, renamable $x7, renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: STQCX killed renamable $g8p11, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX killed renamable $g8p13, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p15, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p7, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p8, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p9, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: STQCX killed renamable $g8p10, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX killed renamable $g8p11, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX killed renamable $g8p12, renamable $x7, renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: $x8 = LD -224, $x1
; CHECK-NEXT: $x9 = LD -216, $x1
; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
@@ -140,13 +139,11 @@ body: |
; CHECK-NEXT: $x8 = LD -192, $x1
; CHECK-NEXT: $x9 = LD -184, $x1
; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
+ ; CHECK-NEXT: STQCX renamable $g8p1, killed renamable $x7, killed renamable $x4, implicit-def dead $cr0
; CHECK-NEXT: $x8 = LD -176, $x1
; CHECK-NEXT: $x9 = LD -168, $x1
- ; CHECK-NEXT: STQCX killed renamable $g8p4, renamable $x7, renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: STQCX killed renamable $g8p13, killed renamable $x7, killed renamable $x4, implicit-def dead $cr0
- ; CHECK-NEXT: $x8 = LD -160, $x1
- ; CHECK-NEXT: $x9 = LD -152, $x1
; CHECK-NEXT: STQCX killed renamable $g8p4, $x5, $x6, implicit-def dead $cr0
+ ; CHECK-NEXT: $x2 = LD -152, $x1 :: (load (s64) from %stack.4)
; CHECK-NEXT: $x31 = LD -8, $x1 :: (load (s64) from %fixed-stack.0)
; CHECK-NEXT: $x30 = LD -16, $x1 :: (load (s64) from %fixed-stack.1, align 16)
; CHECK-NEXT: $x29 = LD -24, $x1 :: (load (s64) from %fixed-stack.2)
@@ -216,10 +213,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x4 = OR8 $x16, $x16
; CHECK-NEXT: $x5 = OR8 $x17, $x17
- ; CHECK-NEXT: $x3 = OR8 $x5, $x5
- ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit killed $x3, implicit $x4
+ ; CHECK-NEXT: BLR8 implicit $lr8, implicit undef $rm, implicit $x5, implicit $x4
%0:g8prc = COPY $g8p8
- $x3 = COPY %0.sub_gp8_x1:g8prc
+ $x5 = COPY %0.sub_gp8_x1:g8prc
$x4 = COPY %0.sub_gp8_x0:g8prc
- BLR8 implicit $lr8, implicit undef $rm, implicit $x3, implicit $x4
+ BLR8 implicit $lr8, implicit undef $rm, implicit $x5, implicit $x4
...
diff --git a/llvm/test/CodeGen/PowerPC/mflr-store.mir b/llvm/test/CodeGen/PowerPC/mflr-store.mir
index 75c313617c8839..34874151f5fb92 100644
--- a/llvm/test/CodeGen/PowerPC/mflr-store.mir
+++ b/llvm/test/CodeGen/PowerPC/mflr-store.mir
@@ -27,12 +27,12 @@ body: |
; CHECK: $x0 = MFLR8 implicit $lr8
; CHECK-NEXT: STD killed $x0, 16, $x1
; CHECK-NEXT: $x1 = STDU $x1, -32752, $x1
- ; CHECK-NEXT: BL8 @test_callee, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x2, implicit-def $r1, implicit-def $x3
+ ; CHECK-NEXT: BL8 @test_callee, csr_ppc64_r2, implicit-def dead $lr8, implicit $rm, implicit-def $r1, implicit-def $x3
; CHECK-NEXT: $x1 = ADDI8 $x1, 32752
; CHECK-NEXT: $x0 = LD 16, $x1
; CHECK-NEXT: MTLR8 $x0, implicit-def $lr8
; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit killed $x3
- BL8 @test_callee, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x2, implicit-def $r1, implicit-def $x3
+ BL8 @test_callee, csr_ppc64_r2, implicit-def dead $lr8, implicit $rm, implicit-def $r1, implicit-def $x3
BLR8 implicit $lr8, implicit $rm, implicit $x3
...
diff --git a/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir b/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
index 8e3c0862d3cba9..088bdb8f241f30 100644
--- a/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
+++ b/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
@@ -458,6 +458,7 @@ constants: []
machineFunctionInfo: {}
body: |
bb.0.entry:
+ liveins: $x2, $x3, $x4
successors: %bb.2(0x80000000)
%64:g8rc_and_g8rc_nox0 = LDtoc @shortArray, $x2 :: (load (s64) from got)
@@ -472,7 +473,7 @@ body: |
%71:g8rc = EXTSW_32_64 killed %68
$x3 = COPY %70
$x4 = COPY %71
- BL8_NOP <mcsymbol .printf>, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $x2, implicit-def $r1, implicit-def $x3
+ BL8_NOP <mcsymbol .printf>, csr_ppc64_r2, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit-def $r1, implicit-def $x3
ADJCALLSTACKUP 112, 0, implicit-def dead $r1, implicit $r1
%73:g8rc_and_g8rc_nox0 = LDtoc @globalShortValue, $x2 :: (load (s64) from got)
%0:gprc = LHZ 0, killed %73 :: (dereferenceable load (s16) from @globalShortValue, !tbaa !3)
@@ -541,7 +542,8 @@ body: |
%126:crrc = CMPLD %11, %12
B %bb.2
- bb.1.for.cond.cleanup15:
+ bb.1.for.cond.cleanup15 (landing-pad):
+ liveins: $x2
%150:gprc_and_gprc_nor0 = EXTSH %7
%151:gprc_and_gprc_nor0 = EXTSH %0
@@ -653,9 +655,10 @@ body: |
MTCTR8loop killed %139, implicit-def dead $ctr8
B %bb.10
- bb.8.for.cond21.for.cond.cleanup25_crit_edge:
+ bb.8.for.cond21.for.cond.cleanup25_crit_edge (landing-pad):
successors: %bb.9(0x80000000)
-
+ liveins: $x2
+
%46:gprc = PHI %37, %bb.6, %60, %bb.10
%47:gprc = PHI %38, %bb.6, %61, %bb.10
%48:gprc = PHI %39, %bb.6, %62, %bb.10
diff --git a/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir b/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
index eff10d24a62f3c..347712199711e0 100644
--- a/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
+++ b/llvm/test/CodeGen/PowerPC/tocdata-non-zero-addend.mir
@@ -46,6 +46,7 @@ frameInfo:
machineFunctionInfo: {}
body: |
bb.0.entry:
+ liveins: $x3, $x2
%0:g8rc = LBZ8 @x + 2, $x2 :: (dereferenceable load (s8) from @x + 2, align 2, basealign 4)
$x3 = COPY %0
BLR8 implicit $lr8, implicit $rm, implicit $x3
@@ -62,6 +63,7 @@ frameInfo:
machineFunctionInfo: {}
body: |
bb.0.entry:
+ liveins: $x3, $x2
%0:g8rc = LBZ8 @y + 1, $x2 :: (dereferenceable load (s8) from @y + 1, basealign 4)
$x3 = COPY %0
BLR8 implicit $lr8, implicit $rm, implicit $x3
More information about the llvm-commits
mailing list