[llvm] Convert as many LivePhysRegs uses as possible to LiveRegUnits (PR #83905)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 4 21:24:22 PST 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/83905
From f0d6c615dfb869e7eb2cec8ee150ed673b65d69f Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Mon, 4 Mar 2024 14:48:09 -0500
Subject: [PATCH] Convert as many LivePhysRegs uses as possible to LiveRegUnits
Additionally, remove unused #include "llvm/CodeGen/LivePhysRegs.h"
---
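Reviewer note: the functional core of this change is the available()
query. LivePhysRegs::available() takes a MachineRegisterInfo argument
and also refuses reserved registers, while LiveRegUnits::available()
takes just the register and handles sub-/super-register aliasing
implicitly via register units. Below is a minimal before/after sketch,
assuming the signatures in current trunk headers; the
isAvailableOld/isAvailableNew helpers are illustrative only, and the
reserved-register check is the one behavioral difference to keep in
mind when reviewing the converted call sites:

  #include "llvm/CodeGen/LivePhysRegs.h"
  #include "llvm/CodeGen/LiveRegUnits.h"
  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  #include "llvm/CodeGen/TargetRegisterInfo.h"
  using namespace llvm;

  // Before: the set stores whole registers, so available() must walk
  // aliases and consult MRI to reject reserved registers.
  static bool isAvailableOld(const MachineBasicBlock &MBB,
                             const TargetRegisterInfo &TRI,
                             MCRegister Reg) {
    LivePhysRegs LiveRegs(TRI);
    LiveRegs.addLiveOuts(MBB);
    return LiveRegs.available(MBB.getParent()->getRegInfo(), Reg);
  }

  // After: the set stores register units, so aliasing is implicit and
  // no MRI parameter is needed. Note that this variant does not
  // consult MachineRegisterInfo::isReserved().
  static bool isAvailableNew(const MachineBasicBlock &MBB,
                             const TargetRegisterInfo &TRI,
                             MCRegister Reg) {
    LiveRegUnits LiveRegs(TRI);
    LiveRegs.addLiveOuts(MBB);
    return LiveRegs.available(Reg);
  }

The register-unit representation is also what makes the MRI parameter
unnecessary: a unit is live iff any register containing it is live, so
alias queries reduce to bit tests.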
llvm/lib/CodeGen/ReachingDefAnalysis.cpp | 24 +++++------
.../Target/AArch64/AArch64FrameLowering.cpp | 18 ++++----
.../Target/AMDGPU/SIOptimizeExecMasking.cpp | 8 ++--
llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 6 +--
llvm/lib/Target/ARM/Thumb1FrameLowering.cpp | 6 +--
.../CodeGen/AArch64/arm64-shrink-wrapping.ll | 42 +++++++++----------
.../AArch64/stack-probing-no-scratch-reg.mir | 38 ++++++++---------
llvm/test/CodeGen/Thumb/PR35481.ll | 14 +++----
8 files changed, 76 insertions(+), 80 deletions(-)
diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
index 61a668907be77d..07fa92889d8853 100644
--- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
+++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SetOperations.h"
-#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/ReachingDefAnalysis.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
@@ -421,9 +421,9 @@ void ReachingDefAnalysis::getLiveOuts(MachineBasicBlock *MBB,
return;
VisitedBBs.insert(MBB);
- LivePhysRegs LiveRegs(*TRI);
+ LiveRegUnits LiveRegs(*TRI);
LiveRegs.addLiveOuts(*MBB);
- if (LiveRegs.available(MBB->getParent()->getRegInfo(), PhysReg))
+ if (LiveRegs.available(PhysReg))
return;
if (auto *Def = getLocalLiveOutMIDef(MBB, PhysReg))
@@ -469,11 +469,11 @@ MachineInstr *ReachingDefAnalysis::getMIOperand(MachineInstr *MI,
bool ReachingDefAnalysis::isRegUsedAfter(MachineInstr *MI,
MCRegister PhysReg) const {
MachineBasicBlock *MBB = MI->getParent();
- LivePhysRegs LiveRegs(*TRI);
+ LiveRegUnits LiveRegs(*TRI);
LiveRegs.addLiveOuts(*MBB);
// Yes if the register is live out of the basic block.
- if (!LiveRegs.available(MBB->getParent()->getRegInfo(), PhysReg))
+ if (!LiveRegs.available(PhysReg))
return true;
// Walk backwards through the block to see if the register is live at some
@@ -481,7 +481,7 @@ bool ReachingDefAnalysis::isRegUsedAfter(MachineInstr *MI,
for (MachineInstr &Last :
instructionsWithoutDebug(MBB->instr_rbegin(), MBB->instr_rend())) {
LiveRegs.stepBackward(Last);
- if (!LiveRegs.available(MBB->getParent()->getRegInfo(), PhysReg))
+ if (!LiveRegs.available(PhysReg))
return InstIds.lookup(&Last) > InstIds.lookup(MI);
}
return false;
@@ -504,9 +504,9 @@ bool ReachingDefAnalysis::isRegDefinedAfter(MachineInstr *MI,
bool ReachingDefAnalysis::isReachingDefLiveOut(MachineInstr *MI,
MCRegister PhysReg) const {
MachineBasicBlock *MBB = MI->getParent();
- LivePhysRegs LiveRegs(*TRI);
+ LiveRegUnits LiveRegs(*TRI);
LiveRegs.addLiveOuts(*MBB);
- if (LiveRegs.available(MBB->getParent()->getRegInfo(), PhysReg))
+ if (LiveRegs.available(PhysReg))
return false;
auto Last = MBB->getLastNonDebugInstr();
@@ -525,9 +525,9 @@ bool ReachingDefAnalysis::isReachingDefLiveOut(MachineInstr *MI,
MachineInstr *
ReachingDefAnalysis::getLocalLiveOutMIDef(MachineBasicBlock *MBB,
MCRegister PhysReg) const {
- LivePhysRegs LiveRegs(*TRI);
+ LiveRegUnits LiveRegs(*TRI);
LiveRegs.addLiveOuts(*MBB);
- if (LiveRegs.available(MBB->getParent()->getRegInfo(), PhysReg))
+ if (LiveRegs.available(PhysReg))
return nullptr;
auto Last = MBB->getLastNonDebugInstr();
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 5cc612e89162af..273d7b8a8b48f8 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -197,6 +197,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -988,7 +989,7 @@ void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
}
}
-static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs,
+static void getLiveRegsForEntryMBB(LiveRegUnits &LiveRegs,
const MachineBasicBlock &MBB) {
const MachineFunction *MF = MBB.getParent();
LiveRegs.addLiveIns(MBB);
@@ -1018,16 +1019,15 @@ static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
- LivePhysRegs LiveRegs(TRI);
+ LiveRegUnits LiveRegs(TRI);
getLiveRegsForEntryMBB(LiveRegs, *MBB);
// Prefer X9 since it was historically used for the prologue scratch reg.
- const MachineRegisterInfo &MRI = MF->getRegInfo();
- if (LiveRegs.available(MRI, AArch64::X9))
+ if (LiveRegs.available(AArch64::X9))
return AArch64::X9;
- for (unsigned Reg : AArch64::GPR64RegClass) {
- if (LiveRegs.available(MRI, Reg))
+ for (Register Reg : AArch64::GPR64RegClass) {
+ if (LiveRegs.available(Reg))
return Reg;
}
return AArch64::NoRegister;
@@ -1044,13 +1044,11 @@ bool AArch64FrameLowering::canUseAsPrologue(
if (AFI->hasSwiftAsyncContext()) {
const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
- const MachineRegisterInfo &MRI = MF->getRegInfo();
- LivePhysRegs LiveRegs(TRI);
+ LiveRegUnits LiveRegs(TRI);
getLiveRegsForEntryMBB(LiveRegs, MBB);
// The StoreSwiftAsyncContext clobbers X16 and X17. Make sure they are
// available.
- if (!LiveRegs.available(MRI, AArch64::X16) ||
- !LiveRegs.available(MRI, AArch64::X17))
+ if (!LiveRegs.available(AArch64::X16) || !LiveRegs.available(AArch64::X17))
return false;
}
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
index e3f54d01eb22a2..d510e729512571 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -11,7 +11,7 @@
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
@@ -313,7 +313,7 @@ MachineBasicBlock::reverse_iterator SIOptimizeExecMasking::findExecCopy(
return E;
}
-// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
+// XXX - Seems LiveRegUnits doesn't work correctly since it will incorrectly
// report the register as unavailable because a super-register with a lane mask
// is unavailable.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
@@ -383,7 +383,7 @@ bool SIOptimizeExecMasking::isRegisterInUseBetween(MachineInstr &Stop,
MCRegister Reg,
bool UseLiveOuts,
bool IgnoreStart) const {
- LivePhysRegs LR(*TRI);
+ LiveRegUnits LR(*TRI);
if (UseLiveOuts)
LR.addLiveOuts(*Stop.getParent());
@@ -396,7 +396,7 @@ bool SIOptimizeExecMasking::isRegisterInUseBetween(MachineInstr &Stop,
LR.stepBackward(*A);
}
- return !LR.available(*MRI, Reg);
+ return !LR.available(Reg);
}
// Determine if a register Reg is not re-defined and still in use
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 6121055eb02176..9bcf0007974485 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -31,7 +31,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -109,7 +109,7 @@ namespace {
const ARMSubtarget *STI;
const TargetLowering *TL;
ARMFunctionInfo *AFI;
- LivePhysRegs LiveRegs;
+ LiveRegUnits LiveRegs;
RegisterClassInfo RegClassInfo;
MachineBasicBlock::const_iterator LiveRegPos;
bool LiveRegsValid;
@@ -589,7 +589,7 @@ unsigned ARMLoadStoreOpt::findFreeReg(const TargetRegisterClass &RegClass) {
}
for (unsigned Reg : RegClassInfo.getOrder(&RegClass))
- if (LiveRegs.available(MF->getRegInfo(), Reg))
+ if (LiveRegs.available(Reg))
return Reg;
return 0;
}
diff --git a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index f1558e64ed3eed..8b7bbe0409bedf 100644
--- a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -601,11 +601,11 @@ bool Thumb1FrameLowering::needPopSpecialFixUp(const MachineFunction &MF) const {
static void findTemporariesForLR(const BitVector &GPRsNoLRSP,
const BitVector &PopFriendly,
- const LivePhysRegs &UsedRegs, unsigned &PopReg,
+ const LiveRegUnits &UsedRegs, unsigned &PopReg,
unsigned &TmpReg, MachineRegisterInfo &MRI) {
PopReg = TmpReg = 0;
for (auto Reg : GPRsNoLRSP.set_bits()) {
- if (UsedRegs.available(MRI, Reg)) {
+ if (UsedRegs.available(Reg)) {
// Remember the first pop-friendly register and exit.
if (PopFriendly.test(Reg)) {
PopReg = Reg;
@@ -673,7 +673,7 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB,
// Look for a temporary register to use.
// First, compute the liveness information.
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
- LivePhysRegs UsedRegs(TRI);
+ LiveRegUnits UsedRegs(TRI);
UsedRegs.addLiveOuts(MBB);
// The semantic of pristines changed recently and now,
// the callee-saved registers that are touched in the function
diff --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index 5806bcf0dacf16..5db2064c168a36 100644
--- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -1036,6 +1036,18 @@ false:
define void @stack_realign2(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2, ptr %ptr3, ptr %ptr4, ptr %ptr5, ptr %ptr6) {
; ENABLE-LABEL: stack_realign2:
; ENABLE: ; %bb.0:
+; ENABLE-NEXT: lsl w8, w1, w0
+; ENABLE-NEXT: lsr w9, w0, w1
+; ENABLE-NEXT: lsl w14, w0, w1
+; ENABLE-NEXT: lsr w11, w1, w0
+; ENABLE-NEXT: add w15, w1, w0
+; ENABLE-NEXT: sub w10, w8, w9
+; ENABLE-NEXT: subs w17, w1, w0
+; ENABLE-NEXT: add w16, w14, w8
+; ENABLE-NEXT: add w12, w9, w11
+; ENABLE-NEXT: add w13, w11, w15
+; ENABLE-NEXT: b.le LBB14_2
+; ENABLE-NEXT: ; %bb.1: ; %true
; ENABLE-NEXT: stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x26, x25, [sp, #16] ; 16-byte Folded Spill
; ENABLE-NEXT: stp x24, x23, [sp, #32] ; 16-byte Folded Spill
@@ -1043,8 +1055,8 @@ define void @stack_realign2(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2, ptr %ptr3, ptr
; ENABLE-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
; ENABLE-NEXT: add x29, sp, #80
-; ENABLE-NEXT: sub x9, sp, #32
-; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0
+; ENABLE-NEXT: sub x18, sp, #32
+; ENABLE-NEXT: and sp, x18, #0xffffffffffffffe0
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -1058,22 +1070,17 @@ define void @stack_realign2(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2, ptr %ptr3, ptr
; ENABLE-NEXT: .cfi_offset w26, -80
; ENABLE-NEXT: .cfi_offset w27, -88
; ENABLE-NEXT: .cfi_offset w28, -96
-; ENABLE-NEXT: lsl w8, w1, w0
-; ENABLE-NEXT: lsr w9, w0, w1
-; ENABLE-NEXT: lsl w14, w0, w1
-; ENABLE-NEXT: lsr w11, w1, w0
-; ENABLE-NEXT: add w15, w1, w0
-; ENABLE-NEXT: sub w10, w8, w9
-; ENABLE-NEXT: subs w17, w1, w0
-; ENABLE-NEXT: add w16, w14, w8
-; ENABLE-NEXT: add w12, w9, w11
-; ENABLE-NEXT: add w13, w11, w15
-; ENABLE-NEXT: b.le LBB14_2
-; ENABLE-NEXT: ; %bb.1: ; %true
; ENABLE-NEXT: str w0, [sp]
; ENABLE-NEXT: ; InlineAsm Start
; ENABLE-NEXT: nop
; ENABLE-NEXT: ; InlineAsm End
+; ENABLE-NEXT: sub sp, x29, #80
+; ENABLE-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
+; ENABLE-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
+; ENABLE-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
+; ENABLE-NEXT: ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
+; ENABLE-NEXT: ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
+; ENABLE-NEXT: ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
; ENABLE-NEXT: LBB14_2: ; %false
; ENABLE-NEXT: str w14, [x2]
; ENABLE-NEXT: str w8, [x3]
@@ -1084,13 +1091,6 @@ define void @stack_realign2(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2, ptr %ptr3, ptr
; ENABLE-NEXT: stp w0, w1, [x2, #4]
; ENABLE-NEXT: stp w16, w10, [x2, #12]
; ENABLE-NEXT: stp w12, w13, [x2, #20]
-; ENABLE-NEXT: sub sp, x29, #80
-; ENABLE-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
-; ENABLE-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
-; ENABLE-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
-; ENABLE-NEXT: ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
-; ENABLE-NEXT: ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
-; ENABLE-NEXT: ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
; ENABLE-NEXT: ret
;
; DISABLE-LABEL: stack_realign2:
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir b/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
index f50bd9ab4b8a1b..078d8a5bf6b66e 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
+++ b/llvm/test/CodeGen/AArch64/stack-probing-no-scratch-reg.mir
@@ -43,43 +43,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: f
; CHECK: bb.0.entry:
- ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $lr
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x9 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead $wzr = SUBSWri killed renamable $w0, 1, 0, implicit-def $nzcv
+ ; CHECK-NEXT: Bcc 12, %bb.2, implicit $nzcv
+ ; CHECK-NEXT: B %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.if.then1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x23, $x25, $x25, $x27, $x28, $lr
+ ; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 :: (store (s64) into %stack.2), (store (s64) into %stack.1)
- ; CHECK-NEXT: $x9 = frame-setup SUBXri $sp, 36, 12
+ ; CHECK-NEXT: $xzr = frame-setup SUBXri $sp, 36, 12
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.3.entry:
+ ; CHECK-NEXT: bb.3.if.then1:
; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.3(0x40000000)
- ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x25, $x27, $x28
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1, 12
; CHECK-NEXT: frame-setup STRXui $xzr, $sp, 0
- ; CHECK-NEXT: $xzr = frame-setup SUBSXrx64 $sp, $x9, 24, implicit-def $nzcv
+ ; CHECK-NEXT: $xzr = frame-setup SUBSXrx64 $sp, $xzr, 24, implicit-def $nzcv
; CHECK-NEXT: frame-setup Bcc 1, %bb.3, implicit $nzcv
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.4.entry:
- ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x25, $x27, $x28
+ ; CHECK-NEXT: bb.4.if.then1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $sp = frame-setup SUBXri $sp, 2544, 0
; CHECK-NEXT: frame-setup STRXui $xzr, $sp, 0
- ; CHECK-NEXT: $x9 = IMPLICIT_DEF
- ; CHECK-NEXT: dead $wzr = SUBSWri killed renamable $w0, 1, 0, implicit-def $nzcv
- ; CHECK-NEXT: Bcc 12, %bb.2, implicit $nzcv
- ; CHECK-NEXT: B %bb.1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1.if.then1:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x23, $x25, $x25, $x27, $x28
- ; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x0 = ADDXri $sp, 0, 0
; CHECK-NEXT: BL @g, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.2.exit:
; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 36, 12
; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 2544, 0
; CHECK-NEXT: early-clobber $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.2), (load (s64) from %stack.1)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.exit:
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
; CHECK-NEXT: RET_ReallyLR
bb.0.entry:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
diff --git a/llvm/test/CodeGen/Thumb/PR35481.ll b/llvm/test/CodeGen/Thumb/PR35481.ll
index ad3215ecb94952..e48d1547782caf 100644
--- a/llvm/test/CodeGen/Thumb/PR35481.ll
+++ b/llvm/test/CodeGen/Thumb/PR35481.ll
@@ -18,11 +18,10 @@ define <4 x i32> @f() local_unnamed_addr #0 {
; CHECK-V4T-NEXT: movs r2, #3
; CHECK-V4T-NEXT: movs r3, #4
; CHECK-V4T-NEXT: bl g
+; CHECK-V4T-NEXT: ldr r7, [sp, #4]
+; CHECK-V4T-NEXT: mov lr, r7
; CHECK-V4T-NEXT: pop {r7}
-; CHECK-V4T-NEXT: mov r12, r0
-; CHECK-V4T-NEXT: pop {r0}
-; CHECK-V4T-NEXT: mov lr, r0
-; CHECK-V4T-NEXT: mov r0, r12
+; CHECK-V4T-NEXT: add sp, #4
; CHECK-V4T-NEXT: bx lr
;
; CHECK-V8M-LABEL: f:
@@ -36,11 +35,10 @@ define <4 x i32> @f() local_unnamed_addr #0 {
; CHECK-V8M-NEXT: movs r1, #2
; CHECK-V8M-NEXT: movs r2, #3
; CHECK-V8M-NEXT: movs r3, #4
+; CHECK-V8M-NEXT: ldr r7, [sp, #4]
+; CHECK-V8M-NEXT: mov lr, r7
; CHECK-V8M-NEXT: pop {r7}
-; CHECK-V8M-NEXT: mov r12, r0
-; CHECK-V8M-NEXT: pop {r0}
-; CHECK-V8M-NEXT: mov lr, r0
-; CHECK-V8M-NEXT: mov r0, r12
+; CHECK-V8M-NEXT: add sp, #4
; CHECK-V8M-NEXT: b g
entry:
%call = tail call i32 @h(i32 1)