[llvm] [NFC][X86] Clang-format X86FrameLowering.cpp (PR #73287)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 23 22:11:24 PST 2023
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-x86
Author: Shengchen Kan (KanRobert)
<details>
<summary>Changes</summary>
---
Patch is 36.33 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/73287.diff
1 File Affected:
- (modified) llvm/lib/Target/X86/X86FrameLowering.cpp (+183-165)
``````````diff
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index f268fd5b3fe95e6..b042f6865f40d01 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -68,8 +68,8 @@ bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
-bool
-X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+bool X86FrameLowering::canSimplifyCallFramePseudos(
+ const MachineFunction &MF) const {
return hasReservedCallFrame(MF) ||
MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
(hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
@@ -83,8 +83,8 @@ X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
-bool
-X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
+bool X86FrameLowering::needsFrameIndexResolution(
+ const MachineFunction &MF) const {
return MF.getFrameInfo().hasStackObjects() ||
MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
@@ -194,8 +194,8 @@ flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- const DebugLoc &DL,
- int64_t NumBytes, bool InEpilogue) const {
+ const DebugLoc &DL, int64_t NumBytes,
+ bool InEpilogue) const {
bool isSub = NumBytes < 0;
uint64_t Offset = isSub ? -NumBytes : NumBytes;
MachineInstr::MIFlag Flag =
@@ -279,13 +279,11 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
if (ThisVal == SlotSize) {
// Use push / pop for slot sized adjustments as a size optimization. We
// need to find a dead register when using pop.
- unsigned Reg = isSub
- ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
- : TRI->findDeadCallerSavedReg(MBB, MBBI);
+ unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
+ : TRI->findDeadCallerSavedReg(MBB, MBBI);
if (Reg) {
- unsigned Opc = isSub
- ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
- : (Is64Bit ? X86::POP64r : X86::POP32r);
+ unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
+ : (Is64Bit ? X86::POP64r : X86::POP32r);
BuildMI(MBB, MBBI, DL, TII.get(Opc))
.addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
.setMIFlag(Flag);
@@ -922,24 +920,16 @@ void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
// registers. For the prolog expansion we use RAX, RCX and RDX.
MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterClass *RegClass = &X86::GR64RegClass;
- const Register SizeReg = InProlog ? X86::RAX
- : MRI.createVirtualRegister(RegClass),
- ZeroReg = InProlog ? X86::RCX
- : MRI.createVirtualRegister(RegClass),
- CopyReg = InProlog ? X86::RDX
- : MRI.createVirtualRegister(RegClass),
- TestReg = InProlog ? X86::RDX
- : MRI.createVirtualRegister(RegClass),
- FinalReg = InProlog ? X86::RDX
- : MRI.createVirtualRegister(RegClass),
- RoundedReg = InProlog ? X86::RDX
- : MRI.createVirtualRegister(RegClass),
- LimitReg = InProlog ? X86::RCX
- : MRI.createVirtualRegister(RegClass),
- JoinReg = InProlog ? X86::RCX
- : MRI.createVirtualRegister(RegClass),
- ProbeReg = InProlog ? X86::RCX
- : MRI.createVirtualRegister(RegClass);
+ const Register
+ SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),
+ ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
+ CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
+ TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
+ FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
+ RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
+ LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
+ JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
+ ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);
// SP-relative offsets where we can save RCX and RDX.
int64_t RCXShadowSlot = 0;
@@ -1011,7 +1001,9 @@ void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
.addReg(X86::GS);
BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
// Jump if the desired stack pointer is at or above the stack limit.
- BuildMI(&MBB, DL, TII.get(X86::JCC_1)).addMBB(ContinueMBB).addImm(X86::COND_AE);
+ BuildMI(&MBB, DL, TII.get(X86::JCC_1))
+ .addMBB(ContinueMBB)
+ .addImm(X86::COND_AE);
// Add code to roundMBB to round the final stack pointer to a page boundary.
RoundMBB->addLiveIn(FinalReg);
@@ -1048,7 +1040,9 @@ void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
.addReg(RoundedReg)
.addReg(ProbeReg);
- BuildMI(LoopMBB, DL, TII.get(X86::JCC_1)).addMBB(LoopMBB).addImm(X86::COND_NE);
+ BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
+ .addMBB(LoopMBB)
+ .addImm(X86::COND_NE);
MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();
@@ -1132,7 +1126,7 @@ void X86FrameLowering::emitStackProbeCall(
CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
} else {
CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
- .addExternalSymbol(MF.createExternalSymbolName(Symbol));
+ .addExternalSymbol(MF.createExternalSymbolName(Symbol));
}
unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
@@ -1194,7 +1188,8 @@ static unsigned calculateSetFPREG(uint64_t SPAdjust) {
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
-uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
+uint64_t
+X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
Align StackAlign = getStackAlign();
@@ -1285,8 +1280,7 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
// Loop entry block
{
- const unsigned SUBOpc =
- getSUBriOpcode(Uses64BitFramePtr);
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr);
BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
.addReg(StackPtr)
.addImm(StackProbeSize)
@@ -1316,8 +1310,7 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
.addImm(0)
.setMIFlag(MachineInstr::FrameSetup);
- const unsigned SUBOpc =
- getSUBriOpcode(Uses64BitFramePtr);
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr);
BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
.addReg(StackPtr)
.addImm(StackProbeSize)
@@ -1368,7 +1361,7 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
}
}
-bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
+bool X86FrameLowering::has128ByteRedZone(const MachineFunction &MF) const {
// x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
// clobbered by any interrupt handler.
assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
@@ -1484,7 +1477,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
- uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
+ uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
bool IsFunclet = MBB.isEHFuncletEntry();
EHPersonality Personality = EHPersonality::Unknown;
if (Fn.hasPersonalityFn())
@@ -1502,8 +1495,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
bool NeedsDwarfCFI = needsDwarfCFI(MF);
Register FramePtr = TRI->getFrameRegister(MF);
const Register MachineFramePtr =
- STI.isTarget64BitILP32()
- ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
+ STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
+ : FramePtr;
Register BasePtr = TRI->getBaseRegister();
bool HasWinCFI = false;
@@ -1538,7 +1531,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
int64_t Offset = -(int64_t)SlotSize;
- BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64rmm: X86::PUSH32rmm))
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm))
.addReg(ArgBaseReg)
.addImm(1)
.addReg(X86::NoRegister)
@@ -1550,7 +1543,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Space reserved for stack-based arguments when making a (ABI-guaranteed)
// tail call.
unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
- if (TailCallArgReserveSize && IsWin64Prologue)
+ if (TailCallArgReserveSize && IsWin64Prologue)
report_fatal_error("Can't handle guaranteed tail call under win64 yet");
const bool EmitStackProbeCall =
@@ -1622,7 +1615,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
!MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize =
X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
- if (HasFP) MinSize += SlotSize;
+ if (HasFP)
+ MinSize += SlotSize;
X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
MFI.setStackSize(StackSize);
@@ -1677,8 +1671,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- NumBytes = FrameSize -
- (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
+ NumBytes =
+ FrameSize - (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
@@ -1686,8 +1680,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Save EBP/RBP into the appropriate stack slot.
BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
- .addReg(MachineFramePtr, RegState::Kill)
- .setMIFlag(MachineInstr::FrameSetup);
+ .addReg(MachineFramePtr, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
if (NeedsDwarfCFI && !ArgBaseReg.isValid()) {
// Mark the place where EBP/RBP was saved.
@@ -1802,8 +1796,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
} else {
assert(!IsFunclet && "funclets without FPs not yet implemented");
- NumBytes = StackSize -
- (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
+ NumBytes =
+ StackSize - (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
}
// Update the offset adjustment, which is mainly used by codeview to translate
@@ -1825,8 +1819,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
bool PushedRegs = false;
int StackOffset = 2 * stackGrowth;
- while (MBBI != MBB.end() &&
- MBBI->getFlag(MachineInstr::FrameSetup) &&
+ while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) &&
(MBBI->getOpcode() == X86::PUSH32r ||
MBBI->getOpcode() == X86::PUSH64r)) {
PushedRegs = true;
@@ -1896,13 +1889,13 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (Is64Bit) {
// Save RAX
BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
- .addReg(X86::RAX, RegState::Kill)
- .setMIFlag(MachineInstr::FrameSetup);
+ .addReg(X86::RAX, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
} else {
// Save EAX
BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
- .addReg(X86::EAX, RegState::Kill)
- .setMIFlag(MachineInstr::FrameSetup);
+ .addReg(X86::EAX, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
}
}
@@ -2085,16 +2078,16 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Update the base pointer with the current stack pointer.
unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
- .addReg(SPOrEstablisher)
- .setMIFlag(MachineInstr::FrameSetup);
+ .addReg(SPOrEstablisher)
+ .setMIFlag(MachineInstr::FrameSetup);
if (X86FI->getRestoreBasePointer()) {
// Stash value of base pointer. Saving RSP instead of EBP shortens
// dependence chain. Used by SjLj EH.
unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
- addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
- FramePtr, true, X86FI->getRestoreBasePointerOffset())
- .addReg(SPOrEstablisher)
- .setMIFlag(MachineInstr::FrameSetup);
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), FramePtr, true,
+ X86FI->getRestoreBasePointerOffset())
+ .addReg(SPOrEstablisher)
+ .setMIFlag(MachineInstr::FrameSetup);
}
if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
@@ -2207,9 +2200,9 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// This is the size of the pushed CSRs.
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
// This is the size of callee saved XMMs.
- const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
- unsigned XMMSize = WinEHXMMSlotInfo.size() *
- TRI->getSpillSize(X86::VR128RegClass);
+ const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
+ unsigned XMMSize =
+ WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
// This is the amount of stack a funclet needs to allocate.
unsigned UsedSize;
EHPersonality Personality =
@@ -2233,10 +2226,9 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
}
static bool isTailCallOpcode(unsigned Opc) {
- return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
- Opc == X86::TCRETURNmi ||
- Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
- Opc == X86::TCRETURNmi64;
+ return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
+ Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
+ Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
}
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
@@ -2322,7 +2314,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (X86FI->hasSwiftAsyncContext()) {
// Discard the context.
int Offset = 16 + mergeSPUpdates(MBB, MBBI, true);
- emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/true);
+ emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/ true);
}
// Pop EBP.
BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
@@ -2332,8 +2324,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// We need to reset FP to its untagged state on return. Bit 60 is currently
// used to show the presence of an extended frame.
if (X86FI->hasSwiftAsyncContext()) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8),
- MachineFramePtr)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), MachineFramePtr)
.addUse(MachineFramePtr)
.addImm(60)
.setMIFlag(MachineInstr::FrameDestroy);
@@ -2421,13 +2412,12 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// effects of the prologue can safely be undone.
if (LEAAmount != 0) {
unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
- addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
- FramePtr, false, LEAAmount);
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), FramePtr,
+ false, LEAAmount);
--MBBI;
} else {
unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
- BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
- .addReg(FramePtr);
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr).addReg(FramePtr);
--MBBI;
}
} else if (NumBytes) {
@@ -2533,7 +2523,8 @@ StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- // If required, include space for extra hidden slot for stashing base pointer.
+ // If required, include space for extra hidden slot for stashing base
+ // pointer.
if (X86FI->getRestoreBasePointer())
FrameSize += SlotSize;
uint64_t NumBytes = FrameSize - CSSize;
@@ -2578,7 +2569,7 @@ int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI,
Register &FrameReg) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
+ const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
const auto it = WinEHXMMSlotInfo.find(FI);
if (it == WinEHXMMSlotInfo.end())
@@ -2706,7 +2697,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// }
// [EBP]
MFI.CreateFixedObject(-TailCallReturnAddrDelta,
- TailCallReturnAddrDelta - SlotSize, true);
+ TailCallReturnAddrDelta - SlotSize, true);
}
// Spill the BasePtr if it's used.
@@ -2737,7 +2728,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// about avoiding it later.
Register FPReg = TRI->getFrameRegister(MF);
for (unsigned i = 0; i < CSI.size(); ++i) {
- if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
+ if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
CSI.erase(CSI.begin() + i);
break;
}
@@ -2846,8 +2837,9 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
// passed in callee saved registers.
// Omitting the kill flags is conservatively correct even if the live-in
// is not used after all.
- BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
- .setMIFlag(MachineInstr::FrameSetup);
+ BuildMI(MBB, MI, DL, TII.get(Opc))
+ .addReg(Reg, getKillRegState(CanKill))
+ .setMIFlag(MachineInstr::FrameSetup);
}
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -2942,8 +2934,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
// Reload XMMs from stack frame.
for (const CalleeSavedInfo &I : CSI) {
Register Reg = I.getReg();
- if (X86::GR64RegClass.contains(Reg) ||
- X86::GR32RegClass.contains(Reg))
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
// If this is k-register make sure we lookup via the largest legal type.
@@ -2970,8 +2961,7 @@ bool X86FrameLowering::restoreCalleeSaved...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/73287
More information about the llvm-commits
mailing list