[llvm] [RISCV][GISEL] lowerFormalArguments for variadic arguments (PR #73064)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 28 18:05:50 PST 2023
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/73064
>From bae7d94861bd5a28c2b732a87075cb27e1cd7a21 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 21 Nov 2023 07:05:09 -0800
Subject: [PATCH 1/4] [RISCV][GISEL] lowerFormalArguments for variadic
arguments
---
.../Target/RISCV/GISel/RISCVCallLowering.cpp | 82 +++-
.../Target/RISCV/GISel/RISCVCallLowering.h | 5 +
.../irtranslator/lower-args-vararg.ll | 365 ++++++++++++++++++
3 files changed, 444 insertions(+), 8 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 1aba8a8f52e96fc..f0aa0417a03164b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -423,18 +423,76 @@ bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
return true;
}
+/// If there are varargs that were passed in a0-a7, the data in those registers
+/// must be copied to the varargs save area on the stack.
+void RISCVCallLowering::saveVarArgRegisters(
+ MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
+ IncomingValueAssigner &Assigner, CCState &CCInfo) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+ unsigned XLenInBytes = Subtarget.getXLen() / 8;
+ ArrayRef<MCPhysReg> ArgRegs({RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
+ RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17});
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+
+  // Offset of the first variable argument from the stack pointer, and size
+  // of the vararg save area. For now, the varargs save area is either zero
+  // or large enough to hold a0-a7.
+ int VaArgOffset, VarArgsSaveSize;
+ // If all registers are allocated, then all varargs must be passed on the
+ // stack and we don't need to save any argregs.
+ if (ArgRegs.size() == Idx) {
+ VaArgOffset = Assigner.StackSize;
+ VarArgsSaveSize = 0;
+ } else {
+ VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+ VaArgOffset = -VarArgsSaveSize;
+ }
+
+  // Record the frame index of the first variable argument, which is needed
+  // by G_VASTART.
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+ RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+ RVFI->setVarArgsFrameIndex(FI);
+
+  // If saving an odd number of registers, create an extra stack slot to
+  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+  // offsets to even-numbered registers remain 2*XLEN-aligned.
+ if (Idx % 2) {
+ MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+ VarArgsSaveSize += XLenInBytes;
+ }
+ RVFI->setVarArgsSaveSize(VarArgsSaveSize);
+
+ // Copy the integer registers that may have been used for passing varargs
+ // to the vararg save area.
+ const LLT p0 = LLT::pointer(0, Subtarget.getXLen());
+ const LLT sXLen = LLT::scalar(Subtarget.getXLen());
+ const MVT XLenMVT = MVT::getIntegerVT(Subtarget.getXLen());
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
+ const Register VReg = MRI.createGenericVirtualRegister(sXLen);
+ Handler.assignValueToReg(
+ VReg, ArgRegs[I],
+ CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenMVT,
+ ArgRegs[I], XLenMVT, CCValAssign::Full));
+ FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+ auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
+ auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
+ MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+ }
+}
+
bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
ArrayRef<ArrayRef<Register>> VRegs,
FunctionLoweringInfo &FLI) const {
- // Early exit if there are no arguments.
- if (F.arg_empty())
+  // Early exit if there are no arguments. Varargs are not part of F.args()
+  // but must still be lowered.
+ if (F.arg_empty() && !F.isVarArg())
return true;
- // TODO: Support vararg functions.
- if (F.isVarArg())
- return false;
-
const RISCVSubtarget &Subtarget =
MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
for (auto &Arg : F.args()) {
@@ -467,8 +525,16 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
/*IsRet=*/false);
RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
- return determineAndHandleAssignments(Handler, Assigner, SplitArgInfos,
- MIRBuilder, CC, F.isVarArg());
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
+ if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
+ !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
+ return false;
+
+ if (F.isVarArg())
+ saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);
+
+ return true;
}
bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
index d80a666f3489475..abe704b4a645189 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -42,6 +42,11 @@ class RISCVCallLowering : public CallLowering {
private:
bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
+
+ void saveVarArgRegisters(MachineIRBuilder &MIRBuilder,
+ CallLowering::IncomingValueHandler &Handler,
+ IncomingValueAssigner &Assigner,
+ CCState &CCInfo) const;
};
} // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
new file mode 100644
index 000000000000000..4f4d71de46ec89b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64 %s
+
+define void @va1arg(ptr %a, ...) {
+ ; RV32-LABEL: name: va1arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+ ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+ ; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+ ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+ ; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+ ; RV32-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+ ; RV32-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va1arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+ ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+ ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+ ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+ ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+ ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va2arg(ptr %a, ptr %b, ...) {
+ ; RV32-LABEL: name: va2arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+ ; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+ ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.4)
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+ ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+ ; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+ ; RV32-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va2arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+ ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+ ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+ ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.4)
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
+ ; RV32-LABEL: name: va3arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+ ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.4)
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+ ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+ ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+ ; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va3arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+ ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.4)
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
+ ; RV32-LABEL: name: va4arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+ ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+ ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va4arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
+ ; RV32-LABEL: name: va5arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+ ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va5arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
+ ; RV32-LABEL: name: va6arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va6arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...) {
+ ; RV32-LABEL: name: va7arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+ ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va7arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+ ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
+
+define void @va8arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ptr %h, ...) {
+ ; RV32-LABEL: name: va8arg
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+ ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+ ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: va8arg
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+ ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+ ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+ ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+ ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+ ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
+ ; RV64-NEXT: PseudoRET
+ ret void
+}
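
A worked example may help readers follow the offset arithmetic in
saveVarArgRegisters above. The standalone C++ sketch below is a model of the
sizing logic, not part of the patch; SaveArea, computeSaveArea, Idx, and
StackSize are illustrative names standing in for the patch's
VaArgOffset/VarArgsSaveSize computation, CCState::getFirstUnallocated, and
Assigner.StackSize.

  #include <cassert>

  struct SaveArea {
    int FirstVarArgOffset; // offset of the first vararg (VaArgOffset)
    int SaveSize;          // bytes reserved for spilled a-regs (VarArgsSaveSize)
  };

  SaveArea computeSaveArea(unsigned Idx, unsigned XLenInBytes,
                           unsigned StackSize) {
    const unsigned NumArgRegs = 8; // a0-a7
    SaveArea A;
    if (Idx == NumArgRegs) {
      // All registers hold fixed arguments: varargs live entirely on the
      // caller's stack, so nothing needs saving.
      A.FirstVarArgOffset = StackSize;
      A.SaveSize = 0;
    } else {
      A.SaveSize = XLenInBytes * (NumArgRegs - Idx);
      A.FirstVarArgOffset = -A.SaveSize;
    }
    // An odd first index gets one extra pad slot so the save area stays
    // 2*XLEN-aligned.
    if (Idx % 2)
      A.SaveSize += XLenInBytes;
    return A;
  }

  int main() {
    // va1arg on RV64: a0 is fixed, a1-a7 are saved (7 * 8 = 56 bytes),
    // plus an 8-byte pad slot because Idx == 1 is odd, so 64 bytes total.
    SaveArea A = computeSaveArea(1, 8, 0);
    assert(A.FirstVarArgOffset == -56 && A.SaveSize == 64);
    // va2arg on RV64: a0/a1 are fixed, a2-a7 are saved, no pad needed.
    A = computeSaveArea(2, 8, 0);
    assert(A.FirstVarArgOffset == -48 && A.SaveSize == 48);
  }

This matches the checks above: va1arg's RV64 body stores seven s64 values
into %fixed-stack.6 through %fixed-stack.0, 56 bytes of stores inside a
64-byte save area.
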
>From a9a42a4bac84a9eb9b15c112061b37c64245eb0e Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 22 Nov 2023 12:10:14 -0800
Subject: [PATCH 2/4] !fixup don't use list initializer for arrayref
---
llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index f0aa0417a03164b..2367712925e2b09 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -423,6 +423,10 @@ bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
return true;
}
+static const MCPhysReg ArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
+ RISCV::X13, RISCV::X14, RISCV::X15,
+ RISCV::X16, RISCV::X17};
+
/// If there are varargs that were passed in a0-a7, the data in those registers
/// must be copied to the varargs save area on the stack.
void RISCVCallLowering::saveVarArgRegisters(
@@ -431,8 +435,7 @@ void RISCVCallLowering::saveVarArgRegisters(
MachineFunction &MF = MIRBuilder.getMF();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
unsigned XLenInBytes = Subtarget.getXLen() / 8;
- ArrayRef<MCPhysReg> ArgRegs({RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
- RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17});
+ ArrayRef<MCPhysReg> ArgRegs(ArgGPRs);
unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
// Offset of the first variable argument from stack pointer, and size of
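
A note on why patch 2 moves the registers into a static array: ArrayRef is a
non-owning view, so the original list-initialized form bound the view to a
temporary initializer list that is destroyed at the end of the declaration.
The commit message does not spell this out, so treat the following as a
plausible reading rather than the author's stated rationale. A minimal
standalone illustration of the hazard, using a stand-in view type instead of
llvm::ArrayRef:

  #include <cstddef>
  #include <initializer_list>

  // Stand-in for a non-owning view such as llvm::ArrayRef.
  template <typename T> struct View {
    const T *Data;
    size_t Len;
    View(std::initializer_list<T> IL) : Data(IL.begin()), Len(IL.size()) {}
    View(const T *D, size_t N) : Data(D), Len(N) {}
  };

  static const int ArgGPRs[] = {10, 11, 12, 13, 14, 15, 16, 17};

  int main() {
    // Dangling: the initializer_list temporary dies at the end of this
    // declaration, leaving Bad.Data pointing at dead storage.
    View<int> Bad({10, 11, 12, 13, 14, 15, 16, 17});
    // Fine: the static array outlives the view.
    View<int> Good(ArgGPRs, sizeof(ArgGPRs) / sizeof(ArgGPRs[0]));
    (void)Bad;
    (void)Good;
  }
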
>From 1ac1ca1e95f7c7618d183fb0c179709650feeb6e Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 27 Nov 2023 15:37:27 -0800
Subject: [PATCH 3/4] !fixup rename variable from MVT to VT
---
llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 2367712925e2b09..f5027babb5d7aab 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -472,14 +472,14 @@ void RISCVCallLowering::saveVarArgRegisters(
// to the vararg save area.
const LLT p0 = LLT::pointer(0, Subtarget.getXLen());
const LLT sXLen = LLT::scalar(Subtarget.getXLen());
- const MVT XLenMVT = MVT::getIntegerVT(Subtarget.getXLen());
+ const MVT XLenVT = Subtarget.getXLenVT();
MachineRegisterInfo &MRI = MF.getRegInfo();
for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
const Register VReg = MRI.createGenericVirtualRegister(sXLen);
Handler.assignValueToReg(
VReg, ArgRegs[I],
- CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenMVT,
- ArgRegs[I], XLenMVT, CCValAssign::Full));
+ CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
+ ArgRegs[I], XLenVT, CCValAssign::Full));
FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
>From 38d58024c07d3b84ab2a7c8c476a4a471976464f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 28 Nov 2023 18:05:30 -0800
Subject: [PATCH 4/4] !fixup remove value from memoperand
---
.../Target/RISCV/GISel/RISCVCallLowering.cpp | 6 +-
.../irtranslator/lower-args-vararg.ll | 112 +++++++++---------
2 files changed, 61 insertions(+), 57 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index f5027babb5d7aab..9c3067800c87f6d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -483,7 +483,11 @@ void RISCVCallLowering::saveVarArgRegisters(
FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
- MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+ auto Store =
+ MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+ // This was taken from SelectionDAG, but we are not sure why it exists.
+ // It is being investigated in github.com/llvm/llvm-project/issues/73735.
+ Store->memoperands()[0]->setValue((Value *)nullptr);
}
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
index 4f4d71de46ec89b..ecfccc48bb34fd3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -12,25 +12,25 @@ define void @va1arg(ptr %a, ...) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
- ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+ ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
- ; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+ ; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
- ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+ ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 16)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va1arg
@@ -40,25 +40,25 @@ define void @va1arg(ptr %a, ...) {
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
- ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+ ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
- ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+ ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
- ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+ ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
@@ -72,22 +72,22 @@ define void @va2arg(ptr %a, ptr %b, ...) {
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
- ; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+ ; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
- ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.4)
+ ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 16)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va2arg
@@ -98,22 +98,22 @@ define void @va2arg(ptr %a, ptr %b, ...) {
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
- ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+ ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
- ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.4)
+ ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
@@ -128,19 +128,19 @@ define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
- ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.4)
+ ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 16)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va3arg
@@ -152,19 +152,19 @@ define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
- ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.4)
+ ; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
@@ -180,16 +180,16 @@ define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+ ; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 16)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va4arg
@@ -202,16 +202,16 @@ define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+ ; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
@@ -228,13 +228,13 @@ define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.2)
+ ; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va5arg
@@ -248,13 +248,13 @@ define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.2)
+ ; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
@@ -272,10 +272,10 @@ define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+ ; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va6arg
@@ -290,10 +290,10 @@ define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+ ; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
@@ -312,7 +312,7 @@ define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0)
+ ; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va7arg
@@ -328,7 +328,7 @@ define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0)
+ ; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
; RV64-NEXT: PseudoRET
ret void
}
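
For readers comparing the patches: the memoperand change in patch 4 is
exactly what the test churn above reflects. Before the change, each vararg
spill printed its pointer info, e.g.

  G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)

and after Store->memoperands()[0]->setValue((Value *)nullptr) it prints only

  G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32))

Whether clearing the value is actually required, or merely mirrors what
SelectionDAG does, is the open question the in-code comment points at in
github.com/llvm/llvm-project/issues/73735.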