[llvm] [RISCV][GISEL] lowerFormalArguments for variadic arguments (PR #73064)

via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 21 17:38:54 PST 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-risc-v

Author: Michael Maitland (michaelmaitland)

<details>
<summary>Changes</summary>



---

Patch is 26.64 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/73064.diff


3 Files Affected:

- (modified) llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp (+76-8) 
- (modified) llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h (+5) 
- (added) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll (+365) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 1aba8a8f52e96fc..f0aa0417a03164b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -423,18 +423,78 @@ bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
   return true;
 }
 
+/// If there are varargs that were passed in a0-a7, the data in those registers
+/// must be copied to the varargs save area on the stack.
+void RISCVCallLowering::saveVarArgRegisters(
+    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
+    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+  unsigned XLenInBytes = Subtarget.getXLen() / 8;
+  static const MCPhysReg ArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
+                                      RISCV::X13, RISCV::X14, RISCV::X15,
+                                      RISCV::X16, RISCV::X17};
+  ArrayRef<MCPhysReg> ArgRegs(ArgGPRs);
+  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+
+  // Offset of the first variable argument from the stack pointer, and size of
+  // the vararg save area. For now, the varargs save area is either zero or
+  // large enough to hold a0-a7.
+  int VaArgOffset, VarArgsSaveSize;
+  // If all registers are allocated, then all varargs must be passed on the
+  // stack and we don't need to save any argregs.
+  if (ArgRegs.size() == Idx) {
+    VaArgOffset = Assigner.StackSize;
+    VarArgsSaveSize = 0;
+  } else {
+    VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+    VaArgOffset = -VarArgsSaveSize;
+  }
+
+  // Record the frame index of the first variable argument, which is needed by
+  // G_VASTART.
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  RVFI->setVarArgsFrameIndex(FI);
+
+  // If saving an odd number of registers, then create an extra stack slot to
+  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+  // offsets to even-numbered registers remain 2*XLEN-aligned.
+  if (Idx % 2) {
+    MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+    VarArgsSaveSize += XLenInBytes;
+  }
+  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
+
+  // Copy the integer registers that may have been used for passing varargs
+  // to the vararg save area.
+  const LLT p0 = LLT::pointer(0, Subtarget.getXLen());
+  const LLT sXLen = LLT::scalar(Subtarget.getXLen());
+  const MVT XLenMVT = MVT::getIntegerVT(Subtarget.getXLen());
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
+    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
+    Handler.assignValueToReg(
+        VReg, ArgRegs[I],
+        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenMVT,
+                            ArgRegs[I], XLenMVT, CCValAssign::Full));
+    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
+    auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
+    MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+  }
+}
+
 bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                              const Function &F,
                                              ArrayRef<ArrayRef<Register>> VRegs,
                                              FunctionLoweringInfo &FLI) const {
-  // Early exit if there are no arguments.
-  if (F.arg_empty())
+  // Early exit if there are no arguments. Varargs are not part of F.args() but
+  // must still be lowered.
+  if (F.arg_empty() && !F.isVarArg())
     return true;
 
-  // TODO: Support vararg functions.
-  if (F.isVarArg())
-    return false;
-
   const RISCVSubtarget &Subtarget =
       MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
   for (auto &Arg : F.args()) {
@@ -467,8 +527,16 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
       /*IsRet=*/false);
   RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
 
-  return determineAndHandleAssignments(Handler, Assigner, SplitArgInfos,
-                                       MIRBuilder, CC, F.isVarArg());
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
+  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
+      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
+    return false;
+
+  if (F.isVarArg())
+    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);
+
+  return true;
 }
 
 bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
index d80a666f3489475..abe704b4a645189 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -42,6 +42,11 @@ class RISCVCallLowering : public CallLowering {
 private:
   bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
                       ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
+
+  void saveVarArgRegisters(MachineIRBuilder &MIRBuilder,
+                           CallLowering::IncomingValueHandler &Handler,
+                           IncomingValueAssigner &Assigner,
+                           CCState &CCInfo) const;
 };
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
new file mode 100644
index 000000000000000..4f4d71de46ec89b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64 %s
+
+define void @va1arg(ptr %a, ...) {
+  ; RV32-LABEL: name: va1arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va1arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va2arg(ptr %a, ptr %b, ...) {
+  ; RV32-LABEL: name: va2arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va2arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
+  ; RV32-LABEL: name: va3arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va3arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
+  ; RV32-LABEL: name: va4arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va4arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
+  ; RV32-LABEL: name: va5arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[CO...
[truncated]

``````````
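
To make the layout in `saveVarArgRegisters` easier to follow, here is a minimal standalone sketch of the same save-area arithmetic: how many bytes of a0-a7 get spilled, which fixed-object offset the first unnamed argument lands at, and when the extra padding slot is added. The names (`VarArgSaveArea`, `computeVarArgSaveArea`, `NamedStackSize`) are hypothetical and used only for illustration; they are not part of the patch.

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical illustration of the save-area bookkeeping in
// saveVarArgRegisters(). FirstUnallocated is the index of the first a-register
// (a0-a7) that does NOT hold a named argument; NamedStackSize is the number of
// bytes of named arguments already passed on the stack.
struct VarArgSaveArea {
  int FirstVarArgOffset; // fixed-object offset of the first unnamed argument
  int SaveSize;          // bytes reserved in the callee frame for spilled regs
};

static VarArgSaveArea computeVarArgSaveArea(unsigned XLenBits,
                                            unsigned FirstUnallocated,
                                            int NamedStackSize) {
  const unsigned NumArgRegs = 8; // a0-a7
  const int XLenBytes = static_cast<int>(XLenBits / 8);

  VarArgSaveArea Area;
  if (FirstUnallocated == NumArgRegs) {
    // Every a-register holds a named argument, so all varargs already live on
    // the caller's stack, right after the named stack arguments.
    Area.FirstVarArgOffset = NamedStackSize;
    Area.SaveSize = 0;
  } else {
    // Spill the remaining a-registers just below the incoming stack pointer.
    Area.SaveSize = XLenBytes * static_cast<int>(NumArgRegs - FirstUnallocated);
    Area.FirstVarArgOffset = -Area.SaveSize;
  }

  // Pad with one extra slot when an odd number of registers is saved so the
  // frame stays 2*XLEN-aligned.
  if (FirstUnallocated % 2)
    Area.SaveSize += XLenBytes;

  return Area;
}

int main() {
  // void @va1arg(ptr %a, ...) on RV64: a0 is named, a1-a7 are spilled (56
  // bytes) plus one pad slot, so the save area is 64 bytes and the first
  // unnamed argument sits at offset -56.
  VarArgSaveArea A = computeVarArgSaveArea(/*XLenBits=*/64,
                                           /*FirstUnallocated=*/1,
                                           /*NamedStackSize=*/0);
  assert(A.SaveSize == 64 && A.FirstVarArgOffset == -56);
  printf("save size = %d, first vararg offset = %d\n", A.SaveSize,
         A.FirstVarArgOffset);
  return 0;
}
```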

</details>
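
For context on why the frame index of the first unnamed argument is recorded: a variadic callee such as the one below calls `va_start`, which the IRTranslator emits as `G_VASTART`, and that operation needs the address of the recorded fixed stack object so `va_arg` can walk the spilled registers and then the caller's stack. This is plain C++ shown for illustration, not code from the patch.

```cpp
#include <cstdarg>
#include <cstdio>

// A plain variadic function. In the RISC-V calling convention the unnamed
// arguments of this call arrive in a1-a7 (and then on the stack); the prologue
// generated by this patch spills those registers into the vararg save area so
// the va_list below can simply advance through contiguous memory.
static int sum(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int);
  va_end(ap);
  return total;
}

int main() {
  printf("%d\n", sum(3, 1, 2, 3)); // prints 6
  return 0;
}
```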


https://github.com/llvm/llvm-project/pull/73064

