[llvm] [GISEL][RISCV] Add G_VACOPY GISEL opcode and add lowering code for it. (PR #73066)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 29 10:44:13 PST 2023


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/73066

>From 82ad45e7eeb753d5d50540281d2be1a3be16a3b7 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 17 Nov 2023 08:58:16 -0800
Subject: [PATCH 1/9] [RISCV][GISEL] Select G_IMPLICIT_DEF

This is similar to the selection of G_IMPLICIT_DEF in AArch64. Register bank
selection needs to be implemented in a future patch. It is not so
straightforward, since the register bank information is ambiguous on its
own and depends on the instruction's uses.
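
For reference, the selection boils down to constraining the destination
register to a class for its assigned bank and then mutating the generic
opcode in place. A condensed sketch of that pattern (simplified from the
RISCVInstructionSelector change below; getRegClassForTypeOnBank, RBI, TRI,
and TII are members of the existing selector):

  bool selectImplicitDef(MachineInstr &MI, MachineRegisterInfo &MRI) const {
    assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
    Register DstReg = MI.getOperand(0).getReg();
    // Pick a register class that fits both the LLT and the register bank.
    const TargetRegisterClass *RC = getRegClassForTypeOnBank(
        MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI))
      return false;
    // With a concrete class on the vreg, the generic G_IMPLICIT_DEF can be
    // rewritten in place to the target-independent IMPLICIT_DEF.
    MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }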
---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  | 23 ++++++
 .../instruction-select/implicit-def32.mir     | 75 +++++++++++++++++++
 .../instruction-select/implicit-def64.mir     | 75 +++++++++++++++++++
 3 files changed, 173 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def64.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 3c72269d1e00c2f..5999fb4840b9ea0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -62,6 +62,8 @@ class RISCVInstructionSelector : public InstructionSelector {
 
   // Custom selection methods
   bool selectCopy(MachineInstr &MI, MachineRegisterInfo &MRI) const;
+  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB,
+                         MachineRegisterInfo &MRI) const;
   bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
   bool selectGlobalValue(MachineInstr &MI, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI) const;
@@ -564,6 +566,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return selectSelect(MI, MIB, MRI);
   case TargetOpcode::G_FCMP:
     return selectFPCompare(MI, MIB, MRI);
+  case TargetOpcode::G_IMPLICIT_DEF:
+    return selectImplicitDef(MI, MIB, MRI);
   default:
     return false;
   }
@@ -677,6 +681,25 @@ bool RISCVInstructionSelector::selectCopy(MachineInstr &MI,
   return true;
 }
 
+bool RISCVInstructionSelector::selectImplicitDef(
+    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
+  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
+
+  const Register DstReg = MI.getOperand(0).getReg();
+  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
+      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
+
+  assert(DstRC &&
+         "Register class not available for LLT, register bank combination");
+
+  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
+    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
+                      << " operand\n");
+  }
+  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
+  return true;
+}
+
 bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                               MachineIRBuilder &MIB) const {
   MachineRegisterInfo &MRI = *MIB.getMRI();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def32.mir
new file mode 100644
index 000000000000000..10a5246a99d2b4d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def32.mir
@@ -0,0 +1,75 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+f -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32F %s
+
+---
+name:            implicit_def_gpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+
+body:             |
+  bb.0:
+    ; RV32F-LABEL: name: implicit_def_gpr
+    ; RV32F: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
+    ; RV32F-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[DEF]], [[DEF]]
+    ; RV32F-NEXT: $x10 = COPY [[ADD]]
+    %0(s32) = G_IMPLICIT_DEF
+    %1(s32) = G_ADD %0, %0
+    $x10 = COPY %1(s32)
+...
+---
+name:            implicit_def_copy_gpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+
+body:             |
+  bb.0:
+    ; RV32F-LABEL: name: implicit_def_copy_gpr
+    ; RV32F: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
+    ; RV32F-NEXT: $x10 = COPY [[DEF]]
+    %0(s32) = G_IMPLICIT_DEF
+    %1(s32) = COPY %0(s32)
+    $x10 = COPY %1(s32)
+...
+
+---
+name:            implicit_def_fpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: fprb }
+  - { id: 1, class: fprb }
+
+body:             |
+  bb.0:
+    ; RV32F-LABEL: name: implicit_def_fpr
+    ; RV32F: [[DEF:%[0-9]+]]:fpr32 = IMPLICIT_DEF
+    ; RV32F-NEXT: [[FADD_S:%[0-9]+]]:fpr32 = nofpexcept FADD_S [[DEF]], [[DEF]], 7
+    ; RV32F-NEXT: $f10_f = COPY [[FADD_S]]
+    %0(s32) = G_IMPLICIT_DEF
+    %1(s32) = G_FADD %0, %0
+    $f10_f = COPY %1(s32)
+...
+---
+name:            implicit_def_copy_fpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: fprb }
+  - { id: 1, class: fprb }
+
+body:             |
+  bb.0:
+    ; RV32F-LABEL: name: implicit_def_copy_fpr
+    ; RV32F: [[DEF:%[0-9]+]]:fpr32 = IMPLICIT_DEF
+    ; RV32F-NEXT: $f10_f = COPY [[DEF]]
+    %0(s32) = G_IMPLICIT_DEF
+    %1(s32) = COPY %0(s32)
+    $f10_f = COPY %1(s32)
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def64.mir
new file mode 100644
index 000000000000000..1aafca8374ff908
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/implicit-def64.mir
@@ -0,0 +1,75 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+d -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64D %s
+
+---
+name:            implicit_def_gpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+
+body:             |
+  bb.0:
+    ; RV64D-LABEL: name: implicit_def_gpr
+    ; RV64D: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
+    ; RV64D-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[DEF]], [[DEF]]
+    ; RV64D-NEXT: $x10 = COPY [[ADD]]
+    %0(s64) = G_IMPLICIT_DEF
+    %1(s64) = G_ADD %0, %0
+    $x10 = COPY %1(s64)
+...
+---
+name:            implicit_def_copy_gpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+
+body:             |
+  bb.0:
+    ; RV64D-LABEL: name: implicit_def_copy_gpr
+    ; RV64D: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
+    ; RV64D-NEXT: $x10 = COPY [[DEF]]
+    %0(s64) = G_IMPLICIT_DEF
+    %1(s64) = COPY %0(s64)
+    $x10 = COPY %1(s64)
+...
+
+---
+name:            implicit_def_fpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: fprb }
+  - { id: 1, class: fprb }
+
+body:             |
+  bb.0:
+    ; RV64D-LABEL: name: implicit_def_fpr
+    ; RV64D: [[DEF:%[0-9]+]]:fpr64 = IMPLICIT_DEF
+    ; RV64D-NEXT: [[FADD_D:%[0-9]+]]:fpr64 = nofpexcept FADD_D [[DEF]], [[DEF]], 7
+    ; RV64D-NEXT: $f10_d = COPY [[FADD_D]]
+    %0(s64) = G_IMPLICIT_DEF
+    %1(s64) = G_FADD %0, %0
+    $f10_d = COPY %1(s64)
+...
+---
+name:            implicit_def_copy_fpr
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: fprb }
+  - { id: 1, class: fprb }
+
+body:             |
+  bb.0:
+    ; RV64D-LABEL: name: implicit_def_copy_fpr
+    ; RV64D: [[DEF:%[0-9]+]]:fpr64 = IMPLICIT_DEF
+    ; RV64D-NEXT: $f10_d = COPY [[DEF]]
+    %0(s64) = G_IMPLICIT_DEF
+    %1(s64) = COPY %0(s64)
+    $f10_d = COPY %1(s64)
+...

>From d066c6f9696fd4509b2fd6d10581051abf9034d7 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 20 Nov 2023 16:30:14 -0800
Subject: [PATCH 2/9] [RISCV][GISEL] Legalize, Regbankselect, and
 instructionselect for G_[UN]MERGE_VALUES

Merging to or unmerging from an s64 on a subtarget that is not 64-bit is
only legal when the subtarget has the D extension and the s64 value uses
an FPR.

All other instances of G_MERGE_VALUES and G_UNMERGE_VALUES that can be made
legal should be narrowed, widened, or replaced by the combiner.
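
On the selection side, the newly legal case maps directly onto the existing
double-precision split/pair pseudos. A condensed sketch of the merge half
(simplified from the RISCVInstructionSelector change below; isRegInFprb and
isRegInGprb are the small bank-query helpers added in this patch):

  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const {
    // An s64 result on the FPR bank built from two s32 GPR pieces becomes
    // BuildPairF64Pseudo; the reverse direction uses SplitF64Pseudo.
    Register Dst = MI.getOperand(0).getReg();
    Register Lo = MI.getOperand(1).getReg();
    Register Hi = MI.getOperand(2).getReg();
    if (!isRegInFprb(Dst, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
      return false;
    MachineInstr *Result =
        MIB.buildInstr(RISCV::BuildPairF64Pseudo, {Dst}, {Lo, Hi});
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
  }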
---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  |  59 ++++++++
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |   4 +
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |  28 ++++
 .../instruction-select/merge-unmerge-rv32.mir |  44 ++++++
 .../bitcast-between-f64-and-i64.ll            |  31 +++++
 .../GlobalISel/legalizer/merge-unmerge-d.mir  |  38 +++++
 .../legalizer/rv32/merge-unmerge.mir          | 131 ++++++++++++++++++
 .../legalizer/rv64/merge-unmerge.mir          | 119 ++++++++++++++++
 .../regbankselect/merge-unmerge-rv32.mir      |  40 ++++++
 9 files changed, 494 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/merge-unmerge-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/bitcast-between-f64-and-i64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/merge-unmerge-d.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/merge-unmerge.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/merge-unmerge.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/merge-unmerge-rv32.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 5999fb4840b9ea0..1808686bd135da2 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -48,6 +48,9 @@ class RISCVInstructionSelector : public InstructionSelector {
   const TargetRegisterClass *
   getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
 
+  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
+  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
+
   // tblgen-erated 'select' implementation, used as the initial selector for
   // the patterns that don't require complex C++.
   bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
@@ -74,6 +77,10 @@ class RISCVInstructionSelector : public InstructionSelector {
                     MachineRegisterInfo &MRI) const;
   bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
                        MachineRegisterInfo &MRI) const;
+  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
+                           MachineRegisterInfo &MRI) const;
+  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
+                           MachineRegisterInfo &MRI) const;
 
   ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
   ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
@@ -568,11 +575,53 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return selectFPCompare(MI, MIB, MRI);
   case TargetOpcode::G_IMPLICIT_DEF:
     return selectImplicitDef(MI, MIB, MRI);
+  case TargetOpcode::G_MERGE_VALUES:
+    return selectMergeValues(MI, MIB, MRI);
+  case TargetOpcode::G_UNMERGE_VALUES:
+    return selectUnmergeValues(MI, MIB, MRI);
   default:
     return false;
   }
 }
 
+bool RISCVInstructionSelector::selectMergeValues(
+    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
+  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);
+
+  // Build an F64 pair from the two operands.
+  if (MI.getNumOperands() != 3)
+    return false;
+  Register Dst = MI.getOperand(0).getReg();
+  Register Lo = MI.getOperand(1).getReg();
+  Register Hi = MI.getOperand(2).getReg();
+  if (!isRegInFprb(Dst, MRI) || !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
+    return false;
+  MachineInstr *Result =
+      MIB.buildInstr(RISCV::BuildPairF64Pseudo, {Dst}, {Lo, Hi});
+
+  MI.eraseFromParent();
+  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
+}
+
+bool RISCVInstructionSelector::selectUnmergeValues(
+    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
+  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
+
+  // Split F64 Src into two s32 parts
+  if (MI.getNumOperands() != 3)
+    return false;
+  Register Src = MI.getOperand(2).getReg();
+  Register Lo = MI.getOperand(0).getReg();
+  Register Hi = MI.getOperand(1).getReg();
+  if (!isRegInFprb(Src, MRI) ||
+      !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
+    return false;
+  MachineInstr *Result = MIB.buildInstr(RISCV::SplitF64Pseudo, {Lo, Hi}, {Src});
+
+  MI.eraseFromParent();
+  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
+}
+
 bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                  MachineIRBuilder &MIB,
                                                  MachineRegisterInfo &MRI) {
@@ -656,6 +705,16 @@ const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
   return nullptr;
 }
 
+bool RISCVInstructionSelector::isRegInGprb(Register Reg,
+                                          MachineRegisterInfo &MRI) const {
+  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::GPRBRegBankID;
+}
+
+bool RISCVInstructionSelector::isRegInFprb(Register Reg,
+                                          MachineRegisterInfo &MRI) const {
+  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::FPRBRegBankID;
+}
+
 bool RISCVInstructionSelector::selectCopy(MachineInstr &MI,
                                           MachineRegisterInfo &MRI) const {
   Register DstReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 9eb5812e024b915..6155e94b58e7343 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -86,6 +86,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
     unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
     unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
     getActionDefinitionsBuilder(Op)
+        .legalIf([=, &ST](const LegalityQuery &Query) -> bool {
+          return ST.hasStdExtD() && typeIs(LitTyIdx, s32)(Query) &&
+                 typeIs(BigTyIdx, s64)(Query);
+        })
         .widenScalarToNextPow2(LitTyIdx, XLen)
         .widenScalarToNextPow2(BigTyIdx, XLen)
         .clampScalar(LitTyIdx, sXLen, sXLen)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 6d77e2b7edd9010..cf5f67ec80fd691 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -353,6 +353,34 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     OpdsMapping[2] = OpdsMapping[3] = getFPValueMapping(Size);
     break;
   }
+  case TargetOpcode::G_MERGE_VALUES: {
+    // Use FPR64 for s64 merge on rv32.
+    assert(MI.getNumOperands() == 3 && "Unsupported G_MERGE_VALUES");
+    LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+    if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+      assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
+      // FIXME: OpdsMapping[0, 1] should probably visit their uses to determine
+      // if GPRValueMapping or FPRValueMapping
+      OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
+      OpdsMapping[1] = GPRValueMapping;
+      OpdsMapping[2] = GPRValueMapping;
+    }
+    break;
+  }
+  case TargetOpcode::G_UNMERGE_VALUES: {
+    // Use FPR64 for s64 unmerge on rv32.
+    assert(MI.getNumOperands() == 3 && "Unsupported G_UNMERGE_VALUES");
+    LLT Ty = MRI.getType(MI.getOperand(2).getReg());
+    if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+      assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
+      // FIXME: OpdsMapping[0, 1] should probably visit their uses to determine
+      // if GPRValueMapping or FPRValueMapping
+      OpdsMapping[0] = GPRValueMapping;
+      OpdsMapping[1] = GPRValueMapping;
+      OpdsMapping[2] = getFPValueMapping(Ty.getSizeInBits());
+    }
+    break;
+  }
   default:
     // By default map all scalars to GPR.
     for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/merge-unmerge-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/merge-unmerge-rv32.mir
new file mode 100644
index 000000000000000..11d3bf6a4cd0e6e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/merge-unmerge-rv32.mir
@@ -0,0 +1,44 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+d -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            merge_i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: merge_i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY]]
+    ; CHECK-NEXT: $f10_d = COPY [[BuildPairF64Pseudo]]
+    ; CHECK-NEXT: PseudoRET implicit $f10_d
+    %0:gprb(s32) = COPY $x10
+    %1:fprb(s64) = G_MERGE_VALUES %0(s32), %0(s32)
+    $f10_d = COPY %1(s64)
+    PseudoRET implicit $f10_d
+...
+---
+name:            unmerge_i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $f10_d
+    ; CHECK-LABEL: name: unmerge_i32
+    ; CHECK: liveins: $f10_d
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $f10_d
+    ; CHECK-NEXT: [[SplitF64Pseudo:%[0-9]+]]:gpr, [[SplitF64Pseudo1:%[0-9]+]]:gpr = SplitF64Pseudo [[COPY]]
+    ; CHECK-NEXT: $x10 = COPY [[SplitF64Pseudo]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:fprb(s64) = COPY $f10_d
+    %1:gprb(s32), %2:gprb(s32) = G_UNMERGE_VALUES %0(s64)
+    $x10 = COPY %1(s32)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/bitcast-between-f64-and-i64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/bitcast-between-f64-and-i64.ll
new file mode 100644
index 000000000000000..5461f7366c523a4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/bitcast-between-f64-and-i64.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d -stop-after=legalizer -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=CHECK %s
+
+define i64 @double_to_i64(double %a) {
+  ; CHECK-LABEL: name: double_to_i64
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $f10_d
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+  ; CHECK-NEXT:   $x10 = COPY [[UV]](s32)
+  ; CHECK-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11
+  %1 = bitcast double %a to i64
+  ret i64 %1
+}
+
+define double @i64_to_double(i64 %a) {
+  ; CHECK-LABEL: name: i64_to_double
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; CHECK-NEXT:   $f10_d = COPY [[MV]](s64)
+  ; CHECK-NEXT:   PseudoRET implicit $f10_d
+  %1 = bitcast i64 %a to double
+  ret double %1
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/merge-unmerge-d.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/merge-unmerge-d.mir
new file mode 100644
index 000000000000000..10c775cd9ecfe1a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/merge-unmerge-d.mir
@@ -0,0 +1,38 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+d -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s
+
+---
+name:            merge_i64
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: merge_i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32)
+    ; CHECK-NEXT: $f10_d = COPY [[MV]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $f10_d
+    %0:_(s32) = COPY $x10
+    %1:_(s64) = G_MERGE_VALUES %0(s32), %0(s32)
+    $f10_d = COPY %1(s64)
+    PseudoRET implicit $f10_d
+...
+---
+name:            unmerge_i32
+body:             |
+  bb.0.entry:
+    liveins: $f10_d
+    ; CHECK-LABEL: name: unmerge_i32
+    ; CHECK: liveins: $f10_d
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK-NEXT: $x10 = COPY [[UV]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $f10_d
+    %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0(s64)
+    $x10 = COPY %1(s32)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/merge-unmerge.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/merge-unmerge.mir
new file mode 100644
index 000000000000000..2e4a39c468111f2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/merge-unmerge.mir
@@ -0,0 +1,131 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck --check-prefix=RV32 %s
+
+---
+name:            merge_i32
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: merge_i32
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
+    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; RV32-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ASSERT_ZEXT]], [[C]](s32)
+    ; RV32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ASSERT_ZEXT]], [[SHL]]
+    ; RV32-NEXT: $x10 = COPY [[OR]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = G_ASSERT_ZEXT %0, 16
+    %2:_(s16) = G_TRUNC %1(s32)
+    %3:_(s32) = G_MERGE_VALUES %2(s16), %2(s16)
+    $x10 = COPY %3(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            merge_i64
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: merge_i64
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32-NEXT: $x10 = COPY [[COPY]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %0:_(s32) = COPY $x10
+    %1:_(s64) = G_MERGE_VALUES %0(s32), %0(s32)
+    %2:_(s32) = G_TRUNC %1(s64)
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            merge_i128
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: merge_i128
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32-NEXT: $x10 = COPY [[COPY]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %1:_(s32) = COPY $x10
+    %2:_(s64) = G_ZEXT %1(s32)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %2(s64)
+    %3:_(s32) = G_TRUNC %0(s128)
+    $x10 = COPY %3(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i32
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: unmerge_i32
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32-NEXT: $x10 = COPY [[COPY]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %0:_(s32) = COPY $x10
+    %1:_(s64) = G_ZEXT %0(s32)
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i64
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: unmerge_i64
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32-NEXT: $x10 = COPY [[COPY]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %0:_(s32) = COPY $x10
+    %1:_(s64) = G_ZEXT %0(s32)
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i128
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: unmerge_i128
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32-NEXT: $x10 = COPY [[C]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %0:_(s32) = COPY $x10
+    %1:_(s128) = G_ZEXT %0(s32)
+    %2:_(s64), %3:_(s64) = G_UNMERGE_VALUES %1(s128)
+    %4:_(s32) = G_TRUNC %3(s64)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i256
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; RV32-LABEL: name: unmerge_i256
+    ; RV32: liveins: $x10
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32-NEXT: $x10 = COPY [[COPY]](s32)
+    ; RV32-NEXT: PseudoRET implicit $x10
+    %0:_(s32) = COPY $x10
+    %1:_(s256) = G_ZEXT %0(s32)
+    %2:_(s128), %3:_(s128) = G_UNMERGE_VALUES %1(s256)
+    %4:_(s32) = G_TRUNC %2(s128)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/merge-unmerge.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/merge-unmerge.mir
new file mode 100644
index 000000000000000..3cf3a7c24863936
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/merge-unmerge.mir
@@ -0,0 +1,119 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name:            merge_i32
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: merge_i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 16
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[C]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[ASSERT_ZEXT]], [[SHL]]
+    ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = G_ASSERT_ZEXT %0, 16
+    %2:_(s16) = G_TRUNC %1(s64)
+    %3:_(s32) = G_MERGE_VALUES %2(s16), %2(s16)
+    %4:_(s64) = G_ZEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            merge_i64
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: merge_i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 32
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[C]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[ASSERT_ZEXT]], [[SHL]]
+    ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = G_ASSERT_ZEXT %0, 32
+    %2:_(s32) = G_TRUNC %1(s64)
+    %3:_(s64) = G_MERGE_VALUES %2(s32), %2(s32)
+    $x10 = COPY %3(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            merge_i128
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: merge_i128
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s128) = G_MERGE_VALUES %0(s64), %0(s64)
+    %2:_(s64) = G_TRUNC %1(s128)
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i32
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: unmerge_i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $x10 = COPY [[AND]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0(s64)
+    %3:_(s64) = G_ZEXT %1(s32)
+    $x10 = COPY %3(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i64
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: unmerge_i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s128) = G_ZEXT %0(s64)
+    %2:_(s64), %3:_(s64) = G_UNMERGE_VALUES %1(s128)
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+---
+name:            unmerge_i128
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: unmerge_i128
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s256) = G_ZEXT %0(s64)
+    %2:_(s128), %3:_(s128) = G_UNMERGE_VALUES %1(s256)
+    %4:_(s64) = G_TRUNC %2(s128)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/merge-unmerge-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/merge-unmerge-rv32.mir
new file mode 100644
index 000000000000000..4785b86a8a78632
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/merge-unmerge-rv32.mir
@@ -0,0 +1,40 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+d -run-pass=regbankselect %s -o - \
+# RUN:   | FileCheck %s
+
+---
+name:            merge_i64
+legalized: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+    ; CHECK-LABEL: name: merge_i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $x10
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32)
+    ; CHECK-NEXT: $f10_d = COPY [[MV]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $f10_d
+    %0:_(s32) = COPY $x10
+    %1:_(s64) = G_MERGE_VALUES %0(s32), %0(s32)
+    $f10_d = COPY %1(s64)
+    PseudoRET implicit $f10_d
+...
+---
+name:            unmerge_i32
+legalized: true
+body:             |
+  bb.0.entry:
+    liveins: $f10_d
+    ; CHECK-LABEL: name: unmerge_i32
+    ; CHECK: liveins: $f10_d
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fprb(s64) = COPY $f10_d
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:gprb(s32), [[UV1:%[0-9]+]]:gprb(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK-NEXT: $x10 = COPY [[UV]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = COPY $f10_d
+    %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0(s64)
+    $x10 = COPY %1(s32)
+    PseudoRET implicit $x10
+...

>From cff7b6e85ea3f9f7b19c14047b5dc93e62675987 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 17 Nov 2023 07:37:47 -0800
Subject: [PATCH 3/9] [RISCV][GISEL] Legalize G_PTRMASK

G_PTRMASK is custom legalized by using G_PTRTOINT on the pointer, applying
the mask with G_AND, and converting the result back to a pointer with
G_INTTOPTR.
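
A condensed sketch of that expansion with MachineIRBuilder (simplified from
the legalizePtrMask implementation below; MI is the G_PTRMASK being replaced
and MIRBuilder is the helper's builder):

  // %int    = G_PTRTOINT %ptr
  // %masked = G_AND %int, %mask
  // %res    = G_INTTOPTR %masked
  LLT IntTy = MRI.getType(MI.getOperand(2).getReg());
  auto PtrInt = MIRBuilder.buildPtrToInt(IntTy, MI.getOperand(1).getReg());
  auto Masked = MIRBuilder.buildAnd(IntTy, PtrInt, MI.getOperand(2).getReg());
  MIRBuilder.buildIntToPtr(MI.getOperand(0).getReg(), Masked);
  MI.eraseFromParent();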
---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 23 ++++++++++++++++++
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |  3 +++
 .../legalizer/rv32/legalize-ptrmask.mir       | 24 +++++++++++++++++++
 .../legalizer/rv64/legalize-ptrmask.mir       | 24 +++++++++++++++++++
 4 files changed, 74 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-ptrmask.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-ptrmask.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 6155e94b58e7343..e49348ac46ddaa9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -151,6 +151,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
 
   getActionDefinitionsBuilder(G_PTR_ADD).legalFor({{p0, sXLen}});
 
+  getActionDefinitionsBuilder(G_PTRMASK).customFor({{p0, sXLen}});
+
   getActionDefinitionsBuilder(G_PTRTOINT)
       .legalFor({{sXLen, p0}})
       .clampScalar(0, sXLen, sXLen);
@@ -292,6 +294,25 @@ bool RISCVLegalizerInfo::legalizeShlAshrLshr(
   return true;
 }
 
+bool RISCVLegalizerInfo::legalizePtrMask(MachineInstr &MI,
+                                         MachineIRBuilder &MIRBuilder,
+                                         GISelChangeObserver &Observer) const {
+  assert(MI.getOpcode() == TargetOpcode::G_PTRMASK);
+
+  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
+  Register Tmp1 =
+      MRI.createGenericVirtualRegister(MRI.getType(MI.getOperand(2).getReg()));
+  Register Tmp2 =
+      MRI.createGenericVirtualRegister(MRI.getType(MI.getOperand(2).getReg()));
+  MIRBuilder.buildPtrToInt(Tmp1, MI.getOperand(1).getReg());
+  MIRBuilder.buildAnd(Tmp2, Tmp1, MI.getOperand(2).getReg());
+  MIRBuilder.buildIntToPtr(MI.getOperand(0).getReg(), Tmp2);
+
+  Observer.erasingInstr(MI);
+  MI.eraseFromParent();
+  return true;
+}
+
 bool RISCVLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
@@ -313,6 +334,8 @@ bool RISCVLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
     return Helper.lower(MI, 0, /* Unused hint type */ LLT()) ==
            LegalizerHelper::Legalized;
   }
+  case TargetOpcode::G_PTRMASK:
+    return legalizePtrMask(MI, MIRBuilder, Observer);
   }
 
   llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index f39d3a130d85063..ff2be0622023795 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -31,6 +31,9 @@ class RISCVLegalizerInfo : public LegalizerInfo {
 private:
   bool legalizeShlAshrLshr(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
                            GISelChangeObserver &Observer) const;
+
+  bool legalizePtrMask(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+                       GISelChangeObserver &Observer) const;
 };
 } // end namespace llvm
 #endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-ptrmask.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-ptrmask.mir
new file mode 100644
index 000000000000000..7de567a765c0c00
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-ptrmask.mir
@@ -0,0 +1,24 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            ptrmask_p0_s32
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+    ; CHECK-LABEL: name: ptrmask_p0_s32
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p0)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[PTRTOINT]], [[COPY1]]
+    ; CHECK-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
+    ; CHECK-NEXT: $x10 = COPY [[INTTOPTR]](p0)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(p0) = G_PTRMASK %0(p0), %1(s32)
+    $x10 = COPY %2(p0)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-ptrmask.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-ptrmask.mir
new file mode 100644
index 000000000000000..b7223c1e2ca2ea6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-ptrmask.mir
@@ -0,0 +1,24 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            ptrmask_p0_s64
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+    ; CHECK-LABEL: name: ptrmask_p0_s64
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY]](p0)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[PTRTOINT]], [[COPY1]]
+    ; CHECK-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s64)
+    ; CHECK-NEXT: $x10 = COPY [[INTTOPTR]](p0)
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:_(p0) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(p0) = G_PTRMASK %0(p0), %1(s64)
+    $x10 = COPY %2(p0)
+    PseudoRET implicit $x10
+...

>From 93e32a369ff8b153cc21fc5232bbba9bc7167a7a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 21 Nov 2023 13:58:37 -0800
Subject: [PATCH 4/9] [RISCV][GISEL] Legalize G_VASTART using custom
 legalization
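
G_VASTART is lowered to a store of the address of the VarArgsFrameIndex
slot (recorded during call lowering) through the pointer operand of the
G_VASTART. A condensed sketch of legalizeVAStart (see the full
implementation below):

  int FI = MF->getInfo<RISCVMachineFunctionInfo>()->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  // Materialize the address of the vararg save area...
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  // ...and store it to the va_list object pointed to by operand 0.
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();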

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 24 +++++++++++++++++++
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |  3 +++
 2 files changed, 27 insertions(+)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e49348ac46ddaa9..98c918ebcf4ec63 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVLegalizerInfo.h"
+#include "RISCVMachineFunctionInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -267,6 +268,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
   getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
       .libcallFor({s32, s64});
 
+  getActionDefinitionsBuilder(G_VASTART).customFor({p0});
+
   getLegacyLegalizerInfo().computeTables();
 }
 
@@ -313,6 +316,25 @@ bool RISCVLegalizerInfo::legalizePtrMask(MachineInstr &MI,
   return true;
 }
 
+bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
+                                         MachineIRBuilder &MIRBuilder,
+                                         GISelChangeObserver &Observer) const {
+  // Stores the address of the VarArgsFrameIndex slot into the memory location
+  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
+  MachineFunction *MF = MI.getParent()->getParent();
+  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
+  int FI = FuncInfo->getVarArgsFrameIndex();
+  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
+  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
+  assert(MI.hasOneMemOperand());
+  MachineInstr *LoweredMI = MIRBuilder.buildStore(
+      FINAddr, MI.getOperand(0).getReg(), *MI.memoperands()[0]);
+  Observer.createdInstr(*LoweredMI);
+  Observer.erasingInstr(MI);
+  MI.eraseFromParent();
+  return true;
+}
+
 bool RISCVLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
@@ -336,6 +358,8 @@ bool RISCVLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
   }
   case TargetOpcode::G_PTRMASK:
     return legalizePtrMask(MI, MIRBuilder, Observer);
+  case TargetOpcode::G_VASTART:
+    return legalizeVAStart(MI, MIRBuilder, Observer);
   }
 
   llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index ff2be0622023795..daf6b2d13d1cd4f 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -34,6 +34,9 @@ class RISCVLegalizerInfo : public LegalizerInfo {
 
   bool legalizePtrMask(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
                        GISelChangeObserver &Observer) const;
+
+  bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+                       GISelChangeObserver &Observer) const;
 };
 } // end namespace llvm
 #endif

>From 236b000cb5da427f4ccca61e1349a3c878a1d56f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 21 Nov 2023 07:05:09 -0800
Subject: [PATCH 5/9] [RISCV][GISEL] lowerFormalArguments for variadic
 arguments
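
Named arguments continue to go through the standard assignment path; the
new saveVarArgRegisters hook then spills any of a0-a7 not consumed by named
arguments into fixed stack objects so that va_arg can later walk the save
area. A condensed sketch of that loop (simplified from the change below;
the odd-register padding slot is omitted):

  for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
    // Copy the physical argument register into a fresh XLen-wide vreg...
    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
    Handler.assignValueToReg(
        VReg, ArgRegs[I],
        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenMVT,
                            ArgRegs[I], XLenMVT, CCValAssign::Full));
    // ...and store it to its slot in the varargs save area.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, /*IsImmutable=*/true);
    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
    auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
    MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
  }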

---
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  |  85 +++-
 .../Target/RISCV/GISel/RISCVCallLowering.h    |   5 +
 .../irtranslator/lower-args-vararg.ll         | 365 ++++++++++++++++++
 3 files changed, 447 insertions(+), 8 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 1aba8a8f52e96fc..2367712925e2b09 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -423,18 +423,79 @@ bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
   return true;
 }
 
+static const MCPhysReg ArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
+                                    RISCV::X13, RISCV::X14, RISCV::X15,
+                                    RISCV::X16, RISCV::X17};
+
+/// If there are varargs that were passed in a0-a7, the data in those registers
+/// must be copied to the varargs save area on the stack.
+void RISCVCallLowering::saveVarArgRegisters(
+    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
+    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+  unsigned XLenInBytes = Subtarget.getXLen() / 8;
+  ArrayRef<MCPhysReg> ArgRegs(ArgGPRs);
+  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+
+  // Offset of the first variable argument from stack pointer, and size of
+  // the vararg save area. For now, the varargs save area is either zero or
+  // large enough to hold a0-a7.
+  int VaArgOffset, VarArgsSaveSize;
+  // If all registers are allocated, then all varargs must be passed on the
+  // stack and we don't need to save any argregs.
+  if (ArgRegs.size() == Idx) {
+    VaArgOffset = Assigner.StackSize;
+    VarArgsSaveSize = 0;
+  } else {
+    VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+    VaArgOffset = -VarArgsSaveSize;
+  }
+
+  // Record the frame index of the first variable argument, which is needed
+  // by G_VASTART.
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  RVFI->setVarArgsFrameIndex(FI);
+
+  // If saving an odd number of registers, create an extra stack slot to
+  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+  // offsets to even-numbered registers remain 2*XLEN-aligned.
+  if (Idx % 2) {
+    MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+    VarArgsSaveSize += XLenInBytes;
+  }
+  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
+
+  // Copy the integer registers that may have been used for passing varargs
+  // to the vararg save area.
+  const LLT p0 = LLT::pointer(0, Subtarget.getXLen());
+  const LLT sXLen = LLT::scalar(Subtarget.getXLen());
+  const MVT XLenMVT = MVT::getIntegerVT(Subtarget.getXLen());
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
+    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
+    Handler.assignValueToReg(
+        VReg, ArgRegs[I],
+        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenMVT,
+                            ArgRegs[I], XLenMVT, CCValAssign::Full));
+    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
+    auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
+    MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+  }
+}
+
 bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                              const Function &F,
                                              ArrayRef<ArrayRef<Register>> VRegs,
                                              FunctionLoweringInfo &FLI) const {
-  // Early exit if there are no arguments.
-  if (F.arg_empty())
+  // Early exit if there are no arguments. varargs are not part of F.args() but
+  // must be lowered.
+  if (F.arg_empty() && !F.isVarArg())
     return true;
 
-  // TODO: Support vararg functions.
-  if (F.isVarArg())
-    return false;
-
   const RISCVSubtarget &Subtarget =
       MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
   for (auto &Arg : F.args()) {
@@ -467,8 +528,16 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
       /*IsRet=*/false);
   RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
 
-  return determineAndHandleAssignments(Handler, Assigner, SplitArgInfos,
-                                       MIRBuilder, CC, F.isVarArg());
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
+  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
+      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
+    return false;
+
+  if (F.isVarArg())
+    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);
+
+  return true;
 }
 
 bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
index d80a666f3489475..abe704b4a645189 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -42,6 +42,11 @@ class RISCVCallLowering : public CallLowering {
 private:
   bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
                       ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
+
+  void saveVarArgRegisters(MachineIRBuilder &MIRBuilder,
+                           CallLowering::IncomingValueHandler &Handler,
+                           IncomingValueAssigner &Assigner,
+                           CCState &CCInfo) const;
 };
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
new file mode 100644
index 000000000000000..4f4d71de46ec89b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64 %s
+
+define void @va1arg(ptr %a, ...) {
+  ; RV32-LABEL: name: va1arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va1arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va2arg(ptr %a, ptr %b, ...) {
+  ; RV32-LABEL: name: va2arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va2arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
+  ; RV32-LABEL: name: va3arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va3arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
+  ; RV32-LABEL: name: va4arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va4arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
+  ; RV32-LABEL: name: va5arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va5arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
+  ; RV32-LABEL: name: va6arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va6arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...) {
+  ; RV32-LABEL: name: va7arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va7arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va8arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ptr %h, ...) {
+  ; RV32-LABEL: name: va8arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va8arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}

>From dc978e75c1c02eaa70e8a46f190b04e5a0f6153d Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 17 Nov 2023 07:27:37 -0800
Subject: [PATCH 6/9] [RISCV][GISEL] Legalize G_VAARG through expansion.

G_VAARG can be expanded similarly to SelectionDAG::expandVAArg through
LegalizerHelper::lower. This patch implements the lowering using this
style of expansion.

The expansion loads the head of the va_list through the pointer to the
va_list. The head is then adjusted according to the argument's alignment,
giving the address of the element to be read out of the va_list. Next, the
head is bumped past that element, and the new head is stored back through
the original va_list pointer so that subsequent G_VAARG instructions read
the next element in the list. Lastly, the element itself is loaded from the
alignment-adjusted address computed earlier.
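
As a rough illustration, for a 4-byte element with 4-byte alignment on a
32-bit target the expansion builds a sequence along these lines (register
names are illustrative; the alignment adjustment is only emitted when the
requested alignment exceeds the minimum stack argument alignment, and
G_PTRMASK may itself be legalized further):

  %head:_(p0)    = G_LOAD %list(p0)              ; current head of the va_list
  %c3:_(s32)     = G_CONSTANT i32 3
  %bumped:_(p0)  = G_PTR_ADD %head, %c3(s32)     ; head + (align - 1)
  %cm4:_(s32)    = G_CONSTANT i32 -4
  %aligned:_(p0) = G_PTRMASK %bumped, %cm4(s32)  ; round down to the alignment
  %c4:_(s32)     = G_CONSTANT i32 4
  %next:_(p0)    = G_PTR_ADD %aligned, %c4(s32)  ; advance past this element
  G_STORE %next(p0), %list(p0)                   ; write the new head back
  %x:_(s32)      = G_LOAD %aligned(p0)           ; load the argument itself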
---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |  1 +
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    | 67 +++++++++++++++++++
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  5 ++
 .../legalizer/rv32/legalize-vaarg.mir         | 40 +++++++++++
 .../legalizer/rv64/legalize-vaarg.mir         | 40 +++++++++++
 5 files changed, 153 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-vaarg.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-vaarg.mir

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 86d3cb2bedb95b6..350c91ad6fa4f61 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -430,6 +430,7 @@ class LegalizerHelper {
   LegalizeResult lowerVectorReduction(MachineInstr &MI);
   LegalizeResult lowerMemcpyInline(MachineInstr &MI);
   LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
+  LegalizeResult lowerVAArg(MachineInstr &MI);
 };
 
 /// Helper function that creates a libcall to the given \p Name using the given
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index dd5577d47f97764..310b71dca37bf3c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3780,6 +3780,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
     return lowerTRUNC(MI);
   GISEL_VECREDUCE_CASES_NONSEQ
     return lowerVectorReduction(MI);
+  case G_VAARG:
+    return lowerVAArg(MI);
   }
 }
 
@@ -7865,6 +7867,71 @@ LegalizerHelper::lowerVectorReduction(MachineInstr &MI) {
   return UnableToLegalize;
 }
 
+static Type *getTypeForLLT(LLT Ty, LLVMContext &C);
+
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerVAArg(MachineInstr &MI) {
+  Observer.changingInstr(MI);
+  MachineFunction &MF = *MI.getMF();
+  const DataLayout &DL = MIRBuilder.getDataLayout();
+  LLVMContext &Ctx = MF.getFunction().getContext();
+  Register ListPtr = MI.getOperand(1).getReg();
+  LLT PtrTy = MRI.getType(ListPtr);
+
+  // ListPtr is a pointer to the va_list. Load it to get the address of the
+  // current head of the list.
+  Align PtrAlignment = Align(DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx)));
+  MachineMemOperand *PtrLoadMMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                              MachineMemOperand::MOLoad, PtrTy, PtrAlignment);
+  Register HeadOfList = MRI.createGenericVirtualRegister(PtrTy);
+  Register VAList =
+      MIRBuilder.buildLoad(HeadOfList, ListPtr, *PtrLoadMMO).getReg(0);
+
+  const MaybeAlign MA(MI.getOperand(2).getImm());
+  LLT PtrTyAsScalarTy = LLT::scalar(PtrTy.getSizeInBits());
+  if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
+    Register AlignAmt =
+        MIRBuilder.buildConstant(PtrTyAsScalarTy, MA->value() - 1).getReg(0);
+    Register AddDst = MRI.createGenericVirtualRegister(PtrTy);
+    MIRBuilder.buildPtrAdd(AddDst, HeadOfList, AlignAmt);
+    Register Mask =
+        MIRBuilder.buildConstant(PtrTyAsScalarTy, -(int64_t)MA->value())
+            .getReg(0);
+    Register AndDst = MRI.createGenericVirtualRegister(PtrTy);
+    VAList = MIRBuilder.buildPtrMask(AndDst, AddDst, Mask).getReg(0);
+  }
+
+  // Increment the pointer, VAList, to the next vaarg.
+  // The list is bumped by the allocation size of the element at the current
+  // head of the list.
+  Register Dst = MI.getOperand(0).getReg();
+  LLT Ty = MRI.getType(Dst);
+  Register IncAmt =
+      MIRBuilder
+          .buildConstant(PtrTyAsScalarTy,
+                         DL.getTypeAllocSize(getTypeForLLT(Ty, Ctx)))
+          .getReg(0);
+  Register Succ = MRI.createGenericVirtualRegister(PtrTy);
+  MIRBuilder.buildPtrAdd(Succ, VAList, IncAmt);
+
+  // Store the incremented VAList back to ListPtr.
+  MachineMemOperand *StoreMMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                              MachineMemOperand::MOStore, PtrTy, PtrAlignment);
+  MIRBuilder.buildStore(Succ, ListPtr, *StoreMMO);
+  // Load the actual argument out of the pointer VAList
+  Align EltAlignment = Align(DL.getABITypeAlign(getTypeForLLT(Ty, Ctx)));
+  MachineMemOperand *EltLoadMMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                              MachineMemOperand::MOLoad, Ty, EltAlignment);
+  MIRBuilder.buildLoad(Dst, VAList, *EltLoadMMO);
+
+  Observer.changedInstr(MI);
+  Observer.erasingInstr(MI);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
   // On Darwin, -Os means optimize for size without hurting performance, so
   // only really optimize for size when -Oz (MinSize) is used.
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 98c918ebcf4ec63..c62233faecfdde0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -270,6 +270,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
 
   getActionDefinitionsBuilder(G_VASTART).customFor({p0});
 
+  // va_list must be a pointer, but most sized types are pretty easy to handle
+  // as the destination.
+  getActionDefinitionsBuilder(G_VAARG).lowerForCartesianProduct(
+      {s8, s16, s32, s64, p0}, {p0});
+
   getLegacyLegalizerInfo().computeTables();
 }
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-vaarg.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-vaarg.mir
new file mode 100644
index 000000000000000..4876e0d5ee96b2f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-vaarg.mir
@@ -0,0 +1,40 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - | FileCheck %s
+
+# On RISC-V, the MinStackArgumentAlignment is 1 and the ABI Alignment for p0 is
+# greater than 1, so we will always generate code to adjust for this alignment.
+
+--- |
+  define void @va_arg() {
+    %va = alloca ptr, align 4
+    %1 = va_arg ptr %va, i32
+    ret void
+  }
+...
+---
+name:            va_arg
+legalized:       false
+tracksRegLiveness: true
+stack:
+  - { id: 0, name: va, type: default, offset: 0, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+body:             |
+  bb.1 (%ir-block.0):
+    ; CHECK-LABEL: name: va_arg
+    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0))
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -4
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[PTR_ADD]](p0)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[PTRTOINT]], [[C1]]
+    ; CHECK-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C2]](s32)
+    ; CHECK-NEXT: G_STORE [[PTR_ADD1]](p0), [[FRAME_INDEX]](p0) :: (store (p0))
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = G_FRAME_INDEX %stack.0.va
+    %1:_(s32) = G_VAARG %0(p0), 4
+    PseudoRET
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-vaarg.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-vaarg.mir
new file mode 100644
index 000000000000000..c32f590479c7aa3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-vaarg.mir
@@ -0,0 +1,40 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - | FileCheck %s
+
+# On RISC-V, the MinStackArgumentAlignment is 1 and the ABI Alignment for p0 is
+# greater than 1, so we will always generate code to adjust for this alignment.
+
+--- |
+  define void @va_arg() {
+    %va = alloca ptr, align 8
+    %1 = va_arg ptr %va, i32
+    ret void
+  }
+...
+---
+name:            va_arg
+legalized:       false
+tracksRegLiveness: true
+stack:
+  - { id: 0, name: va, type: default, offset: 0, size: 8, alignment: 4,
+      stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+body:             |
+  bb.1 (%ir-block.0):
+    ; CHECK-LABEL: name: va_arg
+    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0))
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[PTR_ADD]](p0)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[PTRTOINT]], [[C1]]
+    ; CHECK-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s64)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C2]](s64)
+    ; CHECK-NEXT: G_STORE [[PTR_ADD1]](p0), [[FRAME_INDEX]](p0) :: (store (p0))
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = G_FRAME_INDEX %stack.0.va
+    %1:_(s32) = G_VAARG %0(p0), 4
+    PseudoRET
+...

>From 907eb7773e4c43f13dcbda68e6fd0e7ef118ed42 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 17 Nov 2023 09:42:48 -0800
Subject: [PATCH 7/9] [GISEL][RISCV] Add G_VACOPY GISEL opcode and add lowering
 code for it.

RISC-V ISel makes use of the lowering code.
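
As a rough sketch (names are illustrative), llvm.va_copy is translated to the
new opcode and the generic lowering then turns it into a load/store pair on
targets whose va_list is a single pointer:

  ; IR:   call void @llvm.va_copy(ptr %dst, ptr %src)
  ; gMIR: G_VACOPY %dst(p0), %src(p0)
  ; after lowering:
  %tmp:_(p0) = G_LOAD %src(p0)          ; read the current source va_list head
  G_STORE %tmp(p0), %dst(p0)            ; store it into the destination va_list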
---
 llvm/docs/GlobalISel/GenericOpcode.rst        |   11 +
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |    1 +
 llvm/include/llvm/Support/TargetOpcodes.def   |    6 +-
 llvm/include/llvm/Target/GenericOpcodes.td    |    7 +
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |    6 +
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |   31 +
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |    3 +
 .../RISCV/GlobalISel/irtranslator/vacopy.ll   |   28 +
 .../RISCV/GlobalISel/irtranslator/vararg.ll   | 1352 +++++++++++-
 llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll  | 1896 +++++++++++++++++
 10 files changed, 3328 insertions(+), 13 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll

diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 6c42ddcaff1eccf..7d71fa313108693 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -898,6 +898,17 @@ G_VAARG
 
   I found no documentation for this instruction at the time of writing.
 
+G_VACOPY
+^^^^^^^^
+
+G_VACOPY copies the source va_list into the destination va_list in a
+target-dependent way. A dedicated opcode is necessary because, depending on
+the target's va_list representation, the copy may be arbitrarily complex.
+
+.. code-block:: none
+
+  G_VACOPY %2(p0), %3(p0)
+
 Other Operations
 ----------------
 
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 350c91ad6fa4f61..a2a343144e82976 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -431,6 +431,7 @@ class LegalizerHelper {
   LegalizeResult lowerMemcpyInline(MachineInstr &MI);
   LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
   LegalizeResult lowerVAArg(MachineInstr &MI);
+  LegalizeResult lowerVACopy(MachineInstr &MI);
 };
 
 /// Helper function that creates a libcall to the given \p Name using the given
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 941c6d5f8cad8ce..5c3da9e65c74060 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -454,9 +454,13 @@ HANDLE_TARGET_OPCODE(G_FCONSTANT)
 /// Generic va_start instruction. Stores to its one pointer operand.
 HANDLE_TARGET_OPCODE(G_VASTART)
 
-/// Generic va_start instruction. Stores to its one pointer operand.
+/// Generic va_arg instruction. Stores to its one pointer operand.
 HANDLE_TARGET_OPCODE(G_VAARG)
 
+/// Generic va_copy instruction. Copies the source va_list into the
+/// destination va_list.
+HANDLE_TARGET_OPCODE(G_VACOPY)
+
 // Generic sign extend
 HANDLE_TARGET_OPCODE(G_SEXT)
 HANDLE_TARGET_OPCODE(G_SEXT_INREG)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 9a9c09d3c20d612..3b26ab35fa509f2 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -155,6 +155,13 @@ def G_VASTART : GenericInstruction {
   let mayStore = true;
 }
 
+def G_VACOPY : GenericInstruction {
+  let OutOperandList = (outs);
+  let InOperandList = (ins type0:$dest, type0:$src);
+  let hasSideEffects = true;
+  let mayStore = true;
+}
+
 def G_VAARG : GenericInstruction {
   let OutOperandList = (outs type0:$val);
   let InOperandList = (ins type1:$list, unknown:$align);
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 62450e4c43ff3e6..8f898c7d500da20 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2075,6 +2075,12 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                                 ListSize, Align(1)));
     return true;
   }
+  case Intrinsic::vacopy: {
+    Register DstList = getOrCreateVReg(*CI.getArgOperand(0));
+    Register SrcList = getOrCreateVReg(*CI.getArgOperand(1));
+    MIRBuilder.buildInstr(TargetOpcode::G_VACOPY, {}, {DstList, SrcList});
+    return true;
+  }
   case Intrinsic::dbg_value: {
     // This form of DBG_VALUE is target-independent.
     const DbgValueInst &DI = cast<DbgValueInst>(CI);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 310b71dca37bf3c..1c254883c7bc6da 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3782,6 +3782,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
     return lowerVectorReduction(MI);
   case G_VAARG:
     return lowerVAArg(MI);
+  case G_VACOPY:
+    return lowerVACopy(MI);
   }
 }
 
@@ -7932,6 +7934,35 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerVAArg(MachineInstr &MI) {
   return Legalized;
 }
 
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerVACopy(MachineInstr &MI) {
+  MachineFunction &MF = *MI.getMF();
+  const DataLayout &DL = MIRBuilder.getDataLayout();
+  LLVMContext &Ctx = MF.getFunction().getContext();
+
+  Register DstLst = MI.getOperand(0).getReg();
+  LLT PtrTy = MRI.getType(DstLst);
+
+  // Load the source va_list
+  Align Alignment = Align(DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx)));
+  MachineMemOperand *LoadMMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                              MachineMemOperand::MOLoad, PtrTy, Alignment);
+  Register Tmp = MRI.createGenericVirtualRegister(PtrTy);
+  Register SrcLst = MI.getOperand(1).getReg();
+  MIRBuilder.buildLoad(Tmp, SrcLst, *LoadMMO);
+
+  // Store the result in the destination va_list
+  MachineMemOperand *StoreMMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                              MachineMemOperand::MOStore, PtrTy, Alignment);
+  MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);
+
+  Observer.changedInstr(MI);
+  Observer.erasingInstr(MI);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
   // On Darwin, -Os means optimize for size without hurting performance, so
   // only really optimize for size when -Oz (MinSize) is used.
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index c62233faecfdde0..e1b4cdf486577cf 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -275,6 +275,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
   getActionDefinitionsBuilder(G_VAARG).lowerForCartesianProduct(
       {s8, s16, s32, s64, p0}, {p0});
 
+  // The va_list arguments must be pointers.
+  getActionDefinitionsBuilder(G_VACOPY).lowerFor({p0});
+
   getLegacyLegalizerInfo().computeTables();
 }
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll
new file mode 100644
index 000000000000000..1fdd2e1cdc7650b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+declare void @llvm.va_copy(ptr, ptr)
+define void @test_va_copy(ptr %dest_list, ptr %src_list) {
+  ; RV32I-LABEL: name: test_va_copy
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10, $x11
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32I-NEXT:   G_VACOPY [[COPY]](p0), [[COPY1]]
+  ; RV32I-NEXT:   PseudoRET
+  ;
+  ; RV64I-LABEL: name: test_va_copy
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10, $x11
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64I-NEXT:   G_VACOPY [[COPY]](p0), [[COPY1]]
+  ; RV64I-NEXT:   PseudoRET
+  call void @llvm.va_copy(ptr %dest_list, ptr %src_list)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
index e03ef07b5fc0dd3..42d40bb68270374 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
@@ -1,26 +1,27 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=ILP32 %s
+; RUN:   | FileCheck -check-prefixes=RV32,ILP32 %s
 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV32D-ILP32 %s
+; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32f \
 ; RUN:     -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV32D-ILP32F %s
+; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32d \
 ; RUN:     -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV32D-ILP32D %s
+; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=LP64 %s
+; RUN:   | FileCheck -check-prefixes=RV64,LP64 %s
 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64f \
 ; RUN:     -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=LP64F %s
+; RUN:   | FileCheck -check-prefixes=RV64,LP64F %s
 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64d \
 ; RUN:     -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=LP64D %s
+; RUN:   | FileCheck -check-prefixes=RV64,LP64D %s
 
 ; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
-; lp64/lp64f/lp64d. Different CHECK lines are required for RV32D due to slight
-; codegen differences due to the way the f64 load operations are lowered.
+; lp64/lp64f/lp64d. Different CHECK lines are required due to slight codegen
+; differences in the way the f64 load operations are lowered and because the
+; PseudoCALL specifies the calling convention.
 ; The nounwind attribute is omitted for some of the tests, to check that CFI
 ; directives are correctly generated.
 
@@ -29,7 +30,483 @@ declare void @llvm.va_end(ptr)
 
 declare void @notdead(ptr)
 
-declare i32 @va1(ptr %fmt, ...)
+; Although frontends are recommended not to generate va_arg due to the lack of
+; support for aggregate types, we test simple cases here to ensure they are
+; lowered correctly.
+
+define i32 @va1(ptr %fmt, ...) {
+  ; RV32-LABEL: name: va1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX7]](p0) :: (dereferenceable load (p0) from %ir.va)
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C]](s32)
+  ; RV32-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX7]](p0) :: (store (p0) into %ir.va)
+  ; RV32-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
+  ; RV32-NEXT:   $x10 = COPY [[LOAD1]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64-LABEL: name: va1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX7]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C]](s64)
+  ; RV64-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX7]](p0) :: (store (p0) into %ir.va, align 4)
+  ; RV64-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
+  ; RV64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
+  ; RV64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va, align 4
+  %1 = load i32, ptr %argp.cur, align 4
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+; Ensure the adjustment when restoring the stack pointer using the frame
+; pointer is correct
+define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
+  ; ILP32-LABEL: name: va1_va_arg_alloca
+  ; ILP32: bb.1 (%ir-block.0):
+  ; ILP32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; ILP32-NEXT: {{  $}}
+  ; ILP32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; ILP32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; ILP32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; ILP32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; ILP32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; ILP32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; ILP32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; ILP32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; ILP32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; ILP32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; ILP32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; ILP32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; ILP32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; ILP32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; ILP32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; ILP32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; ILP32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; ILP32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; ILP32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; ILP32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; ILP32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; ILP32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; ILP32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; ILP32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; ILP32-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; ILP32-NEXT:   [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C]]
+  ; ILP32-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+  ; ILP32-NEXT:   [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C1]]
+  ; ILP32-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
+  ; ILP32-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
+  ; ILP32-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
+  ; ILP32-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; ILP32-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; ILP32-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+  ; ILP32-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; ILP32-NEXT:   $x10 = COPY [[VAARG]](s32)
+  ; ILP32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV32D-ILP32-LABEL: name: va1_va_arg_alloca
+  ; RV32D-ILP32: bb.1 (%ir-block.0):
+  ; RV32D-ILP32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32D-ILP32-NEXT: {{  $}}
+  ; RV32D-ILP32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32D-ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32D-ILP32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32D-ILP32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32D-ILP32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32D-ILP32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32D-ILP32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32D-ILP32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32D-ILP32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32D-ILP32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32D-ILP32-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32-NEXT:   [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C]]
+  ; RV32D-ILP32-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+  ; RV32D-ILP32-NEXT:   [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C1]]
+  ; RV32D-ILP32-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
+  ; RV32D-ILP32-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
+  ; RV32D-ILP32-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
+  ; RV32D-ILP32-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; RV32D-ILP32-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+  ; RV32D-ILP32-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[VAARG]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV32D-ILP32F-LABEL: name: va1_va_arg_alloca
+  ; RV32D-ILP32F: bb.1 (%ir-block.0):
+  ; RV32D-ILP32F-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32D-ILP32F-NEXT: {{  $}}
+  ; RV32D-ILP32F-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32D-ILP32F-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32D-ILP32F-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32D-ILP32F-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32D-ILP32F-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32D-ILP32F-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32D-ILP32F-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32D-ILP32F-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32D-ILP32F-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32D-ILP32F-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32D-ILP32F-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32F-NEXT:   [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C]]
+  ; RV32D-ILP32F-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+  ; RV32D-ILP32F-NEXT:   [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C1]]
+  ; RV32D-ILP32F-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
+  ; RV32D-ILP32F-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
+  ; RV32D-ILP32F-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
+  ; RV32D-ILP32F-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; RV32D-ILP32F-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+  ; RV32D-ILP32F-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[VAARG]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV32D-ILP32D-LABEL: name: va1_va_arg_alloca
+  ; RV32D-ILP32D: bb.1 (%ir-block.0):
+  ; RV32D-ILP32D-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32D-ILP32D-NEXT: {{  $}}
+  ; RV32D-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32D-ILP32D-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32D-ILP32D-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32D-ILP32D-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32D-ILP32D-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32D-ILP32D-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32D-ILP32D-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32D-ILP32D-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32D-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32D-ILP32D-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32D-ILP32D-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32D-NEXT:   [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C]]
+  ; RV32D-ILP32D-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+  ; RV32D-ILP32D-NEXT:   [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C1]]
+  ; RV32D-ILP32D-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
+  ; RV32D-ILP32D-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
+  ; RV32D-ILP32D-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
+  ; RV32D-ILP32D-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; RV32D-ILP32D-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+  ; RV32D-ILP32D-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[VAARG]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoRET implicit $x10
+  ;
+  ; LP64-LABEL: name: va1_va_arg_alloca
+  ; LP64: bb.1 (%ir-block.0):
+  ; LP64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; LP64-NEXT: {{  $}}
+  ; LP64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; LP64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; LP64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; LP64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; LP64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; LP64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; LP64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; LP64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; LP64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; LP64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; LP64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; LP64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; LP64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; LP64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; LP64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; LP64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; LP64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; LP64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; LP64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; LP64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; LP64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; LP64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; LP64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; LP64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; LP64-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; LP64-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
+  ; LP64-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
+  ; LP64-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+  ; LP64-NEXT:   [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
+  ; LP64-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
+  ; LP64-NEXT:   [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
+  ; LP64-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
+  ; LP64-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; LP64-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; LP64-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+  ; LP64-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; LP64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
+  ; LP64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64-NEXT:   PseudoRET implicit $x10
+  ;
+  ; LP64F-LABEL: name: va1_va_arg_alloca
+  ; LP64F: bb.1 (%ir-block.0):
+  ; LP64F-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; LP64F-NEXT: {{  $}}
+  ; LP64F-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; LP64F-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; LP64F-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; LP64F-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; LP64F-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; LP64F-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; LP64F-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; LP64F-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; LP64F-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; LP64F-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; LP64F-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; LP64F-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; LP64F-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; LP64F-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; LP64F-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; LP64F-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; LP64F-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; LP64F-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; LP64F-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; LP64F-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; LP64F-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; LP64F-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; LP64F-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; LP64F-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; LP64F-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; LP64F-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64F-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
+  ; LP64F-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
+  ; LP64F-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+  ; LP64F-NEXT:   [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
+  ; LP64F-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
+  ; LP64F-NEXT:   [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
+  ; LP64F-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
+  ; LP64F-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; LP64F-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; LP64F-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+  ; LP64F-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; LP64F-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
+  ; LP64F-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64F-NEXT:   PseudoRET implicit $x10
+  ;
+  ; LP64D-LABEL: name: va1_va_arg_alloca
+  ; LP64D: bb.1 (%ir-block.0):
+  ; LP64D-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; LP64D-NEXT: {{  $}}
+  ; LP64D-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; LP64D-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; LP64D-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; LP64D-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; LP64D-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; LP64D-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; LP64D-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; LP64D-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; LP64D-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; LP64D-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; LP64D-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; LP64D-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; LP64D-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; LP64D-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; LP64D-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; LP64D-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; LP64D-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; LP64D-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; LP64D-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; LP64D-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; LP64D-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; LP64D-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; LP64D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; LP64D-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; LP64D-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; LP64D-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64D-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
+  ; LP64D-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
+  ; LP64D-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+  ; LP64D-NEXT:   [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
+  ; LP64D-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
+  ; LP64D-NEXT:   [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
+  ; LP64D-NEXT:   [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
+  ; LP64D-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; LP64D-NEXT:   $x10 = COPY [[DYN_STACKALLOC]](p0)
+  ; LP64D-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+  ; LP64D-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; LP64D-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
+  ; LP64D-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64D-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  %2 = alloca i8, i32 %1
+  call void @notdead(ptr %2)
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
+  ; RV32-LABEL: name: va1_va_arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32-NEXT:   $x10 = COPY [[VAARG]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64-LABEL: name: va1_va_arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
+  ; RV64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
 
 define void @va1_caller() nounwind {
   ; ILP32-LABEL: name: va1_caller
@@ -150,7 +627,180 @@ define void @va1_caller() nounwind {
 ; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
 ; register pair (where the first register is even-numbered).
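 ;
 ; As a rough sketch (taken from the va2 body added below), the open-coded
 ; va_arg sequence first rounds the saved pointer up to an 8-byte boundary
 ; before loading the double, which the IRTranslator emits as G_ADD/G_AND:
 ;   %1 = add i32 %argp.cur, 7
 ;   %2 = and i32 %1, -8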
 
-declare i64 @va2(ptr %fmt, ...) nounwind
+define i64 @va2(ptr %fmt, ...) nounwind {
+  ; RV32-LABEL: name: va2
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; RV32-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
+  ; RV32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (dereferenceable load (s32) from %ir.va)
+  ; RV32-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C]]
+  ; RV32-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C1]]
+  ; RV32-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
+  ; RV32-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C2]](s32)
+  ; RV32-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX7]](p0) :: (store (p0) into %ir.va)
+  ; RV32-NEXT:   [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
+  ; RV32-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
+  ; RV32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
+  ; RV32-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ;
+  ; RV64-LABEL: name: va2
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; RV64-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
+  ; RV64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (dereferenceable load (s32) from %ir.va)
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C]]
+  ; RV64-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C1]]
+  ; RV64-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
+  ; RV64-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C2]](s64)
+  ; RV64-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX7]](p0) :: (store (p0) into %ir.va, align 4)
+  ; RV64-NEXT:   [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
+  ; RV64-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
+  ; RV64-NEXT:   $x10 = COPY [[LOAD1]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load i32, ptr %va, align 4
+  %1 = add i32 %argp.cur, 7
+  %2 = and i32 %1, -8
+  %argp.cur.aligned = inttoptr i32 %1 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %va, align 4
+  %3 = inttoptr i32 %2 to ptr
+  %4 = load double, ptr %3, align 8
+  %5 = bitcast double %4 to i64
+  call void @llvm.va_end(ptr %va)
+  ret i64 %5
+}
+
+define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
+  ; RV32-LABEL: name: va2_va_arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX7]](p0), 8
+  ; RV32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VAARG]](s64)
+  ; RV32-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ;
+  ; RV64-LABEL: name: va2_va_arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX7]](p0), 8
+  ; RV64-NEXT:   $x10 = COPY [[VAARG]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr %va)
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
 
 define void @va2_caller() nounwind {
   ; ILP32-LABEL: name: va2_caller
@@ -259,7 +909,178 @@ define void @va2_caller() nounwind {
 ; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
 ; vararg double is passed in a4 and a5 (rather than a3 and a4).
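 ;
 ; For reference, in the RV32 checks for va3 below the named i64 %b arrives as
 ;   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
 ; built from copies of $x11 and $x12, while $x13-$x17 are stored out to the
 ; fixed-stack vararg save area.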
 
-declare i64 @va3(i32 %a, i64 %b, ...) nounwind
+define i64 @va3(i32 %a, i64 %b, ...) nounwind {
+  ; RV32-LABEL: name: va3
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; RV32-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX5]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (dereferenceable load (s32) from %ir.va)
+  ; RV32-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C]]
+  ; RV32-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C1]]
+  ; RV32-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
+  ; RV32-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C2]](s32)
+  ; RV32-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX5]](p0) :: (store (p0) into %ir.va)
+  ; RV32-NEXT:   [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
+  ; RV32-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
+  ; RV32-NEXT:   [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[LOAD1]]
+  ; RV32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD1]](s64)
+  ; RV32-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ;
+  ; RV64-LABEL: name: va3
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; RV64-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX6]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (dereferenceable load (s32) from %ir.va)
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C]]
+  ; RV64-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C1]]
+  ; RV64-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
+  ; RV64-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C2]](s64)
+  ; RV64-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX6]](p0) :: (store (p0) into %ir.va, align 4)
+  ; RV64-NEXT:   [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
+  ; RV64-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
+  ; RV64-NEXT:   [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[LOAD1]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD1]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load i32, ptr %va, align 4
+  %1 = add i32 %argp.cur, 7
+  %2 = and i32 %1, -8
+  %argp.cur.aligned = inttoptr i32 %1 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %va, align 4
+  %3 = inttoptr i32 %2 to ptr
+  %4 = load double, ptr %3, align 8
+  call void @llvm.va_end(ptr %va)
+  %5 = bitcast double %4 to i64
+  %6 = add i64 %b, %5
+  ret i64 %6
+}
+
+define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
+  ; RV32-LABEL: name: va3_va_arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX5]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX5]](p0), 8
+  ; RV32-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[VAARG]]
+  ; RV32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+  ; RV32-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ;
+  ; RV64-LABEL: name: va3_va_arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX6]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX6]](p0), 8
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[VAARG]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr %va)
+  %2 = bitcast double %1 to i64
+  %3 = add i64 %b, %2
+  ret i64 %3
+}
 
 define void @va3_caller() nounwind {
   ; ILP32-LABEL: name: va3_caller
@@ -391,3 +1212,510 @@ define void @va3_caller() nounwind {
 }
 
 declare void @llvm.va_copy(ptr, ptr)
+
+define i32 @va4_va_copy(i32 %argno, ...) nounwind {
+  ; ILP32-LABEL: name: va4_va_copy
+  ; ILP32: bb.1 (%ir-block.0):
+  ; ILP32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; ILP32-NEXT: {{  $}}
+  ; ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; ILP32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; ILP32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; ILP32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; ILP32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; ILP32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; ILP32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; ILP32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; ILP32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; ILP32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; ILP32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; ILP32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; ILP32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; ILP32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; ILP32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; ILP32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; ILP32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; ILP32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; ILP32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; ILP32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; ILP32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; ILP32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; ILP32-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; ILP32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.vargs, align 1)
+  ; ILP32-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; ILP32-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; ILP32-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs)
+  ; ILP32-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; ILP32-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; ILP32-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+  ; ILP32-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; ILP32-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; ILP32-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; ILP32-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; ILP32-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; ILP32-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; ILP32-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; ILP32-NEXT:   $x10 = COPY [[ADD2]](s32)
+  ; ILP32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV32D-ILP32-LABEL: name: va4_va_copy
+  ; RV32D-ILP32: bb.1 (%ir-block.0):
+  ; RV32D-ILP32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32D-ILP32-NEXT: {{  $}}
+  ; RV32D-ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32D-ILP32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32D-ILP32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32D-ILP32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32D-ILP32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32D-ILP32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32D-ILP32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32D-ILP32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; RV32D-ILP32-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; RV32D-ILP32-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.vargs, align 1)
+  ; RV32D-ILP32-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; RV32D-ILP32-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs)
+  ; RV32D-ILP32-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; RV32D-ILP32-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+  ; RV32D-ILP32-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; RV32D-ILP32-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; RV32D-ILP32-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[ADD2]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV32D-ILP32F-LABEL: name: va4_va_copy
+  ; RV32D-ILP32F: bb.1 (%ir-block.0):
+  ; RV32D-ILP32F-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32D-ILP32F-NEXT: {{  $}}
+  ; RV32D-ILP32F-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32F-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32D-ILP32F-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32D-ILP32F-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32D-ILP32F-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32D-ILP32F-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32D-ILP32F-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32D-ILP32F-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32D-ILP32F-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; RV32D-ILP32F-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; RV32D-ILP32F-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.vargs, align 1)
+  ; RV32D-ILP32F-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32F-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; RV32D-ILP32F-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs)
+  ; RV32D-ILP32F-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; RV32D-ILP32F-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+  ; RV32D-ILP32F-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32F-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32F-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32F-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32F-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; RV32D-ILP32F-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; RV32D-ILP32F-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[ADD2]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV32D-ILP32D-LABEL: name: va4_va_copy
+  ; RV32D-ILP32D: bb.1 (%ir-block.0):
+  ; RV32D-ILP32D-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32D-ILP32D-NEXT: {{  $}}
+  ; RV32D-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32D-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32D-ILP32D-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32D-ILP32D-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32D-ILP32D-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32D-ILP32D-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32D-ILP32D-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32D-ILP32D-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32D-ILP32D-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; RV32D-ILP32D-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; RV32D-ILP32D-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s32) into %ir.vargs, align 1)
+  ; RV32D-ILP32D-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32D-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; RV32D-ILP32D-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs)
+  ; RV32D-ILP32D-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; RV32D-ILP32D-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+  ; RV32D-ILP32D-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV32D-ILP32D-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32D-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32D-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; RV32D-ILP32D-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; RV32D-ILP32D-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; RV32D-ILP32D-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[ADD2]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoRET implicit $x10
+  ;
+  ; LP64-LABEL: name: va4_va_copy
+  ; LP64: bb.1 (%ir-block.0):
+  ; LP64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; LP64-NEXT: {{  $}}
+  ; LP64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; LP64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; LP64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; LP64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; LP64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; LP64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; LP64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; LP64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; LP64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; LP64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; LP64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; LP64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; LP64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; LP64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; LP64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; LP64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; LP64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; LP64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; LP64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; LP64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; LP64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; LP64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; LP64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; LP64-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; LP64-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.vargs, align 1)
+  ; LP64-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; LP64-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
+  ; LP64-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; LP64-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; LP64-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+  ; LP64-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; LP64-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; LP64-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; LP64-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; LP64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32)
+  ; LP64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64-NEXT:   PseudoRET implicit $x10
+  ;
+  ; LP64F-LABEL: name: va4_va_copy
+  ; LP64F: bb.1 (%ir-block.0):
+  ; LP64F-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; LP64F-NEXT: {{  $}}
+  ; LP64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64F-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; LP64F-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; LP64F-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; LP64F-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; LP64F-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; LP64F-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; LP64F-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; LP64F-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; LP64F-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; LP64F-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; LP64F-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; LP64F-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; LP64F-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; LP64F-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; LP64F-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; LP64F-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; LP64F-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; LP64F-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; LP64F-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; LP64F-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; LP64F-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; LP64F-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; LP64F-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; LP64F-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; LP64F-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.vargs, align 1)
+  ; LP64F-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64F-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; LP64F-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
+  ; LP64F-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; LP64F-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; LP64F-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+  ; LP64F-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; LP64F-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64F-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64F-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64F-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; LP64F-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; LP64F-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; LP64F-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32)
+  ; LP64F-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64F-NEXT:   PseudoRET implicit $x10
+  ;
+  ; LP64D-LABEL: name: va4_va_copy
+  ; LP64D: bb.1 (%ir-block.0):
+  ; LP64D-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; LP64D-NEXT: {{  $}}
+  ; LP64D-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64D-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; LP64D-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; LP64D-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; LP64D-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; LP64D-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; LP64D-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; LP64D-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; LP64D-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; LP64D-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; LP64D-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; LP64D-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; LP64D-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; LP64D-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; LP64D-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; LP64D-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; LP64D-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; LP64D-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; LP64D-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; LP64D-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; LP64D-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; LP64D-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; LP64D-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; LP64D-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
+  ; LP64D-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
+  ; LP64D-NEXT:   G_VASTART [[FRAME_INDEX7]](p0) :: (store (s64) into %ir.vargs, align 1)
+  ; LP64D-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64D-NEXT:   G_VACOPY [[FRAME_INDEX8]](p0), [[FRAME_INDEX7]]
+  ; LP64D-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
+  ; LP64D-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; LP64D-NEXT:   $x10 = COPY [[LOAD]](p0)
+  ; LP64D-NEXT:   PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+  ; LP64D-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; LP64D-NEXT:   [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64D-NEXT:   [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64D-NEXT:   [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX7]](p0), 4
+  ; LP64D-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
+  ; LP64D-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
+  ; LP64D-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
+  ; LP64D-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32)
+  ; LP64D-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64D-NEXT:   PseudoRET implicit $x10
+  %vargs = alloca ptr
+  %wargs = alloca ptr
+  call void @llvm.va_start(ptr %vargs)
+  %1 = va_arg ptr %vargs, i32
+  call void @llvm.va_copy(ptr %wargs, ptr %vargs)
+  %2 = load ptr, ptr %wargs, align 4
+  call void @notdead(ptr %2)
+  %3 = va_arg ptr %vargs, i32
+  %4 = va_arg ptr %vargs, i32
+  %5 = va_arg ptr %vargs, i32
+  call void @llvm.va_end(ptr %vargs)
+  call void @llvm.va_end(ptr %wargs)
+  %add1 = add i32 %3, %1
+  %add2 = add i32 %add1, %4
+  %add3 = add i32 %add2, %5
+  ret i32 %add3
+}
+
+; A function with no fixed arguments is not valid C, but can be
+; specified in LLVM IR. We must ensure the vararg save area is
+; still set up correctly.
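+; With no named arguments, all eight argument registers (a0-a7, i.e. $x10-$x17)
+; still have to be spilled to the save area; the G_STOREs into
+; %fixed-stack.7 through %fixed-stack.0 below check exactly that.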
+
+define i32 @va6_no_fixed_args(...) nounwind {
+  ; RV32-LABEL: name: va6_no_fixed_args
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
+  ; RV32-NEXT:   G_STORE [[COPY]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.7, align 16)
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX7]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX8]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX8]](p0), 4
+  ; RV32-NEXT:   $x10 = COPY [[VAARG]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64-LABEL: name: va6_no_fixed_args
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
+  ; RV64-NEXT:   G_STORE [[COPY]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.7, align 16)
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX7]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX8]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX8]](p0), 4
+  ; RV64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
+  ; RV64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+; TODO: improve constant materialization of stack addresses
+
+define i32 @va_large_stack(ptr %fmt, ...) {
+  ; RV32-LABEL: name: va_large_stack
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.6)
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %fixed-stack.5, align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32) into %fixed-stack.4)
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32) into %fixed-stack.3, align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32) into %fixed-stack.2)
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32) into %fixed-stack.1, align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32) into %fixed-stack.0)
+  ; RV32-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.large
+  ; RV32-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.va
+  ; RV32-NEXT:   G_VASTART [[FRAME_INDEX8]](p0) :: (store (s32) into %ir.va, align 1)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.va)
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C]](s32)
+  ; RV32-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX8]](p0) :: (store (p0) into %ir.va)
+  ; RV32-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
+  ; RV32-NEXT:   $x10 = COPY [[LOAD1]](s32)
+  ; RV32-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64-LABEL: name: va_large_stack
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.6)
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %fixed-stack.5, align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %fixed-stack.4)
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64) into %fixed-stack.3, align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64) into %fixed-stack.2)
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64) into %fixed-stack.1, align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64) into %fixed-stack.0)
+  ; RV64-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.large
+  ; RV64-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.va
+  ; RV64-NEXT:   G_VASTART [[FRAME_INDEX8]](p0) :: (store (s64) into %ir.va, align 1)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX8]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C]](s64)
+  ; RV64-NEXT:   G_STORE [[PTR_ADD]](p0), [[FRAME_INDEX8]](p0) :: (store (p0) into %ir.va, align 4)
+  ; RV64-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
+  ; RV64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
+  ; RV64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %large = alloca [ 100000000 x i8 ]
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va, align 4
+  %1 = load i32, ptr %argp.cur, align 4
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
new file mode 100644
index 000000000000000..eba35e2f4944c61
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -0,0 +1,1896 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel  -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32,ILP32 %s
+; RUN: llc -mtriple=riscv32 -global-isel  -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
+; RUN: llc -mtriple=riscv32 -global-isel  -mattr=+d -target-abi ilp32f \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
+; RUN: llc -mtriple=riscv32 -global-isel  -mattr=+d -target-abi ilp32d \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
+; RUN: llc -mtriple=riscv64 -global-isel  -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64,LP64 %s
+; RUN: llc -mtriple=riscv64 -global-isel  -mattr=+d -target-abi lp64f \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64,LP64F %s
+; RUN: llc -mtriple=riscv64 -global-isel  -mattr=+d -target-abi lp64d \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64,LP64D %s
+
+; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
+; lp64/lp64f/lp64d. Different CHECK lines are required because of slight
+; codegen differences in the way the f64 load operations are lowered and
+; because the PseudoCALL specifies the calling convention.
+; The nounwind attribute is omitted for some of the tests in order to check
+; that CFI directives are correctly generated.
+
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+
+declare void @notdead(ptr)
+
+; Although frontends are recommended not to generate va_arg due to the lack of
+; support for aggregate types, we test simple cases here to ensure they are
+; lowered correctly.
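+; (Each va_arg below boils down to the same basic pattern in the generated
+; code: load the current pointer from the va_list slot, round it up to the
+; argument's alignment, load the value, and store the bumped pointer back.)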
+
+define i32 @va1(ptr %fmt, ...) {
+; RV32-LABEL: va1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -48
+; RV32-NEXT:    .cfi_def_cfa_offset 48
+; RV32-NEXT:    addi a0, sp, 20
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    addi a0, sp, 24
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    addi a0, sp, 28
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    addi a0, sp, 32
+; RV32-NEXT:    sw a4, 0(a0)
+; RV32-NEXT:    addi a0, sp, 36
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    addi a0, sp, 40
+; RV32-NEXT:    addi a1, sp, 12
+; RV32-NEXT:    addi a2, sp, 20
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    addi a4, a2, 2
+; RV32-NEXT:    lui a5, 16
+; RV32-NEXT:    addi a5, a5, -1
+; RV32-NEXT:    and a5, a1, a5
+; RV32-NEXT:    srli a5, a5, 8
+; RV32-NEXT:    sb a1, 0(a2)
+; RV32-NEXT:    addi a2, a2, 1
+; RV32-NEXT:    sb a5, 0(a2)
+; RV32-NEXT:    srli a2, a3, 8
+; RV32-NEXT:    addi a5, a4, 1
+; RV32-NEXT:    sb a3, 0(a4)
+; RV32-NEXT:    sb a2, 0(a5)
+; RV32-NEXT:    lw a2, 0(a1)
+; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    addi a0, sp, 44
+; RV32-NEXT:    sw a7, 0(a0)
+; RV32-NEXT:    addi a0, a2, 4
+; RV32-NEXT:    sw a0, 0(a1)
+; RV32-NEXT:    lw a0, 0(a2)
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    .cfi_def_cfa_offset 80
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a1, sp, 24
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and t0, a0, a6
+; RV64-NEXT:    srliw t0, t0, 8
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    addi a1, a1, 1
+; RV64-NEXT:    sb t0, 0(a1)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi t0, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(t0)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    addi a3, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    addi a1, a0, 4
+; RV64-NEXT:    lw a2, 0(a1)
+; RV64-NEXT:    lwu a3, 0(a0)
+; RV64-NEXT:    addi a4, sp, 72
+; RV64-NEXT:    sd a7, 0(a4)
+; RV64-NEXT:    slli a2, a2, 32
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    addi a3, a2, 4
+; RV64-NEXT:    srli a4, a3, 32
+; RV64-NEXT:    sw a3, 0(a0)
+; RV64-NEXT:    sw a4, 0(a1)
+; RV64-NEXT:    lw a0, 0(a2)
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va, align 4
+  %1 = load i32, ptr %argp.cur, align 4
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+; Ensure the adjustment made when restoring the stack pointer using the frame
+; pointer is correct.
+define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
+; RV32-LABEL: va1_va_arg_alloca:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -48
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 16
+; RV32-NEXT:    addi a0, s0, 4
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    addi a0, s0, 8
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    addi a0, s0, 12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    addi a0, s0, 16
+; RV32-NEXT:    sw a4, 0(a0)
+; RV32-NEXT:    addi a0, s0, 20
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    addi a0, s0, 24
+; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    addi a0, s0, 28
+; RV32-NEXT:    sw a7, 0(a0)
+; RV32-NEXT:    addi a0, s0, -16
+; RV32-NEXT:    addi a1, s0, 4
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    addi a3, a1, 2
+; RV32-NEXT:    lui a4, 16
+; RV32-NEXT:    addi a4, a4, -1
+; RV32-NEXT:    and a4, a0, a4
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    addi a5, a1, 1
+; RV32-NEXT:    sb a0, 0(a1)
+; RV32-NEXT:    sb a4, 0(a5)
+; RV32-NEXT:    srli a1, a2, 8
+; RV32-NEXT:    addi a4, a3, 1
+; RV32-NEXT:    sb a2, 0(a3)
+; RV32-NEXT:    sb a1, 0(a4)
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    addi a1, a1, 3
+; RV32-NEXT:    andi a1, a1, -4
+; RV32-NEXT:    addi a2, a1, 4
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    lw s1, 0(a1)
+; RV32-NEXT:    addi a0, s1, 15
+; RV32-NEXT:    andi a0, a0, -16
+; RV32-NEXT:    sub a0, sp, a0
+; RV32-NEXT:    mv sp, a0
+; RV32-NEXT:    call notdead@plt
+; RV32-NEXT:    mv a0, s1
+; RV32-NEXT:    addi sp, s0, -16
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va1_va_arg_alloca:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -96
+; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 32
+; RV64-NEXT:    addi a0, s0, 8
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, s0, 16
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, s0, 24
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, s0, 32
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, s0, 40
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, s0, 48
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, s0, 56
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, s0, -32
+; RV64-NEXT:    addi a1, s0, 8
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and a7, a0, a6
+; RV64-NEXT:    srliw a7, a7, 8
+; RV64-NEXT:    addi t0, a1, 1
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    sb a7, 0(t0)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi a7, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(a7)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    addi a3, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    ld a1, 0(a0)
+; RV64-NEXT:    addi a1, a1, 3
+; RV64-NEXT:    andi a1, a1, -4
+; RV64-NEXT:    addi a2, a1, 4
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    lw s1, 0(a1)
+; RV64-NEXT:    slli a0, s1, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    addi a0, a0, 15
+; RV64-NEXT:    andi a0, a0, -16
+; RV64-NEXT:    sub a0, sp, a0
+; RV64-NEXT:    mv sp, a0
+; RV64-NEXT:    call notdead@plt
+; RV64-NEXT:    mv a0, s1
+; RV64-NEXT:    addi sp, s0, -32
+; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 96
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  %2 = alloca i8, i32 %1
+  call void @notdead(ptr %2)
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
+; RV32-LABEL: va1_va_arg:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -48
+; RV32-NEXT:    addi a0, sp, 20
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    addi a0, sp, 24
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    addi a0, sp, 28
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    addi a0, sp, 32
+; RV32-NEXT:    sw a4, 0(a0)
+; RV32-NEXT:    addi a0, sp, 36
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    addi a0, sp, 40
+; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    addi a0, sp, 44
+; RV32-NEXT:    sw a7, 0(a0)
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    addi a1, sp, 20
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    addi a3, a1, 2
+; RV32-NEXT:    lui a4, 16
+; RV32-NEXT:    addi a4, a4, -1
+; RV32-NEXT:    and a4, a0, a4
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    addi a5, a1, 1
+; RV32-NEXT:    sb a0, 0(a1)
+; RV32-NEXT:    sb a4, 0(a5)
+; RV32-NEXT:    srli a1, a2, 8
+; RV32-NEXT:    addi a4, a3, 1
+; RV32-NEXT:    sb a2, 0(a3)
+; RV32-NEXT:    sb a1, 0(a4)
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    addi a1, a1, 3
+; RV32-NEXT:    andi a1, a1, -4
+; RV32-NEXT:    addi a2, a1, 4
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    lw a0, 0(a1)
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va1_va_arg:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 72
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a1, sp, 24
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and a7, a0, a6
+; RV64-NEXT:    srliw a7, a7, 8
+; RV64-NEXT:    addi t0, a1, 1
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    sb a7, 0(t0)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi a7, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(a7)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    addi a3, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    ld a1, 0(a0)
+; RV64-NEXT:    addi a1, a1, 3
+; RV64-NEXT:    andi a1, a1, -4
+; RV64-NEXT:    addi a2, a1, 4
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    lw a0, 0(a1)
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+define void @va1_caller() nounwind {
+; RV32-LABEL: va1_caller:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a4, 2
+; RV32-NEXT:    li a2, 0
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call va1@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va1_caller:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 2
+; RV64-NEXT:    li a1, 0
+; RV64-NEXT:    call va1@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %1 = call i32 (ptr, ...) @va1(ptr undef, i64 0, i32 2)
+  ret void
+}
+
+; Ensure that varargs with 2*xlen size and alignment are accessed via an
+; "aligned" register pair (where the first register is even-numbered).
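+; The call in @va1_caller above, for example,
+;   call i32 (ptr, ...) @va1(ptr undef, i64 0, i32 2)
+; puts the i64 in the aligned pair a2/a3 (a1 is left unused) and the following
+; i32 in a4, as the RV32 checks for that caller show.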
+
+define i64 @va2(ptr %fmt, ...) nounwind {
+; ILP32-LABEL: va2:
+; ILP32:       # %bb.0:
+; ILP32-NEXT:    addi sp, sp, -48
+; ILP32-NEXT:    addi a0, sp, 20
+; ILP32-NEXT:    sw a1, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 24
+; ILP32-NEXT:    sw a2, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 28
+; ILP32-NEXT:    sw a3, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 32
+; ILP32-NEXT:    sw a4, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 36
+; ILP32-NEXT:    sw a5, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 40
+; ILP32-NEXT:    addi a1, sp, 12
+; ILP32-NEXT:    addi a2, sp, 20
+; ILP32-NEXT:    srli a3, a1, 16
+; ILP32-NEXT:    addi a4, a2, 2
+; ILP32-NEXT:    lui a5, 16
+; ILP32-NEXT:    addi a5, a5, -1
+; ILP32-NEXT:    and a5, a1, a5
+; ILP32-NEXT:    srli a5, a5, 8
+; ILP32-NEXT:    sb a1, 0(a2)
+; ILP32-NEXT:    addi a2, a2, 1
+; ILP32-NEXT:    sb a5, 0(a2)
+; ILP32-NEXT:    srli a2, a3, 8
+; ILP32-NEXT:    addi a5, a4, 1
+; ILP32-NEXT:    sb a3, 0(a4)
+; ILP32-NEXT:    sb a2, 0(a5)
+; ILP32-NEXT:    lw a2, 0(a1)
+; ILP32-NEXT:    sw a6, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 44
+; ILP32-NEXT:    sw a7, 0(a0)
+; ILP32-NEXT:    addi a2, a2, 7
+; ILP32-NEXT:    andi a3, a2, -8
+; ILP32-NEXT:    addi a2, a2, 8
+; ILP32-NEXT:    sw a2, 0(a1)
+; ILP32-NEXT:    lw a0, 0(a3)
+; ILP32-NEXT:    addi a3, a3, 4
+; ILP32-NEXT:    lw a1, 0(a3)
+; ILP32-NEXT:    addi sp, sp, 48
+; ILP32-NEXT:    ret
+;
+; RV32D-ILP32-LABEL: va2:
+; RV32D-ILP32:       # %bb.0:
+; RV32D-ILP32-NEXT:    addi sp, sp, -48
+; RV32D-ILP32-NEXT:    addi a0, sp, 20
+; RV32D-ILP32-NEXT:    sw a1, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 24
+; RV32D-ILP32-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 28
+; RV32D-ILP32-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 32
+; RV32D-ILP32-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 36
+; RV32D-ILP32-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 40
+; RV32D-ILP32-NEXT:    addi a1, sp, 12
+; RV32D-ILP32-NEXT:    addi a2, sp, 20
+; RV32D-ILP32-NEXT:    srli a3, a1, 16
+; RV32D-ILP32-NEXT:    addi a4, a2, 2
+; RV32D-ILP32-NEXT:    lui a5, 16
+; RV32D-ILP32-NEXT:    addi a5, a5, -1
+; RV32D-ILP32-NEXT:    and a5, a1, a5
+; RV32D-ILP32-NEXT:    srli a5, a5, 8
+; RV32D-ILP32-NEXT:    sb a1, 0(a2)
+; RV32D-ILP32-NEXT:    addi a2, a2, 1
+; RV32D-ILP32-NEXT:    sb a5, 0(a2)
+; RV32D-ILP32-NEXT:    srli a2, a3, 8
+; RV32D-ILP32-NEXT:    addi a5, a4, 1
+; RV32D-ILP32-NEXT:    sb a3, 0(a4)
+; RV32D-ILP32-NEXT:    sb a2, 0(a5)
+; RV32D-ILP32-NEXT:    lw a2, 0(a1)
+; RV32D-ILP32-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 44
+; RV32D-ILP32-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32-NEXT:    addi a2, a2, 7
+; RV32D-ILP32-NEXT:    andi a0, a2, -8
+; RV32D-ILP32-NEXT:    fld fa5, 0(a0)
+; RV32D-ILP32-NEXT:    addi a2, a2, 8
+; RV32D-ILP32-NEXT:    sw a2, 0(a1)
+; RV32D-ILP32-NEXT:    fsd fa5, 0(sp)
+; RV32D-ILP32-NEXT:    lw a0, 0(sp)
+; RV32D-ILP32-NEXT:    lw a1, 4(sp)
+; RV32D-ILP32-NEXT:    addi sp, sp, 48
+; RV32D-ILP32-NEXT:    ret
+;
+; RV32D-ILP32F-LABEL: va2:
+; RV32D-ILP32F:       # %bb.0:
+; RV32D-ILP32F-NEXT:    addi sp, sp, -48
+; RV32D-ILP32F-NEXT:    addi a0, sp, 20
+; RV32D-ILP32F-NEXT:    sw a1, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 24
+; RV32D-ILP32F-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 28
+; RV32D-ILP32F-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 32
+; RV32D-ILP32F-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 36
+; RV32D-ILP32F-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 40
+; RV32D-ILP32F-NEXT:    addi a1, sp, 12
+; RV32D-ILP32F-NEXT:    addi a2, sp, 20
+; RV32D-ILP32F-NEXT:    srli a3, a1, 16
+; RV32D-ILP32F-NEXT:    addi a4, a2, 2
+; RV32D-ILP32F-NEXT:    lui a5, 16
+; RV32D-ILP32F-NEXT:    addi a5, a5, -1
+; RV32D-ILP32F-NEXT:    and a5, a1, a5
+; RV32D-ILP32F-NEXT:    srli a5, a5, 8
+; RV32D-ILP32F-NEXT:    sb a1, 0(a2)
+; RV32D-ILP32F-NEXT:    addi a2, a2, 1
+; RV32D-ILP32F-NEXT:    sb a5, 0(a2)
+; RV32D-ILP32F-NEXT:    srli a2, a3, 8
+; RV32D-ILP32F-NEXT:    addi a5, a4, 1
+; RV32D-ILP32F-NEXT:    sb a3, 0(a4)
+; RV32D-ILP32F-NEXT:    sb a2, 0(a5)
+; RV32D-ILP32F-NEXT:    lw a2, 0(a1)
+; RV32D-ILP32F-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 44
+; RV32D-ILP32F-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a2, a2, 7
+; RV32D-ILP32F-NEXT:    andi a0, a2, -8
+; RV32D-ILP32F-NEXT:    fld fa5, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a2, a2, 8
+; RV32D-ILP32F-NEXT:    sw a2, 0(a1)
+; RV32D-ILP32F-NEXT:    fsd fa5, 0(sp)
+; RV32D-ILP32F-NEXT:    lw a0, 0(sp)
+; RV32D-ILP32F-NEXT:    lw a1, 4(sp)
+; RV32D-ILP32F-NEXT:    addi sp, sp, 48
+; RV32D-ILP32F-NEXT:    ret
+;
+; RV32D-ILP32D-LABEL: va2:
+; RV32D-ILP32D:       # %bb.0:
+; RV32D-ILP32D-NEXT:    addi sp, sp, -48
+; RV32D-ILP32D-NEXT:    addi a0, sp, 20
+; RV32D-ILP32D-NEXT:    sw a1, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 24
+; RV32D-ILP32D-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 28
+; RV32D-ILP32D-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 32
+; RV32D-ILP32D-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 36
+; RV32D-ILP32D-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 40
+; RV32D-ILP32D-NEXT:    addi a1, sp, 12
+; RV32D-ILP32D-NEXT:    addi a2, sp, 20
+; RV32D-ILP32D-NEXT:    srli a3, a1, 16
+; RV32D-ILP32D-NEXT:    addi a4, a2, 2
+; RV32D-ILP32D-NEXT:    lui a5, 16
+; RV32D-ILP32D-NEXT:    addi a5, a5, -1
+; RV32D-ILP32D-NEXT:    and a5, a1, a5
+; RV32D-ILP32D-NEXT:    srli a5, a5, 8
+; RV32D-ILP32D-NEXT:    sb a1, 0(a2)
+; RV32D-ILP32D-NEXT:    addi a2, a2, 1
+; RV32D-ILP32D-NEXT:    sb a5, 0(a2)
+; RV32D-ILP32D-NEXT:    srli a2, a3, 8
+; RV32D-ILP32D-NEXT:    addi a5, a4, 1
+; RV32D-ILP32D-NEXT:    sb a3, 0(a4)
+; RV32D-ILP32D-NEXT:    sb a2, 0(a5)
+; RV32D-ILP32D-NEXT:    lw a2, 0(a1)
+; RV32D-ILP32D-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 44
+; RV32D-ILP32D-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a2, a2, 7
+; RV32D-ILP32D-NEXT:    andi a0, a2, -8
+; RV32D-ILP32D-NEXT:    fld fa5, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a2, a2, 8
+; RV32D-ILP32D-NEXT:    sw a2, 0(a1)
+; RV32D-ILP32D-NEXT:    fsd fa5, 0(sp)
+; RV32D-ILP32D-NEXT:    lw a0, 0(sp)
+; RV32D-ILP32D-NEXT:    lw a1, 4(sp)
+; RV32D-ILP32D-NEXT:    addi sp, sp, 48
+; RV32D-ILP32D-NEXT:    ret
+;
+; RV64-LABEL: va2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 72
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a1, sp, 24
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and a7, a0, a6
+; RV64-NEXT:    srliw a7, a7, 8
+; RV64-NEXT:    addi t0, a1, 1
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    sb a7, 0(t0)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi a7, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(a7)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    lw a3, 0(a0)
+; RV64-NEXT:    addi a5, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a5)
+; RV64-NEXT:    addi a3, a3, 7
+; RV64-NEXT:    andi a1, a3, -8
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    srli a3, a3, 32
+; RV64-NEXT:    addi a3, a3, 8
+; RV64-NEXT:    srli a2, a3, 32
+; RV64-NEXT:    addi a4, a0, 4
+; RV64-NEXT:    sw a3, 0(a0)
+; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    ld a0, 0(a1)
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load i32, ptr %va, align 4
+  %1 = add i32 %argp.cur, 7
+  %2 = and i32 %1, -8
+  %argp.cur.aligned = inttoptr i32 %1 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %va, align 4
+  %3 = inttoptr i32 %2 to ptr
+  %4 = load double, ptr %3, align 8
+  %5 = bitcast double %4 to i64
+  call void @llvm.va_end(ptr %va)
+  ret i64 %5
+}
+
+define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
+; ILP32-LABEL: va2_va_arg:
+; ILP32:       # %bb.0:
+; ILP32-NEXT:    addi sp, sp, -48
+; ILP32-NEXT:    addi a0, sp, 20
+; ILP32-NEXT:    sw a1, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 24
+; ILP32-NEXT:    sw a2, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 28
+; ILP32-NEXT:    sw a3, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 32
+; ILP32-NEXT:    sw a4, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 36
+; ILP32-NEXT:    sw a5, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 40
+; ILP32-NEXT:    sw a6, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 44
+; ILP32-NEXT:    sw a7, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 12
+; ILP32-NEXT:    addi a1, sp, 20
+; ILP32-NEXT:    srli a2, a0, 16
+; ILP32-NEXT:    addi a3, a1, 2
+; ILP32-NEXT:    lui a4, 16
+; ILP32-NEXT:    addi a4, a4, -1
+; ILP32-NEXT:    and a4, a0, a4
+; ILP32-NEXT:    srli a4, a4, 8
+; ILP32-NEXT:    addi a5, a1, 1
+; ILP32-NEXT:    sb a0, 0(a1)
+; ILP32-NEXT:    sb a4, 0(a5)
+; ILP32-NEXT:    srli a1, a2, 8
+; ILP32-NEXT:    addi a4, a3, 1
+; ILP32-NEXT:    sb a2, 0(a3)
+; ILP32-NEXT:    sb a1, 0(a4)
+; ILP32-NEXT:    lw a1, 0(a0)
+; ILP32-NEXT:    addi a1, a1, 7
+; ILP32-NEXT:    andi a1, a1, -8
+; ILP32-NEXT:    addi a2, a1, 8
+; ILP32-NEXT:    sw a2, 0(a0)
+; ILP32-NEXT:    lw a0, 0(a1)
+; ILP32-NEXT:    addi a1, a1, 4
+; ILP32-NEXT:    lw a1, 0(a1)
+; ILP32-NEXT:    addi sp, sp, 48
+; ILP32-NEXT:    ret
+;
+; RV32D-ILP32-LABEL: va2_va_arg:
+; RV32D-ILP32:       # %bb.0:
+; RV32D-ILP32-NEXT:    addi sp, sp, -48
+; RV32D-ILP32-NEXT:    addi a0, sp, 20
+; RV32D-ILP32-NEXT:    sw a1, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 24
+; RV32D-ILP32-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 28
+; RV32D-ILP32-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 32
+; RV32D-ILP32-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 36
+; RV32D-ILP32-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 40
+; RV32D-ILP32-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 44
+; RV32D-ILP32-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 12
+; RV32D-ILP32-NEXT:    addi a1, sp, 20
+; RV32D-ILP32-NEXT:    srli a2, a0, 16
+; RV32D-ILP32-NEXT:    addi a3, a1, 2
+; RV32D-ILP32-NEXT:    lui a4, 16
+; RV32D-ILP32-NEXT:    addi a4, a4, -1
+; RV32D-ILP32-NEXT:    and a4, a0, a4
+; RV32D-ILP32-NEXT:    srli a4, a4, 8
+; RV32D-ILP32-NEXT:    addi a5, a1, 1
+; RV32D-ILP32-NEXT:    sb a0, 0(a1)
+; RV32D-ILP32-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32-NEXT:    srli a1, a2, 8
+; RV32D-ILP32-NEXT:    addi a4, a3, 1
+; RV32D-ILP32-NEXT:    sb a2, 0(a3)
+; RV32D-ILP32-NEXT:    sb a1, 0(a4)
+; RV32D-ILP32-NEXT:    lw a1, 0(a0)
+; RV32D-ILP32-NEXT:    addi a1, a1, 7
+; RV32D-ILP32-NEXT:    andi a1, a1, -8
+; RV32D-ILP32-NEXT:    addi a2, a1, 8
+; RV32D-ILP32-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32-NEXT:    fld fa5, 0(a1)
+; RV32D-ILP32-NEXT:    fsd fa5, 0(sp)
+; RV32D-ILP32-NEXT:    lw a0, 0(sp)
+; RV32D-ILP32-NEXT:    lw a1, 4(sp)
+; RV32D-ILP32-NEXT:    addi sp, sp, 48
+; RV32D-ILP32-NEXT:    ret
+;
+; RV32D-ILP32F-LABEL: va2_va_arg:
+; RV32D-ILP32F:       # %bb.0:
+; RV32D-ILP32F-NEXT:    addi sp, sp, -48
+; RV32D-ILP32F-NEXT:    addi a0, sp, 20
+; RV32D-ILP32F-NEXT:    sw a1, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 24
+; RV32D-ILP32F-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 28
+; RV32D-ILP32F-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 32
+; RV32D-ILP32F-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 36
+; RV32D-ILP32F-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 40
+; RV32D-ILP32F-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 44
+; RV32D-ILP32F-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 12
+; RV32D-ILP32F-NEXT:    addi a1, sp, 20
+; RV32D-ILP32F-NEXT:    srli a2, a0, 16
+; RV32D-ILP32F-NEXT:    addi a3, a1, 2
+; RV32D-ILP32F-NEXT:    lui a4, 16
+; RV32D-ILP32F-NEXT:    addi a4, a4, -1
+; RV32D-ILP32F-NEXT:    and a4, a0, a4
+; RV32D-ILP32F-NEXT:    srli a4, a4, 8
+; RV32D-ILP32F-NEXT:    addi a5, a1, 1
+; RV32D-ILP32F-NEXT:    sb a0, 0(a1)
+; RV32D-ILP32F-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32F-NEXT:    srli a1, a2, 8
+; RV32D-ILP32F-NEXT:    addi a4, a3, 1
+; RV32D-ILP32F-NEXT:    sb a2, 0(a3)
+; RV32D-ILP32F-NEXT:    sb a1, 0(a4)
+; RV32D-ILP32F-NEXT:    lw a1, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a1, a1, 7
+; RV32D-ILP32F-NEXT:    andi a1, a1, -8
+; RV32D-ILP32F-NEXT:    addi a2, a1, 8
+; RV32D-ILP32F-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32F-NEXT:    fld fa5, 0(a1)
+; RV32D-ILP32F-NEXT:    fsd fa5, 0(sp)
+; RV32D-ILP32F-NEXT:    lw a0, 0(sp)
+; RV32D-ILP32F-NEXT:    lw a1, 4(sp)
+; RV32D-ILP32F-NEXT:    addi sp, sp, 48
+; RV32D-ILP32F-NEXT:    ret
+;
+; RV32D-ILP32D-LABEL: va2_va_arg:
+; RV32D-ILP32D:       # %bb.0:
+; RV32D-ILP32D-NEXT:    addi sp, sp, -48
+; RV32D-ILP32D-NEXT:    addi a0, sp, 20
+; RV32D-ILP32D-NEXT:    sw a1, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 24
+; RV32D-ILP32D-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 28
+; RV32D-ILP32D-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 32
+; RV32D-ILP32D-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 36
+; RV32D-ILP32D-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 40
+; RV32D-ILP32D-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 44
+; RV32D-ILP32D-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 12
+; RV32D-ILP32D-NEXT:    addi a1, sp, 20
+; RV32D-ILP32D-NEXT:    srli a2, a0, 16
+; RV32D-ILP32D-NEXT:    addi a3, a1, 2
+; RV32D-ILP32D-NEXT:    lui a4, 16
+; RV32D-ILP32D-NEXT:    addi a4, a4, -1
+; RV32D-ILP32D-NEXT:    and a4, a0, a4
+; RV32D-ILP32D-NEXT:    srli a4, a4, 8
+; RV32D-ILP32D-NEXT:    addi a5, a1, 1
+; RV32D-ILP32D-NEXT:    sb a0, 0(a1)
+; RV32D-ILP32D-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32D-NEXT:    srli a1, a2, 8
+; RV32D-ILP32D-NEXT:    addi a4, a3, 1
+; RV32D-ILP32D-NEXT:    sb a2, 0(a3)
+; RV32D-ILP32D-NEXT:    sb a1, 0(a4)
+; RV32D-ILP32D-NEXT:    lw a1, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a1, a1, 7
+; RV32D-ILP32D-NEXT:    andi a1, a1, -8
+; RV32D-ILP32D-NEXT:    addi a2, a1, 8
+; RV32D-ILP32D-NEXT:    sw a2, 0(a0)
+; RV32D-ILP32D-NEXT:    fld fa5, 0(a1)
+; RV32D-ILP32D-NEXT:    fsd fa5, 0(sp)
+; RV32D-ILP32D-NEXT:    lw a0, 0(sp)
+; RV32D-ILP32D-NEXT:    lw a1, 4(sp)
+; RV32D-ILP32D-NEXT:    addi sp, sp, 48
+; RV32D-ILP32D-NEXT:    ret
+;
+; RV64-LABEL: va2_va_arg:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 72
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a1, sp, 24
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and a7, a0, a6
+; RV64-NEXT:    srliw a7, a7, 8
+; RV64-NEXT:    addi t0, a1, 1
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    sb a7, 0(t0)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi a7, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(a7)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    addi a3, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    ld a1, 0(a0)
+; RV64-NEXT:    addi a1, a1, 7
+; RV64-NEXT:    andi a1, a1, -8
+; RV64-NEXT:    addi a2, a1, 8
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    ld a0, 0(a1)
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr %va)
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
+
+define void @va2_caller() nounwind {
+; RV32-LABEL: va2_caller:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    call va2@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va2_caller:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a1, 1
+; RV64-NEXT:    call va2@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %1 = call i64 (ptr, ...) @va2(ptr undef, i32 1)
+  ret void
+}
+
+; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
+; vararg double is passed in a4 and a5 (rather than a3 and a4).
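+; As a sketch of the expected mapping (the call below is hypothetical, for
+; illustration only):
+;   call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.0)
+; would pass %a in a0, %b in a1/a2, and the double vararg in the aligned
+; pair a4/a5, leaving a3 unused.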
+
+define i64 @va3(i32 %a, i64 %b, ...) nounwind {
+; ILP32-LABEL: va3:
+; ILP32:       # %bb.0:
+; ILP32-NEXT:    addi sp, sp, -32
+; ILP32-NEXT:    addi a0, sp, 12
+; ILP32-NEXT:    sw a3, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 16
+; ILP32-NEXT:    sw a4, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 20
+; ILP32-NEXT:    sw a5, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 4
+; ILP32-NEXT:    addi a3, sp, 12
+; ILP32-NEXT:    lui a4, 16
+; ILP32-NEXT:    addi a4, a4, -1
+; ILP32-NEXT:    and a4, a0, a4
+; ILP32-NEXT:    srli a4, a4, 8
+; ILP32-NEXT:    addi a5, a3, 1
+; ILP32-NEXT:    sb a4, 0(a5)
+; ILP32-NEXT:    addi a4, sp, 24
+; ILP32-NEXT:    srli a5, a0, 16
+; ILP32-NEXT:    sb a0, 0(a3)
+; ILP32-NEXT:    addi a3, a3, 2
+; ILP32-NEXT:    sb a5, 0(a3)
+; ILP32-NEXT:    srli a5, a5, 8
+; ILP32-NEXT:    addi a3, a3, 1
+; ILP32-NEXT:    sb a5, 0(a3)
+; ILP32-NEXT:    lw a3, 0(a0)
+; ILP32-NEXT:    sw a6, 0(a4)
+; ILP32-NEXT:    addi a4, sp, 28
+; ILP32-NEXT:    sw a7, 0(a4)
+; ILP32-NEXT:    addi a3, a3, 7
+; ILP32-NEXT:    andi a4, a3, -8
+; ILP32-NEXT:    addi a3, a3, 8
+; ILP32-NEXT:    sw a3, 0(a0)
+; ILP32-NEXT:    lw a3, 0(a4)
+; ILP32-NEXT:    addi a4, a4, 4
+; ILP32-NEXT:    lw a4, 0(a4)
+; ILP32-NEXT:    add a0, a1, a3
+; ILP32-NEXT:    sltu a1, a0, a3
+; ILP32-NEXT:    add a2, a2, a4
+; ILP32-NEXT:    add a1, a2, a1
+; ILP32-NEXT:    addi sp, sp, 32
+; ILP32-NEXT:    ret
+;
+; RV32D-ILP32-LABEL: va3:
+; RV32D-ILP32:       # %bb.0:
+; RV32D-ILP32-NEXT:    addi sp, sp, -48
+; RV32D-ILP32-NEXT:    addi a0, sp, 28
+; RV32D-ILP32-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 32
+; RV32D-ILP32-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 36
+; RV32D-ILP32-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 20
+; RV32D-ILP32-NEXT:    addi a3, sp, 28
+; RV32D-ILP32-NEXT:    lui a4, 16
+; RV32D-ILP32-NEXT:    addi a4, a4, -1
+; RV32D-ILP32-NEXT:    and a4, a0, a4
+; RV32D-ILP32-NEXT:    srli a4, a4, 8
+; RV32D-ILP32-NEXT:    addi a5, a3, 1
+; RV32D-ILP32-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32-NEXT:    addi a4, sp, 40
+; RV32D-ILP32-NEXT:    srli a5, a0, 16
+; RV32D-ILP32-NEXT:    sb a0, 0(a3)
+; RV32D-ILP32-NEXT:    addi a3, a3, 2
+; RV32D-ILP32-NEXT:    sb a5, 0(a3)
+; RV32D-ILP32-NEXT:    srli a5, a5, 8
+; RV32D-ILP32-NEXT:    addi a3, a3, 1
+; RV32D-ILP32-NEXT:    sb a5, 0(a3)
+; RV32D-ILP32-NEXT:    lw a3, 0(a0)
+; RV32D-ILP32-NEXT:    sw a6, 0(a4)
+; RV32D-ILP32-NEXT:    addi a4, sp, 44
+; RV32D-ILP32-NEXT:    sw a7, 0(a4)
+; RV32D-ILP32-NEXT:    addi a3, a3, 7
+; RV32D-ILP32-NEXT:    andi a4, a3, -8
+; RV32D-ILP32-NEXT:    fld fa5, 0(a4)
+; RV32D-ILP32-NEXT:    addi a3, a3, 8
+; RV32D-ILP32-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32-NEXT:    fsd fa5, 8(sp)
+; RV32D-ILP32-NEXT:    lw a3, 8(sp)
+; RV32D-ILP32-NEXT:    lw a4, 12(sp)
+; RV32D-ILP32-NEXT:    add a0, a1, a3
+; RV32D-ILP32-NEXT:    sltu a1, a0, a3
+; RV32D-ILP32-NEXT:    add a2, a2, a4
+; RV32D-ILP32-NEXT:    add a1, a2, a1
+; RV32D-ILP32-NEXT:    addi sp, sp, 48
+; RV32D-ILP32-NEXT:    ret
+;
+; RV32D-ILP32F-LABEL: va3:
+; RV32D-ILP32F:       # %bb.0:
+; RV32D-ILP32F-NEXT:    addi sp, sp, -48
+; RV32D-ILP32F-NEXT:    addi a0, sp, 28
+; RV32D-ILP32F-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 32
+; RV32D-ILP32F-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 36
+; RV32D-ILP32F-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 20
+; RV32D-ILP32F-NEXT:    addi a3, sp, 28
+; RV32D-ILP32F-NEXT:    lui a4, 16
+; RV32D-ILP32F-NEXT:    addi a4, a4, -1
+; RV32D-ILP32F-NEXT:    and a4, a0, a4
+; RV32D-ILP32F-NEXT:    srli a4, a4, 8
+; RV32D-ILP32F-NEXT:    addi a5, a3, 1
+; RV32D-ILP32F-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32F-NEXT:    addi a4, sp, 40
+; RV32D-ILP32F-NEXT:    srli a5, a0, 16
+; RV32D-ILP32F-NEXT:    sb a0, 0(a3)
+; RV32D-ILP32F-NEXT:    addi a3, a3, 2
+; RV32D-ILP32F-NEXT:    sb a5, 0(a3)
+; RV32D-ILP32F-NEXT:    srli a5, a5, 8
+; RV32D-ILP32F-NEXT:    addi a3, a3, 1
+; RV32D-ILP32F-NEXT:    sb a5, 0(a3)
+; RV32D-ILP32F-NEXT:    lw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    sw a6, 0(a4)
+; RV32D-ILP32F-NEXT:    addi a4, sp, 44
+; RV32D-ILP32F-NEXT:    sw a7, 0(a4)
+; RV32D-ILP32F-NEXT:    addi a3, a3, 7
+; RV32D-ILP32F-NEXT:    andi a4, a3, -8
+; RV32D-ILP32F-NEXT:    fld fa5, 0(a4)
+; RV32D-ILP32F-NEXT:    addi a3, a3, 8
+; RV32D-ILP32F-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    fsd fa5, 8(sp)
+; RV32D-ILP32F-NEXT:    lw a3, 8(sp)
+; RV32D-ILP32F-NEXT:    lw a4, 12(sp)
+; RV32D-ILP32F-NEXT:    add a0, a1, a3
+; RV32D-ILP32F-NEXT:    sltu a1, a0, a3
+; RV32D-ILP32F-NEXT:    add a2, a2, a4
+; RV32D-ILP32F-NEXT:    add a1, a2, a1
+; RV32D-ILP32F-NEXT:    addi sp, sp, 48
+; RV32D-ILP32F-NEXT:    ret
+;
+; RV32D-ILP32D-LABEL: va3:
+; RV32D-ILP32D:       # %bb.0:
+; RV32D-ILP32D-NEXT:    addi sp, sp, -48
+; RV32D-ILP32D-NEXT:    addi a0, sp, 28
+; RV32D-ILP32D-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 32
+; RV32D-ILP32D-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 36
+; RV32D-ILP32D-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 20
+; RV32D-ILP32D-NEXT:    addi a3, sp, 28
+; RV32D-ILP32D-NEXT:    lui a4, 16
+; RV32D-ILP32D-NEXT:    addi a4, a4, -1
+; RV32D-ILP32D-NEXT:    and a4, a0, a4
+; RV32D-ILP32D-NEXT:    srli a4, a4, 8
+; RV32D-ILP32D-NEXT:    addi a5, a3, 1
+; RV32D-ILP32D-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32D-NEXT:    addi a4, sp, 40
+; RV32D-ILP32D-NEXT:    srli a5, a0, 16
+; RV32D-ILP32D-NEXT:    sb a0, 0(a3)
+; RV32D-ILP32D-NEXT:    addi a3, a3, 2
+; RV32D-ILP32D-NEXT:    sb a5, 0(a3)
+; RV32D-ILP32D-NEXT:    srli a5, a5, 8
+; RV32D-ILP32D-NEXT:    addi a3, a3, 1
+; RV32D-ILP32D-NEXT:    sb a5, 0(a3)
+; RV32D-ILP32D-NEXT:    lw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    sw a6, 0(a4)
+; RV32D-ILP32D-NEXT:    addi a4, sp, 44
+; RV32D-ILP32D-NEXT:    sw a7, 0(a4)
+; RV32D-ILP32D-NEXT:    addi a3, a3, 7
+; RV32D-ILP32D-NEXT:    andi a4, a3, -8
+; RV32D-ILP32D-NEXT:    fld fa5, 0(a4)
+; RV32D-ILP32D-NEXT:    addi a3, a3, 8
+; RV32D-ILP32D-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    fsd fa5, 8(sp)
+; RV32D-ILP32D-NEXT:    lw a3, 8(sp)
+; RV32D-ILP32D-NEXT:    lw a4, 12(sp)
+; RV32D-ILP32D-NEXT:    add a0, a1, a3
+; RV32D-ILP32D-NEXT:    sltu a1, a0, a3
+; RV32D-ILP32D-NEXT:    add a2, a2, a4
+; RV32D-ILP32D-NEXT:    add a1, a2, a1
+; RV32D-ILP32D-NEXT:    addi sp, sp, 48
+; RV32D-ILP32D-NEXT:    ret
+;
+; RV64-LABEL: va3:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    srli a3, a0, 32
+; RV64-NEXT:    addi a4, a2, 4
+; RV64-NEXT:    srliw a5, a0, 16
+; RV64-NEXT:    addi a6, a2, 2
+; RV64-NEXT:    lui a7, 16
+; RV64-NEXT:    addi a7, a7, -1
+; RV64-NEXT:    and t0, a0, a7
+; RV64-NEXT:    srliw t0, t0, 8
+; RV64-NEXT:    addi t1, a2, 1
+; RV64-NEXT:    sb a0, 0(a2)
+; RV64-NEXT:    sb t0, 0(t1)
+; RV64-NEXT:    srliw a2, a5, 8
+; RV64-NEXT:    addi t0, a6, 1
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    sb a2, 0(t0)
+; RV64-NEXT:    srliw a2, a3, 16
+; RV64-NEXT:    addi a5, a4, 2
+; RV64-NEXT:    and a6, a3, a7
+; RV64-NEXT:    srliw a6, a6, 8
+; RV64-NEXT:    addi a7, a4, 1
+; RV64-NEXT:    sb a3, 0(a4)
+; RV64-NEXT:    sb a6, 0(a7)
+; RV64-NEXT:    srliw a3, a2, 8
+; RV64-NEXT:    lw a4, 0(a0)
+; RV64-NEXT:    addi a6, a5, 1
+; RV64-NEXT:    sb a2, 0(a5)
+; RV64-NEXT:    sb a3, 0(a6)
+; RV64-NEXT:    addi a4, a4, 7
+; RV64-NEXT:    andi a2, a4, -8
+; RV64-NEXT:    slli a4, a4, 32
+; RV64-NEXT:    srli a4, a4, 32
+; RV64-NEXT:    addi a4, a4, 8
+; RV64-NEXT:    srli a3, a4, 32
+; RV64-NEXT:    addi a5, a0, 4
+; RV64-NEXT:    sw a4, 0(a0)
+; RV64-NEXT:    sw a3, 0(a5)
+; RV64-NEXT:    slli a2, a2, 32
+; RV64-NEXT:    srli a2, a2, 32
+; RV64-NEXT:    ld a0, 0(a2)
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load i32, ptr %va, align 4
+  %1 = add i32 %argp.cur, 7
+  %2 = and i32 %1, -8
+  %argp.cur.aligned = inttoptr i32 %1 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %va, align 4
+  %3 = inttoptr i32 %2 to ptr
+  %4 = load double, ptr %3, align 8
+  call void @llvm.va_end(ptr %va)
+  %5 = bitcast double %4 to i64
+  %6 = add i64 %b, %5
+  ret i64 %6
+}
+
+define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
+; ILP32-LABEL: va3_va_arg:
+; ILP32:       # %bb.0:
+; ILP32-NEXT:    addi sp, sp, -32
+; ILP32-NEXT:    addi a0, sp, 12
+; ILP32-NEXT:    sw a3, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 16
+; ILP32-NEXT:    sw a4, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 20
+; ILP32-NEXT:    sw a5, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 24
+; ILP32-NEXT:    sw a6, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 28
+; ILP32-NEXT:    sw a7, 0(a0)
+; ILP32-NEXT:    addi a0, sp, 4
+; ILP32-NEXT:    addi a3, sp, 12
+; ILP32-NEXT:    srli a4, a0, 16
+; ILP32-NEXT:    addi a5, a3, 2
+; ILP32-NEXT:    lui a6, 16
+; ILP32-NEXT:    addi a6, a6, -1
+; ILP32-NEXT:    and a6, a0, a6
+; ILP32-NEXT:    srli a6, a6, 8
+; ILP32-NEXT:    addi a7, a3, 1
+; ILP32-NEXT:    sb a0, 0(a3)
+; ILP32-NEXT:    sb a6, 0(a7)
+; ILP32-NEXT:    srli a3, a4, 8
+; ILP32-NEXT:    addi a6, a5, 1
+; ILP32-NEXT:    sb a4, 0(a5)
+; ILP32-NEXT:    sb a3, 0(a6)
+; ILP32-NEXT:    lw a3, 0(a0)
+; ILP32-NEXT:    addi a3, a3, 7
+; ILP32-NEXT:    andi a3, a3, -8
+; ILP32-NEXT:    addi a4, a3, 8
+; ILP32-NEXT:    sw a4, 0(a0)
+; ILP32-NEXT:    lw a4, 0(a3)
+; ILP32-NEXT:    addi a3, a3, 4
+; ILP32-NEXT:    lw a3, 0(a3)
+; ILP32-NEXT:    add a0, a1, a4
+; ILP32-NEXT:    sltu a1, a0, a4
+; ILP32-NEXT:    add a2, a2, a3
+; ILP32-NEXT:    add a1, a2, a1
+; ILP32-NEXT:    addi sp, sp, 32
+; ILP32-NEXT:    ret
+;
+; RV32D-ILP32-LABEL: va3_va_arg:
+; RV32D-ILP32:       # %bb.0:
+; RV32D-ILP32-NEXT:    addi sp, sp, -48
+; RV32D-ILP32-NEXT:    addi a0, sp, 28
+; RV32D-ILP32-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 32
+; RV32D-ILP32-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 36
+; RV32D-ILP32-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 40
+; RV32D-ILP32-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 44
+; RV32D-ILP32-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32-NEXT:    addi a0, sp, 20
+; RV32D-ILP32-NEXT:    addi a3, sp, 28
+; RV32D-ILP32-NEXT:    srli a4, a0, 16
+; RV32D-ILP32-NEXT:    addi a5, a3, 2
+; RV32D-ILP32-NEXT:    lui a6, 16
+; RV32D-ILP32-NEXT:    addi a6, a6, -1
+; RV32D-ILP32-NEXT:    and a6, a0, a6
+; RV32D-ILP32-NEXT:    srli a6, a6, 8
+; RV32D-ILP32-NEXT:    addi a7, a3, 1
+; RV32D-ILP32-NEXT:    sb a0, 0(a3)
+; RV32D-ILP32-NEXT:    sb a6, 0(a7)
+; RV32D-ILP32-NEXT:    srli a3, a4, 8
+; RV32D-ILP32-NEXT:    addi a6, a5, 1
+; RV32D-ILP32-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32-NEXT:    sb a3, 0(a6)
+; RV32D-ILP32-NEXT:    lw a3, 0(a0)
+; RV32D-ILP32-NEXT:    addi a3, a3, 7
+; RV32D-ILP32-NEXT:    andi a3, a3, -8
+; RV32D-ILP32-NEXT:    addi a4, a3, 8
+; RV32D-ILP32-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32-NEXT:    fld fa5, 0(a3)
+; RV32D-ILP32-NEXT:    fsd fa5, 8(sp)
+; RV32D-ILP32-NEXT:    lw a3, 8(sp)
+; RV32D-ILP32-NEXT:    lw a4, 12(sp)
+; RV32D-ILP32-NEXT:    add a0, a1, a3
+; RV32D-ILP32-NEXT:    sltu a1, a0, a3
+; RV32D-ILP32-NEXT:    add a2, a2, a4
+; RV32D-ILP32-NEXT:    add a1, a2, a1
+; RV32D-ILP32-NEXT:    addi sp, sp, 48
+; RV32D-ILP32-NEXT:    ret
+;
+; RV32D-ILP32F-LABEL: va3_va_arg:
+; RV32D-ILP32F:       # %bb.0:
+; RV32D-ILP32F-NEXT:    addi sp, sp, -48
+; RV32D-ILP32F-NEXT:    addi a0, sp, 28
+; RV32D-ILP32F-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 32
+; RV32D-ILP32F-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 36
+; RV32D-ILP32F-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 40
+; RV32D-ILP32F-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 44
+; RV32D-ILP32F-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a0, sp, 20
+; RV32D-ILP32F-NEXT:    addi a3, sp, 28
+; RV32D-ILP32F-NEXT:    srli a4, a0, 16
+; RV32D-ILP32F-NEXT:    addi a5, a3, 2
+; RV32D-ILP32F-NEXT:    lui a6, 16
+; RV32D-ILP32F-NEXT:    addi a6, a6, -1
+; RV32D-ILP32F-NEXT:    and a6, a0, a6
+; RV32D-ILP32F-NEXT:    srli a6, a6, 8
+; RV32D-ILP32F-NEXT:    addi a7, a3, 1
+; RV32D-ILP32F-NEXT:    sb a0, 0(a3)
+; RV32D-ILP32F-NEXT:    sb a6, 0(a7)
+; RV32D-ILP32F-NEXT:    srli a3, a4, 8
+; RV32D-ILP32F-NEXT:    addi a6, a5, 1
+; RV32D-ILP32F-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32F-NEXT:    sb a3, 0(a6)
+; RV32D-ILP32F-NEXT:    lw a3, 0(a0)
+; RV32D-ILP32F-NEXT:    addi a3, a3, 7
+; RV32D-ILP32F-NEXT:    andi a3, a3, -8
+; RV32D-ILP32F-NEXT:    addi a4, a3, 8
+; RV32D-ILP32F-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32F-NEXT:    fld fa5, 0(a3)
+; RV32D-ILP32F-NEXT:    fsd fa5, 8(sp)
+; RV32D-ILP32F-NEXT:    lw a3, 8(sp)
+; RV32D-ILP32F-NEXT:    lw a4, 12(sp)
+; RV32D-ILP32F-NEXT:    add a0, a1, a3
+; RV32D-ILP32F-NEXT:    sltu a1, a0, a3
+; RV32D-ILP32F-NEXT:    add a2, a2, a4
+; RV32D-ILP32F-NEXT:    add a1, a2, a1
+; RV32D-ILP32F-NEXT:    addi sp, sp, 48
+; RV32D-ILP32F-NEXT:    ret
+;
+; RV32D-ILP32D-LABEL: va3_va_arg:
+; RV32D-ILP32D:       # %bb.0:
+; RV32D-ILP32D-NEXT:    addi sp, sp, -48
+; RV32D-ILP32D-NEXT:    addi a0, sp, 28
+; RV32D-ILP32D-NEXT:    sw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 32
+; RV32D-ILP32D-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 36
+; RV32D-ILP32D-NEXT:    sw a5, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 40
+; RV32D-ILP32D-NEXT:    sw a6, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 44
+; RV32D-ILP32D-NEXT:    sw a7, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a0, sp, 20
+; RV32D-ILP32D-NEXT:    addi a3, sp, 28
+; RV32D-ILP32D-NEXT:    srli a4, a0, 16
+; RV32D-ILP32D-NEXT:    addi a5, a3, 2
+; RV32D-ILP32D-NEXT:    lui a6, 16
+; RV32D-ILP32D-NEXT:    addi a6, a6, -1
+; RV32D-ILP32D-NEXT:    and a6, a0, a6
+; RV32D-ILP32D-NEXT:    srli a6, a6, 8
+; RV32D-ILP32D-NEXT:    addi a7, a3, 1
+; RV32D-ILP32D-NEXT:    sb a0, 0(a3)
+; RV32D-ILP32D-NEXT:    sb a6, 0(a7)
+; RV32D-ILP32D-NEXT:    srli a3, a4, 8
+; RV32D-ILP32D-NEXT:    addi a6, a5, 1
+; RV32D-ILP32D-NEXT:    sb a4, 0(a5)
+; RV32D-ILP32D-NEXT:    sb a3, 0(a6)
+; RV32D-ILP32D-NEXT:    lw a3, 0(a0)
+; RV32D-ILP32D-NEXT:    addi a3, a3, 7
+; RV32D-ILP32D-NEXT:    andi a3, a3, -8
+; RV32D-ILP32D-NEXT:    addi a4, a3, 8
+; RV32D-ILP32D-NEXT:    sw a4, 0(a0)
+; RV32D-ILP32D-NEXT:    fld fa5, 0(a3)
+; RV32D-ILP32D-NEXT:    fsd fa5, 8(sp)
+; RV32D-ILP32D-NEXT:    lw a3, 8(sp)
+; RV32D-ILP32D-NEXT:    lw a4, 12(sp)
+; RV32D-ILP32D-NEXT:    add a0, a1, a3
+; RV32D-ILP32D-NEXT:    sltu a1, a0, a3
+; RV32D-ILP32D-NEXT:    add a2, a2, a4
+; RV32D-ILP32D-NEXT:    add a1, a2, a1
+; RV32D-ILP32D-NEXT:    addi sp, sp, 48
+; RV32D-ILP32D-NEXT:    ret
+;
+; RV64-LABEL: va3_va_arg:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    srli a3, a0, 32
+; RV64-NEXT:    addi a4, a2, 4
+; RV64-NEXT:    srliw a5, a0, 16
+; RV64-NEXT:    addi a6, a2, 2
+; RV64-NEXT:    lui a7, 16
+; RV64-NEXT:    addi a7, a7, -1
+; RV64-NEXT:    and t0, a0, a7
+; RV64-NEXT:    srliw t0, t0, 8
+; RV64-NEXT:    addi t1, a2, 1
+; RV64-NEXT:    sb a0, 0(a2)
+; RV64-NEXT:    sb t0, 0(t1)
+; RV64-NEXT:    srliw a2, a5, 8
+; RV64-NEXT:    addi t0, a6, 1
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    sb a2, 0(t0)
+; RV64-NEXT:    srliw a2, a3, 16
+; RV64-NEXT:    addi a5, a4, 2
+; RV64-NEXT:    and a6, a3, a7
+; RV64-NEXT:    srliw a6, a6, 8
+; RV64-NEXT:    addi a7, a4, 1
+; RV64-NEXT:    sb a3, 0(a4)
+; RV64-NEXT:    sb a6, 0(a7)
+; RV64-NEXT:    srliw a3, a2, 8
+; RV64-NEXT:    addi a4, a5, 1
+; RV64-NEXT:    sb a2, 0(a5)
+; RV64-NEXT:    sb a3, 0(a4)
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    addi a2, a2, 7
+; RV64-NEXT:    andi a2, a2, -8
+; RV64-NEXT:    addi a3, a2, 8
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    ld a0, 0(a2)
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr %va)
+  %2 = bitcast double %1 to i64
+  %3 = add i64 %b, %2
+  ret i64 %3
+}
+
+define void @va3_caller() nounwind {
+; RV32-LABEL: va3_caller:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a3, a0, -480
+; RV32-NEXT:    li a0, 2
+; RV32-NEXT:    li a1, 1111
+; RV32-NEXT:    li a2, 0
+; RV32-NEXT:    call va3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va3_caller:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a2, a0, -480
+; RV64-NEXT:    li a0, 2
+; RV64-NEXT:    li a1, 1111
+; RV64-NEXT:    call va3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+ %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
+ ret void
+}
+
+declare void @llvm.va_copy(ptr, ptr)
+
+define i32 @va4_va_copy(i32 %argno, ...) nounwind {
+; RV32-LABEL: va4_va_copy:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 36
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    addi a0, sp, 40
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    addi a0, sp, 44
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    addi a0, sp, 48
+; RV32-NEXT:    sw a4, 0(a0)
+; RV32-NEXT:    addi a0, sp, 52
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    addi a0, sp, 56
+; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    addi a0, sp, 60
+; RV32-NEXT:    sw a7, 0(a0)
+; RV32-NEXT:    addi s0, sp, 16
+; RV32-NEXT:    addi a0, sp, 36
+; RV32-NEXT:    srli a1, s0, 16
+; RV32-NEXT:    addi a2, a0, 2
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    and a3, s0, a3
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb s0, 0(a0)
+; RV32-NEXT:    sb a3, 0(a4)
+; RV32-NEXT:    srli a0, a1, 8
+; RV32-NEXT:    addi a3, a2, 1
+; RV32-NEXT:    sb a1, 0(a2)
+; RV32-NEXT:    sb a0, 0(a3)
+; RV32-NEXT:    lw a0, 0(s0)
+; RV32-NEXT:    addi a0, a0, 3
+; RV32-NEXT:    andi a0, a0, -4
+; RV32-NEXT:    addi a1, a0, 4
+; RV32-NEXT:    sw a1, 0(s0)
+; RV32-NEXT:    lw a1, 0(s0)
+; RV32-NEXT:    addi a2, sp, 12
+; RV32-NEXT:    lw s1, 0(a0)
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    lw a0, 0(a2)
+; RV32-NEXT:    call notdead@plt
+; RV32-NEXT:    lw a0, 0(s0)
+; RV32-NEXT:    addi a0, a0, 3
+; RV32-NEXT:    andi a0, a0, -4
+; RV32-NEXT:    addi a1, a0, 4
+; RV32-NEXT:    sw a1, 0(s0)
+; RV32-NEXT:    lw a1, 0(s0)
+; RV32-NEXT:    lw a0, 0(a0)
+; RV32-NEXT:    addi a1, a1, 3
+; RV32-NEXT:    andi a1, a1, -4
+; RV32-NEXT:    addi a2, a1, 4
+; RV32-NEXT:    sw a2, 0(s0)
+; RV32-NEXT:    lw a2, 0(s0)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    addi a2, a2, 3
+; RV32-NEXT:    andi a2, a2, -4
+; RV32-NEXT:    addi a3, a2, 4
+; RV32-NEXT:    sw a3, 0(s0)
+; RV32-NEXT:    lw a2, 0(a2)
+; RV32-NEXT:    add a0, a0, s1
+; RV32-NEXT:    add a1, a1, a2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va4_va_copy:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -112
+; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 72
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 80
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 88
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 96
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 104
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi s0, sp, 16
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    srli a1, s0, 32
+; RV64-NEXT:    addi a2, a0, 4
+; RV64-NEXT:    srliw a3, s0, 16
+; RV64-NEXT:    addi a4, a0, 2
+; RV64-NEXT:    lui a5, 16
+; RV64-NEXT:    addi a5, a5, -1
+; RV64-NEXT:    and a6, s0, a5
+; RV64-NEXT:    srliw a6, a6, 8
+; RV64-NEXT:    addi a7, a0, 1
+; RV64-NEXT:    sb s0, 0(a0)
+; RV64-NEXT:    sb a6, 0(a7)
+; RV64-NEXT:    srliw a0, a3, 8
+; RV64-NEXT:    addi a6, a4, 1
+; RV64-NEXT:    sb a3, 0(a4)
+; RV64-NEXT:    sb a0, 0(a6)
+; RV64-NEXT:    srliw a0, a1, 16
+; RV64-NEXT:    addi a3, a2, 2
+; RV64-NEXT:    and a5, a1, a5
+; RV64-NEXT:    srliw a4, a5, 8
+; RV64-NEXT:    addi a5, a2, 1
+; RV64-NEXT:    sb a1, 0(a2)
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    srliw a1, a0, 8
+; RV64-NEXT:    addi a2, a3, 1
+; RV64-NEXT:    sb a0, 0(a3)
+; RV64-NEXT:    sb a1, 0(a2)
+; RV64-NEXT:    ld a0, 0(s0)
+; RV64-NEXT:    addi a0, a0, 3
+; RV64-NEXT:    andi a0, a0, -4
+; RV64-NEXT:    addi a1, a0, 4
+; RV64-NEXT:    sd a1, 0(s0)
+; RV64-NEXT:    ld a1, 0(s0)
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    lw s1, 0(a0)
+; RV64-NEXT:    sd a2, 0(a1)
+; RV64-NEXT:    addi a0, a2, 4
+; RV64-NEXT:    lw a0, 0(a0)
+; RV64-NEXT:    lwu a1, 0(a2)
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    call notdead@plt
+; RV64-NEXT:    ld a0, 0(s0)
+; RV64-NEXT:    addi a0, a0, 3
+; RV64-NEXT:    andi a0, a0, -4
+; RV64-NEXT:    addi a1, a0, 4
+; RV64-NEXT:    sd a1, 0(s0)
+; RV64-NEXT:    ld a1, 0(s0)
+; RV64-NEXT:    lw a0, 0(a0)
+; RV64-NEXT:    addi a1, a1, 3
+; RV64-NEXT:    andi a1, a1, -4
+; RV64-NEXT:    addi a2, a1, 4
+; RV64-NEXT:    sd a2, 0(s0)
+; RV64-NEXT:    ld a2, 0(s0)
+; RV64-NEXT:    lw a1, 0(a1)
+; RV64-NEXT:    addi a2, a2, 3
+; RV64-NEXT:    andi a2, a2, -4
+; RV64-NEXT:    addi a3, a2, 4
+; RV64-NEXT:    sd a3, 0(s0)
+; RV64-NEXT:    lw a2, 0(a2)
+; RV64-NEXT:    add a0, a0, s1
+; RV64-NEXT:    add a1, a1, a2
+; RV64-NEXT:    addw a0, a0, a1
+; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 112
+; RV64-NEXT:    ret
+  %vargs = alloca ptr
+  %wargs = alloca ptr
+  call void @llvm.va_start(ptr %vargs)
+  %1 = va_arg ptr %vargs, i32
+  call void @llvm.va_copy(ptr %wargs, ptr %vargs)
+  %2 = load ptr, ptr %wargs, align 4
+  call void @notdead(ptr %2)
+  %3 = va_arg ptr %vargs, i32
+  %4 = va_arg ptr %vargs, i32
+  %5 = va_arg ptr %vargs, i32
+  call void @llvm.va_end(ptr %vargs)
+  call void @llvm.va_end(ptr %wargs)
+  %add1 = add i32 %3, %1
+  %add2 = add i32 %add1, %4
+  %add3 = add i32 %add2, %5
+  ret i32 %add3
+}
+
+; A function with no fixed arguments is not valid C, but can be
+; specified in LLVM IR. We must ensure the vararg save area is
+; still set up correctly.
+
+define i32 @va6_no_fixed_args(...) nounwind {
+; RV32-LABEL: va6_no_fixed_args:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -48
+; RV32-NEXT:    addi t0, sp, 16
+; RV32-NEXT:    sw a0, 0(t0)
+; RV32-NEXT:    addi a0, sp, 20
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    addi a0, sp, 24
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    addi a0, sp, 28
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    addi a0, sp, 32
+; RV32-NEXT:    sw a4, 0(a0)
+; RV32-NEXT:    addi a0, sp, 36
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    addi a0, sp, 40
+; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    addi a0, sp, 44
+; RV32-NEXT:    sw a7, 0(a0)
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    addi a3, a1, 2
+; RV32-NEXT:    lui a4, 16
+; RV32-NEXT:    addi a4, a4, -1
+; RV32-NEXT:    and a4, a0, a4
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    addi a5, a1, 1
+; RV32-NEXT:    sb a0, 0(a1)
+; RV32-NEXT:    sb a4, 0(a5)
+; RV32-NEXT:    srli a1, a2, 8
+; RV32-NEXT:    addi a4, a3, 1
+; RV32-NEXT:    sb a2, 0(a3)
+; RV32-NEXT:    sb a1, 0(a4)
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    addi a1, a1, 3
+; RV32-NEXT:    andi a1, a1, -4
+; RV32-NEXT:    addi a2, a1, 4
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    lw a0, 0(a1)
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va6_no_fixed_args:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    addi t0, sp, 16
+; RV64-NEXT:    sd a0, 0(t0)
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a0, sp, 32
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, sp, 40
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    addi a0, sp, 48
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    addi a0, sp, 56
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 72
+; RV64-NEXT:    sd a7, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and a7, a0, a6
+; RV64-NEXT:    srliw a7, a7, 8
+; RV64-NEXT:    addi t0, a1, 1
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    sb a7, 0(t0)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi a7, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(a7)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    addi a3, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    ld a1, 0(a0)
+; RV64-NEXT:    addi a1, a1, 3
+; RV64-NEXT:    andi a1, a1, -4
+; RV64-NEXT:    addi a2, a1, 4
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    lw a0, 0(a1)
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+; TODO: improve constant materialization of stack addresses
+
+define i32 @va_large_stack(ptr %fmt, ...) {
+; RV32-LABEL: va_large_stack:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 304
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 100000048
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 276
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 280
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 284
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 288
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    sw a4, 0(a0)
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 292
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 296
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a1, sp, 12
+; RV32-NEXT:    lui a2, 24414
+; RV32-NEXT:    addi a2, a2, 276
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    addi a4, a2, 2
+; RV32-NEXT:    lui a5, 16
+; RV32-NEXT:    addi a5, a5, -1
+; RV32-NEXT:    and a5, a1, a5
+; RV32-NEXT:    srli a5, a5, 8
+; RV32-NEXT:    sb a1, 0(a2)
+; RV32-NEXT:    addi a2, a2, 1
+; RV32-NEXT:    sb a5, 0(a2)
+; RV32-NEXT:    srli a2, a3, 8
+; RV32-NEXT:    addi a5, a4, 1
+; RV32-NEXT:    sb a3, 0(a4)
+; RV32-NEXT:    sb a2, 0(a5)
+; RV32-NEXT:    lw a2, 0(a1)
+; RV32-NEXT:    sw a6, 0(a0)
+; RV32-NEXT:    lui a0, 24414
+; RV32-NEXT:    addi a0, a0, 300
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    sw a7, 0(a0)
+; RV32-NEXT:    addi a0, a2, 4
+; RV32-NEXT:    sw a0, 0(a1)
+; RV32-NEXT:    lw a0, 0(a2)
+; RV32-NEXT:    lui a1, 24414
+; RV32-NEXT:    addi a1, a1, 304
+; RV32-NEXT:    add sp, sp, a1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: va_large_stack:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 336
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 100000080
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 280
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 288
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 296
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    sd a3, 0(a0)
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 304
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    sd a4, 0(a0)
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 312
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    sd a5, 0(a0)
+; RV64-NEXT:    lui a0, 24414
+; RV64-NEXT:    addiw a0, a0, 320
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    sd a6, 0(a0)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    lui a1, 24414
+; RV64-NEXT:    addiw a1, a1, 280
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    srli a2, a0, 32
+; RV64-NEXT:    addi a3, a1, 4
+; RV64-NEXT:    srliw a4, a0, 16
+; RV64-NEXT:    addi a5, a1, 2
+; RV64-NEXT:    lui a6, 16
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    and t0, a0, a6
+; RV64-NEXT:    srliw t0, t0, 8
+; RV64-NEXT:    sb a0, 0(a1)
+; RV64-NEXT:    addi a1, a1, 1
+; RV64-NEXT:    sb t0, 0(a1)
+; RV64-NEXT:    srliw a1, a4, 8
+; RV64-NEXT:    addi t0, a5, 1
+; RV64-NEXT:    sb a4, 0(a5)
+; RV64-NEXT:    sb a1, 0(t0)
+; RV64-NEXT:    srliw a1, a2, 16
+; RV64-NEXT:    addi a4, a3, 2
+; RV64-NEXT:    and a5, a2, a6
+; RV64-NEXT:    srliw a5, a5, 8
+; RV64-NEXT:    addi a6, a3, 1
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    sb a5, 0(a6)
+; RV64-NEXT:    srliw a2, a1, 8
+; RV64-NEXT:    addi a3, a4, 1
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    sb a2, 0(a3)
+; RV64-NEXT:    addi a1, a0, 4
+; RV64-NEXT:    lw a2, 0(a1)
+; RV64-NEXT:    lwu a3, 0(a0)
+; RV64-NEXT:    lui a4, 24414
+; RV64-NEXT:    addiw a4, a4, 328
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    sd a7, 0(a4)
+; RV64-NEXT:    slli a2, a2, 32
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    addi a3, a2, 4
+; RV64-NEXT:    srli a4, a3, 32
+; RV64-NEXT:    sw a3, 0(a0)
+; RV64-NEXT:    sw a4, 0(a1)
+; RV64-NEXT:    lw a0, 0(a2)
+; RV64-NEXT:    lui a1, 24414
+; RV64-NEXT:    addiw a1, a1, 336
+; RV64-NEXT:    add sp, sp, a1
+; RV64-NEXT:    ret
+  %large = alloca [ 100000000 x i8 ]
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va, align 4
+  %1 = load i32, ptr %argp.cur, align 4
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; LP64: {{.*}}
+; LP64D: {{.*}}
+; LP64F: {{.*}}
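+
+; The CHECK lines above are autogenerated, so they are long but mechanical.
+; As a reading aid, a rough C++ analogue of the va4_va_copy test (the case
+; this patch series is really about) is sketched below. The notdead prototype
+; and the _c name suffix are illustrative assumptions, not taken from the test.
+;
+;   #include <cstdarg>
+;
+;   // Illustrative prototype: the IR passes the raw va_list pointer value.
+;   extern "C" void notdead(va_list);
+;
+;   // Read one int, duplicate the list with va_copy, hand the copy to
+;   // notdead(), then read three more ints from the original list and sum.
+;   extern "C" int va4_va_copy_c(int argno, ...) {
+;     va_list vargs;
+;     va_list wargs;
+;     va_start(vargs, argno);
+;     int v1 = va_arg(vargs, int);
+;     va_copy(wargs, vargs);
+;     notdead(wargs);
+;     int v2 = va_arg(vargs, int);
+;     int v3 = va_arg(vargs, int);
+;     int v4 = va_arg(vargs, int);
+;     va_end(vargs);
+;     va_end(wargs);
+;     return v1 + v2 + v3 + v4;
+;   }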

>From 80974e985bf95da2e547d536442c558cb3260e54 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 27 Nov 2023 16:53:52 -0800
Subject: [PATCH 8/9] use legalizeIntrinsic

---
 llvm/docs/GlobalISel/GenericOpcode.rst        | 11 ----
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |  1 -
 llvm/include/llvm/Support/TargetOpcodes.def   |  4 --
 llvm/include/llvm/Target/GenericOpcodes.td    |  7 ---
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |  6 ---
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    | 31 -----------
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 51 +++++++++++++++++--
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |  3 ++
 8 files changed, 51 insertions(+), 63 deletions(-)

diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 7d71fa313108693..6c42ddcaff1eccf 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -898,17 +898,6 @@ G_VAARG
 
   I found no documentation for this instruction at the time of writing.
 
-G_VACOPY
-^^^^^^^^
-
-In a target-dependent way, it copies the source va_list element into the
-destination va_list element. This opcode is necessary because the copy may be
-arbitrarily complex.
-
-.. code-block:: none
-
-  G_VACOPY %2(p0), %3(p0)
-
 Other Operations
 ----------------
 
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index a2a343144e82976..350c91ad6fa4f61 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -431,7 +431,6 @@ class LegalizerHelper {
   LegalizeResult lowerMemcpyInline(MachineInstr &MI);
   LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
   LegalizeResult lowerVAArg(MachineInstr &MI);
-  LegalizeResult lowerVACopy(MachineInstr &MI);
 };
 
 /// Helper function that creates a libcall to the given \p Name using the given
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 5c3da9e65c74060..16a747d23e73e2e 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -457,10 +457,6 @@ HANDLE_TARGET_OPCODE(G_VASTART)
 /// Generic va_arg instruction. Stores to its one pointer operand.
 HANDLE_TARGET_OPCODE(G_VAARG)
 
-/// Generic va_copy instruction. Copies the source element into the destination
-/// element.
-HANDLE_TARGET_OPCODE(G_VACOPY)
-
 // Generic sign extend
 HANDLE_TARGET_OPCODE(G_SEXT)
 HANDLE_TARGET_OPCODE(G_SEXT_INREG)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 3b26ab35fa509f2..9a9c09d3c20d612 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -155,13 +155,6 @@ def G_VASTART : GenericInstruction {
   let mayStore = true;
 }
 
-def G_VACOPY : GenericInstruction {
-  let OutOperandList = (outs);
-  let InOperandList = (ins type0:$dest, type0:$src);
-  let hasSideEffects = true;
-  let mayStore = true;
-}
-
 def G_VAARG : GenericInstruction {
   let OutOperandList = (outs type0:$val);
   let InOperandList = (ins type1:$list, unknown:$align);
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 8f898c7d500da20..62450e4c43ff3e6 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2075,12 +2075,6 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                                 ListSize, Align(1)));
     return true;
   }
-  case Intrinsic::vacopy: {
-    Register DstList = getOrCreateVReg(*CI.getArgOperand(0));
-    Register SrcList = getOrCreateVReg(*CI.getArgOperand(1));
-    MIRBuilder.buildInstr(TargetOpcode::G_VACOPY, {}, {DstList, SrcList});
-    return true;
-  }
   case Intrinsic::dbg_value: {
     // This form of DBG_VALUE is target-independent.
     const DbgValueInst &DI = cast<DbgValueInst>(CI);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 1c254883c7bc6da..310b71dca37bf3c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3782,8 +3782,6 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
     return lowerVectorReduction(MI);
   case G_VAARG:
     return lowerVAArg(MI);
-  case G_VACOPY:
-    return lowerVACopy(MI);
   }
 }
 
@@ -7934,35 +7932,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerVAArg(MachineInstr &MI) {
   return Legalized;
 }
 
-LegalizerHelper::LegalizeResult LegalizerHelper::lowerVACopy(MachineInstr &MI) {
-  MachineFunction &MF = *MI.getMF();
-  const DataLayout &DL = MIRBuilder.getDataLayout();
-  LLVMContext &Ctx = MF.getFunction().getContext();
-
-  Register DstLst = MI.getOperand(0).getReg();
-  LLT PtrTy = MRI.getType(DstLst);
-
-  // Load the source va_list
-  Align Alignment = Align(DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx)));
-  MachineMemOperand *LoadMMO =
-      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
-                              MachineMemOperand::MOLoad, PtrTy, Alignment);
-  Register Tmp = MRI.createGenericVirtualRegister(PtrTy);
-  Register SrcLst = MI.getOperand(1).getReg();
-  MIRBuilder.buildLoad(Tmp, SrcLst, *LoadMMO);
-
-  // Store the result in the destination va_list
-  MachineMemOperand *StoreMMO =
-      MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
-                              MachineMemOperand::MOStore, PtrTy, Alignment);
-  MIRBuilder.buildStore(DstLst, Tmp, *StoreMMO);
-
-  Observer.changedInstr(MI);
-  Observer.erasingInstr(MI);
-  MI.eraseFromParent();
-  return Legalized;
-}
-
 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
   // On Darwin, -Os means optimize for size without hurting performance, so
   // only really optimize for size when -Oz (MinSize) is used.
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e1b4cdf486577cf..6fd78923775574b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -275,12 +275,57 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
   getActionDefinitionsBuilder(G_VAARG).lowerForCartesianProduct(
       {s8, s16, s32, s64, p0}, {p0});
 
-  // The va_list arguments must be a pointer
-  getActionDefinitionsBuilder(G_VACOPY).lowerFor({p0});
-
   getLegacyLegalizerInfo().computeTables();
 }
 
+static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
+  if (Ty.isVector())
+    return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
+                                Ty.getNumElements());
+  return IntegerType::get(C, Ty.getSizeInBits());
+}
+
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
+                                           MachineInstr &MI) const {
+  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+  switch (IntrinsicID) {
+  default:
+    return false;
+  case Intrinsic::vacopy: {
+    // vacopy arguments must be legal because of the intrinsic signature.
+    // No need to check here.
+
+    MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+    MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
+    MachineFunction &MF = *MI.getMF();
+    const DataLayout &DL = MIRBuilder.getDataLayout();
+    LLVMContext &Ctx = MF.getFunction().getContext();
+
+    Register DstLst = MI.getOperand(0).getReg();
+    LLT PtrTy = MRI.getType(DstLst);
+
+    // Load the source va_list
+    Align Alignment = Align(DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx)));
+    MachineMemOperand *LoadMMO =
+        MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                                MachineMemOperand::MOLoad, PtrTy, Alignment);
+    Register Tmp = MRI.createGenericVirtualRegister(PtrTy);
+    Register SrcLst = MI.getOperand(1).getReg();
+    MIRBuilder.buildLoad(Tmp, SrcLst, *LoadMMO);
+
+    // Store the result in the destination va_list
+    MachineMemOperand *StoreMMO =
+        MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
+                                MachineMemOperand::MOStore, PtrTy, Alignment);
+    MIRBuilder.buildStore(DstLst, Tmp, *StoreMMO);
+
+    MI.eraseFromParent();
+    return true;
+  }
+  }
+}
+
 bool RISCVLegalizerInfo::legalizeShlAshrLshr(
     MachineInstr &MI, MachineIRBuilder &MIRBuilder,
     GISelChangeObserver &Observer) const {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index daf6b2d13d1cd4f..1d038509aff1b2c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -28,6 +28,9 @@ class RISCVLegalizerInfo : public LegalizerInfo {
 
   bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI) const override;
 
+  bool legalizeIntrinsic(LegalizerHelper &Helper,
+                         MachineInstr &MI) const override;
+
 private:
   bool legalizeShlAshrLshr(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
                            GISelChangeObserver &Observer) const;

>From a5642538a43e18e91320f6af690beb6aade8e404 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 29 Nov 2023 10:43:13 -0800
Subject: [PATCH 9/9] !fixup fix MPO and simplify builder calls

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 18 ++++++---------
 .../RISCV/GlobalISel/irtranslator/vacopy.ll   |  4 ++--
 .../GlobalISel/legalizer/legalize-vacopy.mir  | 23 +++++++++++++++++++
 3 files changed, 32 insertions(+), 13 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 6fd78923775574b..7cb2cdc602374b2 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -13,6 +13,7 @@
 #include "RISCVLegalizerInfo.h"
 #include "RISCVMachineFunctionInfo.h"
 #include "RISCVSubtarget.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -285,7 +286,6 @@ static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
   return IntegerType::get(C, Ty.getSizeInBits());
 }
 
-#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                            MachineInstr &MI) const {
   Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
@@ -302,22 +302,18 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     const DataLayout &DL = MIRBuilder.getDataLayout();
     LLVMContext &Ctx = MF.getFunction().getContext();
 
-    Register DstLst = MI.getOperand(0).getReg();
+    Register DstLst = MI.getOperand(1).getReg();
     LLT PtrTy = MRI.getType(DstLst);
 
     // Load the source va_list
     Align Alignment = Align(DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx)));
-    MachineMemOperand *LoadMMO =
-        MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
-                                MachineMemOperand::MOLoad, PtrTy, Alignment);
-    Register Tmp = MRI.createGenericVirtualRegister(PtrTy);
-    Register SrcLst = MI.getOperand(1).getReg();
-    MIRBuilder.buildLoad(Tmp, SrcLst, *LoadMMO);
+    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
+        MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
+    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
 
     // Store the result in the destination va_list
-    MachineMemOperand *StoreMMO =
-        MF.getMachineMemOperand(MachinePointerInfo::getUnknownStack(MF),
-                                MachineMemOperand::MOStore, PtrTy, Alignment);
+    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
+        MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
     MIRBuilder.buildStore(DstLst, Tmp, *StoreMMO);
 
     MI.eraseFromParent();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll
index 1fdd2e1cdc7650b..48d72108335e469 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll
@@ -12,7 +12,7 @@ define void @test_va_copy(ptr %dest_list, ptr %src_list) {
   ; RV32I-NEXT: {{  $}}
   ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
-  ; RV32I-NEXT:   G_VACOPY [[COPY]](p0), [[COPY1]]
+  ; RV32I-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[COPY]](p0), [[COPY1]](p0)
   ; RV32I-NEXT:   PseudoRET
   ;
   ; RV64I-LABEL: name: test_va_copy
@@ -21,7 +21,7 @@ define void @test_va_copy(ptr %dest_list, ptr %src_list) {
   ; RV64I-NEXT: {{  $}}
   ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
-  ; RV64I-NEXT:   G_VACOPY [[COPY]](p0), [[COPY1]]
+  ; RV64I-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[COPY]](p0), [[COPY1]](p0)
   ; RV64I-NEXT:   PseudoRET
   call void @llvm.va_copy(ptr %dest_list, ptr %src_list)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
new file mode 100644
index 000000000000000..f9eda1252937e87
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
@@ -0,0 +1,23 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name:            test_va_copy
+body:             |
+  bb.1:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: test_va_copy
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY1]](p0) :: (load (p0))
+    ; CHECK-NEXT: G_STORE [[COPY]](p0), [[LOAD]](p0) :: (store (p0))
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(p0) = COPY $x11
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), %0(p0), %1(p0)
+    PseudoRET
+...
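
For readers skimming the MIR above: on RISC-V the va_list type is a single
pointer into the vararg save area, so the legalized load/store pair is just a
pointer copy. A minimal C++ sketch of the equivalent operation, assuming that
single-pointer layout (names are illustrative, not from the patch):

    // RISC-V va_list is one pointer, so va_copy reduces to copying it.
    using RVVaList = void *;

    void vacopyEquivalent(RVVaList *DestList, RVVaList *SrcList) {
      // Load the pointer from the source list, store it to the destination,
      // mirroring the G_LOAD/G_STORE pair produced by the legalizer.
      *DestList = *SrcList;
    }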


