[llvm] [RISCV] Allocate the varargs GPR save area as a single object. (PR #74354)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 4 10:29:33 PST 2023


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/74354

Previously we allocated one fixed object for each GPR in the save area. We also allocated an object at the same offset twice: once for use by VASTART and then again for the first register in the save loop.

This patch uses a single object for all of the registers and shares it with VASTART. This is more consistent with other targets such as AArch64 and ARM.

I've also removed the setValue(nullptr) on the store memory operands. Having a single object makes me much more comfortable that alias analysis can see what is going on. This led to the scheduling changes in push-pop-popret.ll and vararg.ll.
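
For reference, here is a reduced varargs function in the style of the
vararg.ll tests updated below (a hypothetical illustration, not code
taken from the patch). Compiling it with llc -mtriple=riscv64 spills
a1-a7 to the save area; with this patch those stores all target offsets
within a single fixed-stack object instead of seven separate objects:

  ; Hypothetical reduced test; va_arg is expanded manually as in vararg.ll.
  declare void @llvm.va_start(ptr)
  declare void @llvm.va_end(ptr)

  define i32 @va_first(ptr %fmt, ...) nounwind {
    %va = alloca ptr
    call void @llvm.va_start(ptr %va)
    ; Load the current argument pointer and advance it by one 8-byte slot.
    %argp = load ptr, ptr %va
    %next = getelementptr inbounds i8, ptr %argp, i64 8
    store ptr %next, ptr %va
    ; Read the first variadic argument (low 4 bytes of the slot).
    %v = load i32, ptr %argp
    call void @llvm.va_end(ptr %va)
    ret i32 %v
  }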

From 71218d9f100da6b51adfb9f9910873d1b0b08ed4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 1 Dec 2023 17:22:56 -0800
Subject: [PATCH] [RISCV] Allocate the varargs GPR save area as a single
 object.

Previously we allocated one fixed object for each GPR in the save
area. We also allocated an object at the same offset twice: once for
use by VASTART and then again for the first register in the save loop.

This patch uses a single object for all of the registers and shares
it with VASTART. This is more consistent with other targets such as
AArch64 and ARM.

I've also removed the setValue(nullptr) on the store memory operands.
Having a single object makes me much more comfortable that alias
analysis can see what is going on. This led to the scheduling changes
in push-pop-popret.ll and vararg.ll.
---
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  |  83 +++---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  64 ++---
 .../irtranslator/lower-args-vararg.ll         | 252 ++++++++++--------
 llvm/test/CodeGen/RISCV/push-pop-popret.ll    |  24 +-
 llvm/test/CodeGen/RISCV/vararg.ll             |  32 +--
 5 files changed, 243 insertions(+), 212 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 9e96fba069c4e..d3357b70e5143 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -436,60 +436,63 @@ void RISCVCallLowering::saveVarArgRegisters(
   const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
   unsigned XLenInBytes = Subtarget.getXLen() / 8;
   ArrayRef<MCPhysReg> ArgRegs(ArgGPRs);
+  MachineRegisterInfo &MRI = MF.getRegInfo();
   unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 
-  // Offset of the first variable argument from stack pointer, and size of
-  // the vararg save area. For now, the varargs save area is either zero or
-  // large enough to hold a0-a7.
-  int VaArgOffset;
+  // Size of the vararg save area. For now, the varargs save area is either
+  // zero or large enough to hold a0-a7.
   int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+  int FI;
 
   // If all registers are allocated, then all varargs must be passed on the
   // stack and we don't need to save any argregs.
   if (VarArgsSaveSize == 0) {
-    VaArgOffset = Assigner.StackSize;
+    int VaArgOffset = Assigner.StackSize;
+    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
   } else {
-    VaArgOffset = -VarArgsSaveSize;
+    int VaArgOffset = -VarArgsSaveSize;
+    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);
+
+    // If saving an odd number of registers then create an extra stack slot to
+    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+    // offsets to even-numbered registered remain 2*XLEN-aligned.
+    if (Idx % 2) {
+      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+      VarArgsSaveSize += XLenInBytes;
+    }
+
+    const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
+                                Subtarget.getXLen());
+    const LLT sXLen = LLT::scalar(Subtarget.getXLen());
+
+    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
+    auto Offset = MIRBuilder.buildConstant(
+        MRI.createGenericVirtualRegister(sXLen), XLenInBytes);
+
+    // Copy the integer registers that may have been used for passing varargs
+    // to the vararg save area.
+    const MVT XLenVT = Subtarget.getXLenVT();
+    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
+      const Register VReg = MRI.createGenericVirtualRegister(sXLen);
+      Handler.assignValueToReg(
+          VReg, ArgRegs[I],
+          CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
+                              ArgRegs[I], XLenVT, CCValAssign::Full));
+      auto MPO =
+          MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
+      auto Store =
+          MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
+                                   FIN.getReg(0), Offset);
+    }
   }
 
   // Record the frame index of the first variable argument which is a value
   // necessary to G_VASTART.
-  MachineFrameInfo &MFI = MF.getFrameInfo();
-  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
-  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
   RVFI->setVarArgsFrameIndex(FI);
-
-  // If saving an odd number of registers then create an extra stack slot to
-  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
-  // offsets to even-numbered registered remain 2*XLEN-aligned.
-  if (Idx % 2) {
-    MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
-    VarArgsSaveSize += XLenInBytes;
-  }
   RVFI->setVarArgsSaveSize(VarArgsSaveSize);
-
-  // Copy the integer registers that may have been used for passing varargs
-  // to the vararg save area.
-  const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
-                              Subtarget.getXLen());
-  const LLT sXLen = LLT::scalar(Subtarget.getXLen());
-  const MVT XLenVT = Subtarget.getXLenVT();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
-    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
-    Handler.assignValueToReg(
-        VReg, ArgRegs[I],
-        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
-                            ArgRegs[I], XLenVT, CCValAssign::Full));
-    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
-    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
-    auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
-    auto Store =
-        MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
-    // This was taken from  SelectionDAG, but we are not sure why it exists.
-    // It is being investigated in github.com/llvm/llvm-project/issues/73735.
-    Store->memoperands()[0]->setValue((Value *)nullptr);
-  }
 }
 
 bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cf1b11c14b6d0..97cc92bcb125f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17691,49 +17691,49 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     MachineRegisterInfo &RegInfo = MF.getRegInfo();
     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 
-    // Offset of the first variable argument from stack pointer, and size of
-    // the vararg save area. For now, the varargs save area is either zero or
-    // large enough to hold a0-a7.
-    int VaArgOffset;
+    // Size of the vararg save area. For now, the varargs save area is either
+    // zero or large enough to hold a0-a7.
     int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+    int FI;
 
     // If all registers are allocated, then all varargs must be passed on the
     // stack and we don't need to save any argregs.
     if (VarArgsSaveSize == 0) {
-      VaArgOffset = CCInfo.getStackSize();
+      int VaArgOffset = CCInfo.getStackSize();
+      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
     } else {
-      VaArgOffset = -VarArgsSaveSize;
+      int VaArgOffset = -VarArgsSaveSize;
+      FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);
+
+      // If saving an odd number of registers then create an extra stack slot to
+      // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+      // offsets to even-numbered registers remain 2*XLEN-aligned.
+      if (Idx % 2) {
+        MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
+                              true);
+        VarArgsSaveSize += XLenInBytes;
+      }
+
+      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+
+      // Copy the integer registers that may have been used for passing varargs
+      // to the vararg save area.
+      for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
+        const Register Reg = RegInfo.createVirtualRegister(RC);
+        RegInfo.addLiveIn(ArgRegs[I], Reg);
+        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
+        SDValue Store = DAG.getStore(
+            Chain, DL, ArgValue, FIN,
+            MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes));
+        OutChains.push_back(Store);
+        FIN =
+            DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(XLenInBytes), DL);
+      }
     }
 
     // Record the frame index of the first variable argument
     // which is a value necessary to VASTART.
-    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
     RVFI->setVarArgsFrameIndex(FI);
-
-    // If saving an odd number of registers then create an extra stack slot to
-    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
-    // offsets to even-numbered registered remain 2*XLEN-aligned.
-    if (Idx % 2) {
-      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
-      VarArgsSaveSize += XLenInBytes;
-    }
-
-    // Copy the integer registers that may have been used for passing varargs
-    // to the vararg save area.
-    for (unsigned I = Idx; I < ArgRegs.size();
-         ++I, VaArgOffset += XLenInBytes) {
-      const Register Reg = RegInfo.createVirtualRegister(RC);
-      RegInfo.addLiveIn(ArgRegs[I], Reg);
-      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
-      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
-      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
-      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
-                                   MachinePointerInfo::getFixedStack(MF, FI));
-      cast<StoreSDNode>(Store.getNode())
-          ->getMemOperand()
-          ->setValue((Value *)nullptr);
-      OutChains.push_back(Store);
-    }
     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
   }
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
index ecfccc48bb34f..020f171076995 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -10,27 +10,29 @@ define void @va1arg(ptr %a, ...) {
   ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
   ; RV32-NEXT: {{  $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
+  ; RV32-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
+  ; RV32-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
+  ; RV32-NEXT:   [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va1arg
@@ -38,27 +40,29 @@ define void @va1arg(ptr %a, ...) {
   ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
   ; RV64-NEXT: {{  $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
+  ; RV64-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
+  ; RV64-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
+  ; RV64-NEXT:   [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -70,24 +74,26 @@ define void @va2arg(ptr %a, ptr %b, ...) {
   ; RV32-NEXT: {{  $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 8)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.0 + 16, align 8)
+  ; RV32-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.0 + 20)
+  ; RV32-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va2arg
@@ -96,24 +102,26 @@ define void @va2arg(ptr %a, ptr %b, ...) {
   ; RV64-NEXT: {{  $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16)
+  ; RV64-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40)
+  ; RV64-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -126,21 +134,23 @@ define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
+  ; RV32-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va3arg
@@ -150,21 +160,23 @@ define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
+  ; RV64-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -178,18 +190,20 @@ define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 16)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va4arg
@@ -200,18 +214,20 @@ define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -226,15 +242,17 @@ define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va5arg
@@ -246,15 +264,17 @@ define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -270,12 +290,14 @@ define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 8)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va6arg
@@ -288,12 +310,14 @@ define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -310,9 +334,11 @@ define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va7arg
@@ -326,9 +352,11 @@ define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 776944b177636..f60a6a186c1d8 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -1020,14 +1020,14 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
 ; RV64IZCMP-NEXT:    sd a5, 56(sp)
 ; RV64IZCMP-NEXT:    sd a4, 48(sp)
 ; RV64IZCMP-NEXT:    sd a3, 40(sp)
-; RV64IZCMP-NEXT:    sd a2, 32(sp)
-; RV64IZCMP-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-NEXT:    addi a0, sp, 24
 ; RV64IZCMP-NEXT:    sd a0, 8(sp)
 ; RV64IZCMP-NEXT:    lwu a0, 12(sp)
-; RV64IZCMP-NEXT:    lwu a1, 8(sp)
+; RV64IZCMP-NEXT:    lwu a3, 8(sp)
+; RV64IZCMP-NEXT:    sd a2, 32(sp)
+; RV64IZCMP-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-NEXT:    slli a0, a0, 32
-; RV64IZCMP-NEXT:    or a0, a0, a1
+; RV64IZCMP-NEXT:    or a0, a0, a3
 ; RV64IZCMP-NEXT:    addi a1, a0, 4
 ; RV64IZCMP-NEXT:    sw a1, 8(sp)
 ; RV64IZCMP-NEXT:    srli a1, a1, 32
@@ -1060,14 +1060,14 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
 ; RV64IZCMP-SR-NEXT:    sd a5, 56(sp)
 ; RV64IZCMP-SR-NEXT:    sd a4, 48(sp)
 ; RV64IZCMP-SR-NEXT:    sd a3, 40(sp)
-; RV64IZCMP-SR-NEXT:    sd a2, 32(sp)
-; RV64IZCMP-SR-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-SR-NEXT:    addi a0, sp, 24
 ; RV64IZCMP-SR-NEXT:    sd a0, 8(sp)
 ; RV64IZCMP-SR-NEXT:    lwu a0, 12(sp)
-; RV64IZCMP-SR-NEXT:    lwu a1, 8(sp)
+; RV64IZCMP-SR-NEXT:    lwu a3, 8(sp)
+; RV64IZCMP-SR-NEXT:    sd a2, 32(sp)
+; RV64IZCMP-SR-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-SR-NEXT:    slli a0, a0, 32
-; RV64IZCMP-SR-NEXT:    or a0, a0, a1
+; RV64IZCMP-SR-NEXT:    or a0, a0, a3
 ; RV64IZCMP-SR-NEXT:    addi a1, a0, 4
 ; RV64IZCMP-SR-NEXT:    sw a1, 8(sp)
 ; RV64IZCMP-SR-NEXT:    srli a1, a1, 32
@@ -1100,14 +1100,14 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
 ; RV64I-NEXT:    sd a5, 56(sp)
 ; RV64I-NEXT:    sd a4, 48(sp)
 ; RV64I-NEXT:    sd a3, 40(sp)
-; RV64I-NEXT:    sd a2, 32(sp)
-; RV64I-NEXT:    sd a1, 24(sp)
 ; RV64I-NEXT:    addi a0, sp, 24
 ; RV64I-NEXT:    sd a0, 8(sp)
 ; RV64I-NEXT:    lwu a0, 12(sp)
-; RV64I-NEXT:    lwu a1, 8(sp)
+; RV64I-NEXT:    lwu a3, 8(sp)
+; RV64I-NEXT:    sd a2, 32(sp)
+; RV64I-NEXT:    sd a1, 24(sp)
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:    addi a1, a0, 4
 ; RV64I-NEXT:    sw a1, 8(sp)
 ; RV64I-NEXT:    srli a1, a1, 32
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 59aa1d9ae2893..2db9bad77fc53 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -489,12 +489,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -48
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a7, 44(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw a6, 40(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a5, 36(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a4, 32(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a3, 28(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw a1, 20(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw a6, 40(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a2, 24(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw a1, 20(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, sp, 20
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 12(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, sp, 27
@@ -513,12 +513,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
+; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a5, 20(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a4, 16(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a3, 12(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    sw a1, 4(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a2, 8(s0)
+; ILP32-ILP32F-WITHFP-NEXT:    sw a1, 4(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, s0, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, -12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, s0, 11
@@ -536,12 +536,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -48
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a7, 44(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a6, 40(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a5, 36(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a4, 32(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a3, 28(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a1, 20(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a6, 40(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a2, 24(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a1, 20(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, sp, 20
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 12(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, sp, 27
@@ -560,12 +560,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 48(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, sp, 24
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    lw a0, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 7
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    srli a1, a1, 32
@@ -587,12 +587,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, s0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a0, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 7
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    srli a1, a1, 32
@@ -882,11 +882,11 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 56(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 48(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 40(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 32(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 24(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    lw a0, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 24(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 16(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 7
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a0, 32
@@ -909,11 +909,11 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 40(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 16(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 8(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, s0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a0, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 8(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 0(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 7
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a0, 32
