[llvm] 3c5b42a - [RISCV] Allocate the varargs GPR save area as a single object. (#74354)

Author: Craig Topper
Date: 2023-12-05T10:30:01-08:00
New Revision: 3c5b42acd3e2022da40f6e4b50f5035279b8b93a

URL: https://github.com/llvm/llvm-project/commit/3c5b42acd3e2022da40f6e4b50f5035279b8b93a
DIFF: https://github.com/llvm/llvm-project/commit/3c5b42acd3e2022da40f6e4b50f5035279b8b93a.diff

LOG: [RISCV] Allocate the varargs GPR save area as a single object. (#74354)

Previously we allocated one object for each GPR. We also allocated the
same offset twice, once for the frame index recorded for VASTART and
then again for the first register in the save loop.

This patch uses a single object for all of the registers and shares it
with VASTART. This is more consistent with other targets like AArch64
and ARM.
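
As a condensed sketch (simplified from the RISCVISelLowering.cpp hunk
below, with the VarArgsSaveSize == 0 case and the odd-register padding
slot elided), the new scheme creates one fixed object covering the whole
save area, stores each unallocated argument register at an increasing
offset inside it, and records the same frame index for VASTART:

  // One object spans the whole GPR save area; it sits just below the
  // incoming stack pointer.
  int FI = MFI.CreateFixedObject(VarArgsSaveSize, -VarArgsSaveSize,
                                 /*IsImmutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
    const Register Reg = RegInfo.createVirtualRegister(RC);
    RegInfo.addLiveIn(ArgRegs[I], Reg);
    SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
    // Each register lands at offset (I - Idx) * XLenInBytes within FI.
    OutChains.push_back(DAG.getStore(
        Chain, DL, ArgValue, FIN,
        MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes)));
    FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(XLenInBytes), DL);
  }
  RVFI->setVarArgsFrameIndex(FI); // the same object serves VASTART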

I've also removed the setValue(nullptr) call from the memory operand.
Having a single object makes me much more comfortable that alias
analysis can see what is going on. This led to the scheduling changes
in push-pop-popret.ll and vararg.ll.
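
Concretely, the memory operands change from anonymous per-slot stores to
offsets into a single named fixed-stack object; both snippets below are
quoted from the hunks that follow:

  // Before: one fixed object per register, with the IR value cleared so
  // alias analysis could not reason about the store.
  auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
  Store->memoperands()[0]->setValue((Value *)nullptr);

  // After: one shared object, addressed by a per-register offset.
  auto MPO =
      MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);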

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
    llvm/test/CodeGen/RISCV/vararg.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 5a827cab89e09..50ed85acdec00 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -432,60 +432,63 @@ void RISCVCallLowering::saveVarArgRegisters(
   const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
   unsigned XLenInBytes = Subtarget.getXLen() / 8;
   ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
   unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 
-  // Offset of the first variable argument from stack pointer, and size of
-  // the vararg save area. For now, the varargs save area is either zero or
-  // large enough to hold a0-a7.
-  int VaArgOffset;
+  // Size of the vararg save area. For now, the varargs save area is either
+  // zero or large enough to hold a0-a7.
   int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+  int FI;
 
   // If all registers are allocated, then all varargs must be passed on the
   // stack and we don't need to save any argregs.
   if (VarArgsSaveSize == 0) {
-    VaArgOffset = Assigner.StackSize;
+    int VaArgOffset = Assigner.StackSize;
+    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
   } else {
-    VaArgOffset = -VarArgsSaveSize;
+    int VaArgOffset = -VarArgsSaveSize;
+    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);
+
+    // If saving an odd number of registers then create an extra stack slot to
+    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+    // offsets to even-numbered registers remain 2*XLEN-aligned.
+    if (Idx % 2) {
+      MFI.CreateFixedObject(XLenInBytes,
+                            VaArgOffset - static_cast<int>(XLenInBytes), true);
+      VarArgsSaveSize += XLenInBytes;
+    }
+
+    const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
+                                Subtarget.getXLen());
+    const LLT sXLen = LLT::scalar(Subtarget.getXLen());
+
+    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
+    auto Offset = MIRBuilder.buildConstant(
+        MRI.createGenericVirtualRegister(sXLen), XLenInBytes);
+
+    // Copy the integer registers that may have been used for passing varargs
+    // to the vararg save area.
+    const MVT XLenVT = Subtarget.getXLenVT();
+    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
+      const Register VReg = MRI.createGenericVirtualRegister(sXLen);
+      Handler.assignValueToReg(
+          VReg, ArgRegs[I],
+          CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
+                              ArgRegs[I], XLenVT, CCValAssign::Full));
+      auto MPO =
+          MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
+      MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
+                                   FIN.getReg(0), Offset);
+    }
   }
 
   // Record the frame index of the first variable argument which is a value
   // necessary to G_VASTART.
-  MachineFrameInfo &MFI = MF.getFrameInfo();
-  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
-  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
   RVFI->setVarArgsFrameIndex(FI);
-
-  // If saving an odd number of registers then create an extra stack slot to
-  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
-  // offsets to even-numbered registers remain 2*XLEN-aligned.
-  if (Idx % 2) {
-    MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
-    VarArgsSaveSize += XLenInBytes;
-  }
   RVFI->setVarArgsSaveSize(VarArgsSaveSize);
-
-  // Copy the integer registers that may have been used for passing varargs
-  // to the vararg save area.
-  const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
-                              Subtarget.getXLen());
-  const LLT sXLen = LLT::scalar(Subtarget.getXLen());
-  const MVT XLenVT = Subtarget.getXLenVT();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
-    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
-    Handler.assignValueToReg(
-        VReg, ArgRegs[I],
-        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
-                            ArgRegs[I], XLenVT, CCValAssign::Full));
-    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
-    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
-    auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
-    auto Store =
-        MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
-    // This was taken from  SelectionDAG, but we are not sure why it exists.
-    // It is being investigated in github.com/llvm/llvm-project/issues/73735.
-    Store->memoperands()[0]->setValue((Value *)nullptr);
-  }
 }
 
 bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 72de6d1807989..f2ec422b54a92 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17698,49 +17698,49 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     MachineRegisterInfo &RegInfo = MF.getRegInfo();
     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 
-    // Offset of the first variable argument from stack pointer, and size of
-    // the vararg save area. For now, the varargs save area is either zero or
-    // large enough to hold a0-a7.
-    int VaArgOffset;
+    // Size of the vararg save area. For now, the varargs save area is either
+    // zero or large enough to hold a0-a7.
     int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+    int FI;
 
     // If all registers are allocated, then all varargs must be passed on the
     // stack and we don't need to save any argregs.
     if (VarArgsSaveSize == 0) {
-      VaArgOffset = CCInfo.getStackSize();
+      int VaArgOffset = CCInfo.getStackSize();
+      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
     } else {
-      VaArgOffset = -VarArgsSaveSize;
+      int VaArgOffset = -VarArgsSaveSize;
+      FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);
+
+      // If saving an odd number of registers then create an extra stack slot to
+      // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+      // offsets to even-numbered registers remain 2*XLEN-aligned.
+      if (Idx % 2) {
+        MFI.CreateFixedObject(
+            XLenInBytes, VaArgOffset - static_cast<int>(XLenInBytes), true);
+        VarArgsSaveSize += XLenInBytes;
+      }
+
+      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+
+      // Copy the integer registers that may have been used for passing varargs
+      // to the vararg save area.
+      for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
+        const Register Reg = RegInfo.createVirtualRegister(RC);
+        RegInfo.addLiveIn(ArgRegs[I], Reg);
+        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
+        SDValue Store = DAG.getStore(
+            Chain, DL, ArgValue, FIN,
+            MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes));
+        OutChains.push_back(Store);
+        FIN =
+            DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(XLenInBytes), DL);
+      }
     }
 
     // Record the frame index of the first variable argument
     // which is a value necessary to VASTART.
-    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
     RVFI->setVarArgsFrameIndex(FI);
-
-    // If saving an odd number of registers then create an extra stack slot to
-    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
-    // offsets to even-numbered registers remain 2*XLEN-aligned.
-    if (Idx % 2) {
-      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
-      VarArgsSaveSize += XLenInBytes;
-    }
-
-    // Copy the integer registers that may have been used for passing varargs
-    // to the vararg save area.
-    for (unsigned I = Idx; I < ArgRegs.size();
-         ++I, VaArgOffset += XLenInBytes) {
-      const Register Reg = RegInfo.createVirtualRegister(RC);
-      RegInfo.addLiveIn(ArgRegs[I], Reg);
-      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
-      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
-      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
-      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
-                                   MachinePointerInfo::getFixedStack(MF, FI));
-      cast<StoreSDNode>(Store.getNode())
-          ->getMemOperand()
-          ->setValue((Value *)nullptr);
-      OutChains.push_back(Store);
-    }
     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
   }
 

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
index ecfccc48bb34f..020f171076995 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -10,27 +10,29 @@ define void @va1arg(ptr %a, ...) {
   ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
   ; RV32-NEXT: {{  $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
+  ; RV32-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
+  ; RV32-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
+  ; RV32-NEXT:   [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va1arg
@@ -38,27 +40,29 @@ define void @va1arg(ptr %a, ...) {
   ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
   ; RV64-NEXT: {{  $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
+  ; RV64-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
+  ; RV64-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
+  ; RV64-NEXT:   [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -70,24 +74,26 @@ define void @va2arg(ptr %a, ptr %b, ...) {
   ; RV32-NEXT: {{  $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 8)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.0 + 16, align 8)
+  ; RV32-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.0 + 20)
+  ; RV32-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va2arg
@@ -96,24 +102,26 @@ define void @va2arg(ptr %a, ptr %b, ...) {
   ; RV64-NEXT: {{  $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16)
+  ; RV64-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40)
+  ; RV64-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -126,21 +134,23 @@ define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
+  ; RV32-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va3arg
@@ -150,21 +160,23 @@ define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
+  ; RV64-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -178,18 +190,20 @@ define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
   ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 16)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
+  ; RV32-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va4arg
@@ -200,18 +214,20 @@ define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
   ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
+  ; RV64-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -226,15 +242,17 @@ define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
   ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
+  ; RV32-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va5arg
@@ -246,15 +264,17 @@ define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
   ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
+  ; RV64-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -270,12 +290,14 @@ define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
   ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 8)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
+  ; RV32-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va6arg
@@ -288,12 +310,14 @@ define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
   ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
+  ; RV64-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }
@@ -310,9 +334,11 @@ define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...)
   ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
   ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
-  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
+  ; RV32-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: va7arg
@@ -326,9 +352,11 @@ define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...)
   ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
   ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
   ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
-  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
+  ; RV64-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
   ; RV64-NEXT:   PseudoRET
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 608825e24d1cc..cbed059e12c2e 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -489,12 +489,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -48
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a7, 44(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw a6, 40(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a5, 36(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a4, 32(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a3, 28(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw a1, 20(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw a6, 40(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a2, 24(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw a1, 20(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, sp, 20
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 12(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, sp, 27
@@ -513,12 +513,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
+; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a5, 20(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a4, 16(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a3, 12(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    sw a1, 4(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a2, 8(s0)
+; ILP32-ILP32F-WITHFP-NEXT:    sw a1, 4(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, s0, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, -12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, s0, 11
@@ -536,12 +536,12 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -48
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a7, 44(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a6, 40(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a5, 36(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a4, 32(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a3, 28(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a1, 20(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a6, 40(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a2, 24(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a1, 20(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, sp, 20
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 12(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, sp, 27
