[llvm] 859b09d - [RISCV] Promote i32 ISD::VAARG to i64 for -riscv-experimental-rv64-legal-i32.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Feb 4 11:10:44 PST 2024


Author: Craig Topper
Date: 2024-02-04T11:03:12-08:00
New Revision: 859b09da08c2a47026ba0a7d2f21b7dca705864d

URL: https://github.com/llvm/llvm-project/commit/859b09da08c2a47026ba0a7d2f21b7dca705864d
DIFF: https://github.com/llvm/llvm-project/commit/859b09da08c2a47026ba0a7d2f21b7dca705864d.diff

LOG: [RISCV] Promote i32 ISD::VAARG to i64 for -riscv-experimental-rv64-legal-i32.

Added: 
    llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b4bde4c5dd5dd..eca560f08e22f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -263,6 +263,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
   setOperationAction(ISD::VASTART, MVT::Other, Custom);
   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
+  if (RV64LegalI32 && Subtarget.is64Bit())
+    setOperationAction(ISD::VAARG, MVT::i32, Promote);
 
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
 

diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll
new file mode 100644
index 0000000000000..2fb674f560804
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll
@@ -0,0 +1,1391 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs \
+; RUN:   -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64f \
+; RUN:     -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
+; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
+; RUN:     -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
+; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all \
+; RUN:   -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=LP64-LP64F-LP64D-WITHFP %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -target-abi lp64e \
+; RUN:     -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
+; RUN:   | FileCheck -check-prefix=LP64E-FPELIM %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -target-abi lp64e -frame-pointer=all \
+; RUN:     -verify-machineinstrs -riscv-experimental-rv64-legal-i32 \
+; RUN:   | FileCheck -check-prefix=LP64E-WITHFP %s
+
+; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
+; lp64/lp64f/lp64d. Different CHECK lines are required for RV32D due to slight
+; codegen differences due to the way the f64 load operations are lowered.
+; The nounwind attribute is omitted for some of the tests, to check that CFI
+; directives are correctly generated.
+
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+
+declare void @notdead(ptr)
+
+; Although frontends are recommended to not generate va_arg due to the lack of
+; support for aggregate types, we test simple cases here to ensure they are
+; lowered correctly
+
+define i32 @va1(ptr %fmt, ...) {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va1:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    .cfi_def_cfa_offset 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 72(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, sp, 28
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va1:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset ra, -72
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset s0, -80
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa s0, 64
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 12
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va1:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -64
+; LP64E-FPELIM-NEXT:    .cfi_def_cfa_offset 64
+; LP64E-FPELIM-NEXT:    mv a0, a1
+; LP64E-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64E-FPELIM-NEXT:    addi a1, sp, 28
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 64
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va1:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -80
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa_offset 80
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    .cfi_offset ra, -56
+; LP64E-WITHFP-NEXT:    .cfi_offset s0, -64
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa s0, 48
+; LP64E-WITHFP-NEXT:    mv a0, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a1, s0, 12
+; LP64E-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 80
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va, align 4
+  %1 = load i32, ptr %argp.cur, align 4
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 72(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, sp, 32
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va1_va_arg:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -64
+; LP64E-FPELIM-NEXT:    mv a0, a1
+; LP64E-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64E-FPELIM-NEXT:    addi a1, sp, 32
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 64
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va1_va_arg:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -80
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    mv a0, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a1, s0, 16
+; LP64E-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 80
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+; Ensure the adjustment when restoring the stack pointer using the frame
+; pointer is correct
+define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg_alloca:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv s1, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, s0, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, -32(s0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a0, a1, 32
+; LP64-LP64F-LP64D-FPELIM-NEXT:    srli a0, a0, 32
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 15
+; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sub a0, sp, a0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv sp, a0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, s1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, s0, -32
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg_alloca:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv s1, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, s0, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a0, a1, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    srli a0, a0, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sub a0, sp, a0
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv sp, a0
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, s1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, s0, -32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va1_va_arg_alloca:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -80
+; LP64E-FPELIM-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    addi s0, sp, 32
+; LP64E-FPELIM-NEXT:    mv s1, a1
+; LP64E-FPELIM-NEXT:    sd a5, 40(s0)
+; LP64E-FPELIM-NEXT:    sd a4, 32(s0)
+; LP64E-FPELIM-NEXT:    sd a3, 24(s0)
+; LP64E-FPELIM-NEXT:    sd a2, 16(s0)
+; LP64E-FPELIM-NEXT:    sd a1, 8(s0)
+; LP64E-FPELIM-NEXT:    addi a0, s0, 16
+; LP64E-FPELIM-NEXT:    sd a0, -32(s0)
+; LP64E-FPELIM-NEXT:    slli a0, a1, 32
+; LP64E-FPELIM-NEXT:    srli a0, a0, 32
+; LP64E-FPELIM-NEXT:    addi a0, a0, 7
+; LP64E-FPELIM-NEXT:    andi a0, a0, -8
+; LP64E-FPELIM-NEXT:    sub a0, sp, a0
+; LP64E-FPELIM-NEXT:    mv sp, a0
+; LP64E-FPELIM-NEXT:    call notdead
+; LP64E-FPELIM-NEXT:    mv a0, s1
+; LP64E-FPELIM-NEXT:    addi sp, s0, -32
+; LP64E-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    addi sp, sp, 80
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va1_va_arg_alloca:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -80
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    mv s1, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a0, s0, 16
+; LP64E-WITHFP-NEXT:    sd a0, -32(s0)
+; LP64E-WITHFP-NEXT:    slli a0, a1, 32
+; LP64E-WITHFP-NEXT:    srli a0, a0, 32
+; LP64E-WITHFP-NEXT:    addi a0, a0, 7
+; LP64E-WITHFP-NEXT:    andi a0, a0, -8
+; LP64E-WITHFP-NEXT:    sub a0, sp, a0
+; LP64E-WITHFP-NEXT:    mv sp, a0
+; LP64E-WITHFP-NEXT:    call notdead
+; LP64E-WITHFP-NEXT:    mv a0, s1
+; LP64E-WITHFP-NEXT:    addi sp, s0, -32
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 80
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  %2 = alloca i8, i32 %1
+  call void @notdead(ptr %2)
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+define void @va1_caller() nounwind {
+; Pass a double, as a float would be promoted by a C/C++ frontend
+; LP64-LP64F-LP64D-FPELIM-LABEL: va1_caller:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 52
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va1_caller:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 52
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va1_caller:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -8
+; LP64E-FPELIM-NEXT:    sd ra, 0(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    li a1, 1023
+; LP64E-FPELIM-NEXT:    slli a1, a1, 52
+; LP64E-FPELIM-NEXT:    li a2, 2
+; LP64E-FPELIM-NEXT:    call va1
+; LP64E-FPELIM-NEXT:    ld ra, 0(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    addi sp, sp, 8
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va1_caller:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -16
+; LP64E-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 16
+; LP64E-WITHFP-NEXT:    li a1, 1023
+; LP64E-WITHFP-NEXT:    slli a1, a1, 52
+; LP64E-WITHFP-NEXT:    li a2, 2
+; LP64E-WITHFP-NEXT:    call va1
+; LP64E-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 16
+; LP64E-WITHFP-NEXT:    ret
+  %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
+  ret void
+}
+
+; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
+; register pair (where the first register is even-numbered).
+
+define i64 @va2(ptr %fmt, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va2:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 72(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, sp, 39
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va2:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 23
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va2:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -64
+; LP64E-FPELIM-NEXT:    mv a0, a1
+; LP64E-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64E-FPELIM-NEXT:    addi a1, sp, 39
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 64
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va2:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -80
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    mv a0, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a1, s0, 23
+; LP64E-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 80
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va
+  %ptrint = ptrtoint ptr %argp.cur to iXLen
+  %1 = add iXLen %ptrint, 7
+  %2 = and iXLen %1, -8
+  %argp.cur.aligned = inttoptr iXLen %1 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %va
+  %3 = inttoptr iXLen %2 to ptr
+  %4 = load double, ptr %3, align 8
+  %5 = bitcast double %4 to i64
+  call void @llvm.va_end(ptr %va)
+  ret i64 %5
+}
+
+define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va2_va_arg:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 72(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, sp, 32
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va2_va_arg:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va2_va_arg:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -64
+; LP64E-FPELIM-NEXT:    mv a0, a1
+; LP64E-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64E-FPELIM-NEXT:    addi a1, sp, 32
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 64
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va2_va_arg:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -80
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    mv a0, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a1, s0, 16
+; LP64E-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 80
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr %va)
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
+
+define void @va2_caller() nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va2_caller:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 52
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va2_caller:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 52
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va2_caller:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -8
+; LP64E-FPELIM-NEXT:    sd ra, 0(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    li a1, 1023
+; LP64E-FPELIM-NEXT:    slli a1, a1, 52
+; LP64E-FPELIM-NEXT:    call va2
+; LP64E-FPELIM-NEXT:    ld ra, 0(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    addi sp, sp, 8
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va2_caller:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -16
+; LP64E-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 16
+; LP64E-WITHFP-NEXT:    li a1, 1023
+; LP64E-WITHFP-NEXT:    slli a1, a1, 52
+; LP64E-WITHFP-NEXT:    call va2
+; LP64E-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 16
+; LP64E-WITHFP-NEXT:    ret
+ %1 = call i64 (ptr, ...) @va2(ptr undef, double 1.000000e+00)
+ ret void
+}
+
+; On RV32, Ensure a named 2*xlen argument is passed in a1 and a2, while the
+; vararg double is passed in a4 and a5 (rather than a3 and a4)
+
+define i64 @va3(i32 %a, i64 %b, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va3:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -64
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 16(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a3, sp, 31
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a0, a1, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 64
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va3:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 0(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a3, s0, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT:    add a0, a1, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va3:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -48
+; LP64E-FPELIM-NEXT:    sd a5, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 24(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 16(sp)
+; LP64E-FPELIM-NEXT:    addi a3, sp, 31
+; LP64E-FPELIM-NEXT:    add a0, a1, a2
+; LP64E-FPELIM-NEXT:    sd a3, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 48
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va3:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -64
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    sd a5, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 8(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 0(s0)
+; LP64E-WITHFP-NEXT:    addi a3, s0, 15
+; LP64E-WITHFP-NEXT:    add a0, a1, a2
+; LP64E-WITHFP-NEXT:    sd a3, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 64
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va
+  %ptrint = ptrtoint ptr %argp.cur to iXLen
+  %1 = add iXLen %ptrint, 7
+  %2 = and iXLen %1, -8
+  %argp.cur.aligned = inttoptr iXLen %1 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %va
+  %3 = inttoptr iXLen %2 to ptr
+  %4 = load double, ptr %3, align 8
+  call void @llvm.va_end(ptr %va)
+  %5 = bitcast double %4 to i64
+  %6 = add i64 %b, %5
+  ret i64 %6
+}
+
+define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va3_va_arg:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -64
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 16(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a3, sp, 24
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a0, a1, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 64
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va3_va_arg:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 0(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a3, s0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT:    add a0, a1, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va3_va_arg:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -48
+; LP64E-FPELIM-NEXT:    sd a5, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 24(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 16(sp)
+; LP64E-FPELIM-NEXT:    addi a3, sp, 24
+; LP64E-FPELIM-NEXT:    add a0, a1, a2
+; LP64E-FPELIM-NEXT:    sd a3, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 48
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va3_va_arg:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -64
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    sd a5, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 8(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 0(s0)
+; LP64E-WITHFP-NEXT:    addi a3, s0, 8
+; LP64E-WITHFP-NEXT:    add a0, a1, a2
+; LP64E-WITHFP-NEXT:    sd a3, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 64
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr %va)
+  %2 = bitcast double %1 to i64
+  %3 = add i64 %b, %2
+  ret i64 %3
+}
+
+define void @va3_caller() nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va3_caller:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a2, 62
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1111
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va3
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va3_caller:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a2, 62
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1111
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va3
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va3_caller:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -8
+; LP64E-FPELIM-NEXT:    sd ra, 0(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    li a2, 1
+; LP64E-FPELIM-NEXT:    slli a2, a2, 62
+; LP64E-FPELIM-NEXT:    li a0, 2
+; LP64E-FPELIM-NEXT:    li a1, 1111
+; LP64E-FPELIM-NEXT:    call va3
+; LP64E-FPELIM-NEXT:    ld ra, 0(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    addi sp, sp, 8
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va3_caller:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -16
+; LP64E-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 16
+; LP64E-WITHFP-NEXT:    li a2, 1
+; LP64E-WITHFP-NEXT:    slli a2, a2, 62
+; LP64E-WITHFP-NEXT:    li a0, 2
+; LP64E-WITHFP-NEXT:    li a1, 1111
+; LP64E-WITHFP-NEXT:    call va3
+; LP64E-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 16
+; LP64E-WITHFP-NEXT:    ret
+ %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.000000e+00)
+ ret void
+}
+
+declare void @llvm.va_copy(ptr, ptr)
+
+define i32 @va4_va_copy(i32 %argno, ...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va4_va_copy:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv s0, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 88(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 80(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 72(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 64(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, sp, 48
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 0(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a0, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 3
+; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -4
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, a0, 8
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lw a1, 0(a0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -4
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a2, a0, 8
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lw a2, 0(a0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -4
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a3, a0, 8
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lw a0, 0(a0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, a1, s0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, a1, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addw a0, a1, a0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va4_va_copy:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -112
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 48
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv s1, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, s0, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, -32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 3
+; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -4
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a1, 0(a0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -4
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a2, a0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, -32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a2, 0(a0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -4
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a3, a0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, -32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a0, 0(a0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    add a1, a1, s1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    add a1, a1, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addw a0, a1, a0
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 112
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va4_va_copy:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -80
+; LP64E-FPELIM-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    mv s0, a1
+; LP64E-FPELIM-NEXT:    sd a5, 72(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 64(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 56(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 48(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 40(sp)
+; LP64E-FPELIM-NEXT:    addi a0, sp, 48
+; LP64E-FPELIM-NEXT:    sd a0, 8(sp)
+; LP64E-FPELIM-NEXT:    sd a0, 0(sp)
+; LP64E-FPELIM-NEXT:    call notdead
+; LP64E-FPELIM-NEXT:    ld a0, 8(sp)
+; LP64E-FPELIM-NEXT:    addi a0, a0, 3
+; LP64E-FPELIM-NEXT:    andi a0, a0, -4
+; LP64E-FPELIM-NEXT:    addi a1, a0, 8
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    lw a1, 0(a0)
+; LP64E-FPELIM-NEXT:    addi a0, a0, 11
+; LP64E-FPELIM-NEXT:    andi a0, a0, -4
+; LP64E-FPELIM-NEXT:    addi a2, a0, 8
+; LP64E-FPELIM-NEXT:    sd a2, 8(sp)
+; LP64E-FPELIM-NEXT:    lw a2, 0(a0)
+; LP64E-FPELIM-NEXT:    addi a0, a0, 11
+; LP64E-FPELIM-NEXT:    andi a0, a0, -4
+; LP64E-FPELIM-NEXT:    addi a3, a0, 8
+; LP64E-FPELIM-NEXT:    sd a3, 8(sp)
+; LP64E-FPELIM-NEXT:    lw a0, 0(a0)
+; LP64E-FPELIM-NEXT:    add a1, a1, s0
+; LP64E-FPELIM-NEXT:    add a1, a1, a2
+; LP64E-FPELIM-NEXT:    addw a0, a1, a0
+; LP64E-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    addi sp, sp, 80
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va4_va_copy:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -88
+; LP64E-WITHFP-NEXT:    sd ra, 32(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 40
+; LP64E-WITHFP-NEXT:    mv s1, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a0, s0, 16
+; LP64E-WITHFP-NEXT:    sd a0, -32(s0)
+; LP64E-WITHFP-NEXT:    sd a0, -40(s0)
+; LP64E-WITHFP-NEXT:    call notdead
+; LP64E-WITHFP-NEXT:    ld a0, -32(s0)
+; LP64E-WITHFP-NEXT:    addi a0, a0, 3
+; LP64E-WITHFP-NEXT:    andi a0, a0, -4
+; LP64E-WITHFP-NEXT:    addi a1, a0, 8
+; LP64E-WITHFP-NEXT:    sd a1, -32(s0)
+; LP64E-WITHFP-NEXT:    lw a1, 0(a0)
+; LP64E-WITHFP-NEXT:    addi a0, a0, 11
+; LP64E-WITHFP-NEXT:    andi a0, a0, -4
+; LP64E-WITHFP-NEXT:    addi a2, a0, 8
+; LP64E-WITHFP-NEXT:    sd a2, -32(s0)
+; LP64E-WITHFP-NEXT:    lw a2, 0(a0)
+; LP64E-WITHFP-NEXT:    addi a0, a0, 11
+; LP64E-WITHFP-NEXT:    andi a0, a0, -4
+; LP64E-WITHFP-NEXT:    addi a3, a0, 8
+; LP64E-WITHFP-NEXT:    sd a3, -32(s0)
+; LP64E-WITHFP-NEXT:    lw a0, 0(a0)
+; LP64E-WITHFP-NEXT:    add a1, a1, s1
+; LP64E-WITHFP-NEXT:    add a1, a1, a2
+; LP64E-WITHFP-NEXT:    addw a0, a1, a0
+; LP64E-WITHFP-NEXT:    ld ra, 32(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 88
+; LP64E-WITHFP-NEXT:    ret
+  %vargs = alloca ptr
+  %wargs = alloca ptr
+  call void @llvm.va_start(ptr %vargs)
+  %1 = va_arg ptr %vargs, i32
+  call void @llvm.va_copy(ptr %wargs, ptr %vargs)
+  %2 = load ptr, ptr %wargs, align 4
+  call void @notdead(ptr %2)
+  %3 = va_arg ptr %vargs, i32
+  %4 = va_arg ptr %vargs, i32
+  %5 = va_arg ptr %vargs, i32
+  call void @llvm.va_end(ptr %vargs)
+  call void @llvm.va_end(ptr %wargs)
+  %add1 = add i32 %3, %1
+  %add2 = add i32 %add1, %4
+  %add3 = add i32 %add2, %5
+  ret i32 %add3
+}
+
+; Check 2x*xlen values are aligned appropriately when passed on the stack in a vararg call
+
+declare i32 @va5_aligned_stack_callee(i32, ...)
+
+define void @va5_aligned_stack_caller() nounwind {
+; The double should be 8-byte aligned on the stack, but the two-element array
+; should only be 4-byte aligned
+; LP64-LP64F-LP64D-FPELIM-LABEL: va5_aligned_stack_caller:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -48
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 17
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 16(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 15
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, %hi(.LCPI11_0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld t0, %lo(.LCPI11_0)(a0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, %hi(.LCPI11_1)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a2, %lo(.LCPI11_1)(a0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, %hi(.LCPI11_2)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a3, %lo(.LCPI11_2)(a0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, 2384
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a6, a0, 761
+; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a6, a6, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a4, 12
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a5, 13
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a7, 14
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd t0, 0(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va5_aligned_stack_callee
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 48
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va5_aligned_stack_caller:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -48
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 48
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 17
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 8(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, %hi(.LCPI11_0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld t0, %lo(.LCPI11_0)(a0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, %hi(.LCPI11_1)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a2, %lo(.LCPI11_1)(a0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, %hi(.LCPI11_2)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a3, %lo(.LCPI11_2)(a0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, 2384
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a6, a0, 761
+; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a6, a6, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a4, 12
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a5, 13
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a7, 14
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd t0, 0(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va5_aligned_stack_callee
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 48
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va5_aligned_stack_caller:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -56
+; LP64E-FPELIM-NEXT:    sd ra, 48(sp) # 8-byte Folded Spill
+; LP64E-FPELIM-NEXT:    li a0, 17
+; LP64E-FPELIM-NEXT:    sd a0, 40(sp)
+; LP64E-FPELIM-NEXT:    li a0, 16
+; LP64E-FPELIM-NEXT:    lui a1, %hi(.LCPI11_0)
+; LP64E-FPELIM-NEXT:    ld a1, %lo(.LCPI11_0)(a1)
+; LP64E-FPELIM-NEXT:    sd a0, 32(sp)
+; LP64E-FPELIM-NEXT:    li a0, 15
+; LP64E-FPELIM-NEXT:    sd a0, 24(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 16(sp)
+; LP64E-FPELIM-NEXT:    li a0, 14
+; LP64E-FPELIM-NEXT:    sd a0, 8(sp)
+; LP64E-FPELIM-NEXT:    lui a0, 2384
+; LP64E-FPELIM-NEXT:    addiw a0, a0, 761
+; LP64E-FPELIM-NEXT:    slli a6, a0, 11
+; LP64E-FPELIM-NEXT:    lui a0, %hi(.LCPI11_1)
+; LP64E-FPELIM-NEXT:    ld a2, %lo(.LCPI11_1)(a0)
+; LP64E-FPELIM-NEXT:    lui a0, %hi(.LCPI11_2)
+; LP64E-FPELIM-NEXT:    ld a3, %lo(.LCPI11_2)(a0)
+; LP64E-FPELIM-NEXT:    li a0, 1
+; LP64E-FPELIM-NEXT:    li a1, 11
+; LP64E-FPELIM-NEXT:    li a4, 12
+; LP64E-FPELIM-NEXT:    li a5, 13
+; LP64E-FPELIM-NEXT:    sd a6, 0(sp)
+; LP64E-FPELIM-NEXT:    call va5_aligned_stack_callee
+; LP64E-FPELIM-NEXT:    ld ra, 48(sp) # 8-byte Folded Reload
+; LP64E-FPELIM-NEXT:    addi sp, sp, 56
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va5_aligned_stack_caller:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -64
+; LP64E-WITHFP-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 64
+; LP64E-WITHFP-NEXT:    li a0, 17
+; LP64E-WITHFP-NEXT:    sd a0, 40(sp)
+; LP64E-WITHFP-NEXT:    li a0, 16
+; LP64E-WITHFP-NEXT:    lui a1, %hi(.LCPI11_0)
+; LP64E-WITHFP-NEXT:    ld a1, %lo(.LCPI11_0)(a1)
+; LP64E-WITHFP-NEXT:    sd a0, 32(sp)
+; LP64E-WITHFP-NEXT:    li a0, 15
+; LP64E-WITHFP-NEXT:    sd a0, 24(sp)
+; LP64E-WITHFP-NEXT:    sd a1, 16(sp)
+; LP64E-WITHFP-NEXT:    li a0, 14
+; LP64E-WITHFP-NEXT:    sd a0, 8(sp)
+; LP64E-WITHFP-NEXT:    lui a0, 2384
+; LP64E-WITHFP-NEXT:    addiw a0, a0, 761
+; LP64E-WITHFP-NEXT:    slli a6, a0, 11
+; LP64E-WITHFP-NEXT:    lui a0, %hi(.LCPI11_1)
+; LP64E-WITHFP-NEXT:    ld a2, %lo(.LCPI11_1)(a0)
+; LP64E-WITHFP-NEXT:    lui a0, %hi(.LCPI11_2)
+; LP64E-WITHFP-NEXT:    ld a3, %lo(.LCPI11_2)(a0)
+; LP64E-WITHFP-NEXT:    li a0, 1
+; LP64E-WITHFP-NEXT:    li a1, 11
+; LP64E-WITHFP-NEXT:    li a4, 12
+; LP64E-WITHFP-NEXT:    li a5, 13
+; LP64E-WITHFP-NEXT:    sd a6, 0(sp)
+; LP64E-WITHFP-NEXT:    call va5_aligned_stack_callee
+; LP64E-WITHFP-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 64
+; LP64E-WITHFP-NEXT:    ret
+  %1 = call i32 (i32, ...) @va5_aligned_stack_callee(i32 1, i32 11,
+    fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13, i64 20000000000,
+    i32 14, double 2.720000e+00, i32 15, [2 x i32] [i32 16, i32 17])
+  ret void
+}
+
+; A function with no fixed arguments is not valid C, but can be
+; specified in LLVM IR. We must ensure the vararg save area is
+; still set up correctly.
+
+define i32 @va6_no_fixed_args(...) nounwind {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va6_no_fixed_args:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 72(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 16(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, sp, 24
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va6_no_fixed_args:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 0(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va6_no_fixed_args:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    addi sp, sp, -64
+; LP64E-FPELIM-NEXT:    sd a5, 56(sp)
+; LP64E-FPELIM-NEXT:    sd a4, 48(sp)
+; LP64E-FPELIM-NEXT:    sd a3, 40(sp)
+; LP64E-FPELIM-NEXT:    sd a2, 32(sp)
+; LP64E-FPELIM-NEXT:    sd a1, 24(sp)
+; LP64E-FPELIM-NEXT:    sd a0, 16(sp)
+; LP64E-FPELIM-NEXT:    addi a1, sp, 24
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    addi sp, sp, 64
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va6_no_fixed_args:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -80
+; LP64E-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    addi s0, sp, 32
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    sd a0, 0(s0)
+; LP64E-WITHFP-NEXT:    addi a1, s0, 8
+; LP64E-WITHFP-NEXT:    sd a1, -24(s0)
+; LP64E-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 80
+; LP64E-WITHFP-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %1 = va_arg ptr %va, i32
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+; TODO: improve constant materialization of stack addresses
+
+define i32 @va_large_stack(ptr %fmt, ...) {
+; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a0, a0, 336
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sub sp, sp, a0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    .cfi_def_cfa_offset 100000080
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui t0, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add t0, sp, t0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 328(t0)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a7, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a7, sp, a7
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 320(a7)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a6, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a6, sp, a6
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 312(a6)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a5, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a5, sp, a5
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a4, 304(a5)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a4, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a4, sp, a4
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 296(a4)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a3, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a3, sp, a3
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 288(a3)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a2, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a2, sp, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 280(a2)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a1, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a1, a1, 284
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, sp, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a1, 24414
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a1, a1, 336
+; LP64-LP64F-LP64D-FPELIM-NEXT:    add sp, sp, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
+;
+; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack:
+; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -2032
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 1960(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 1952(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset ra, -72
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset s0, -80
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 1968
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa s0, 64
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, 24414
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a0, a0, -1680
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sub sp, sp, a0
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 12
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a2, 24414
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sub a2, s0, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -288(a2)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a1, 24414
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a1, a1, -1680
+; LP64-LP64F-LP64D-WITHFP-NEXT:    add sp, sp, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 1960(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 1952(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 2032
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
+;
+; LP64E-FPELIM-LABEL: va_large_stack:
+; LP64E-FPELIM:       # %bb.0:
+; LP64E-FPELIM-NEXT:    lui a0, 24414
+; LP64E-FPELIM-NEXT:    addiw a0, a0, 320
+; LP64E-FPELIM-NEXT:    sub sp, sp, a0
+; LP64E-FPELIM-NEXT:    .cfi_def_cfa_offset 100000064
+; LP64E-FPELIM-NEXT:    mv a0, a1
+; LP64E-FPELIM-NEXT:    lui a6, 24414
+; LP64E-FPELIM-NEXT:    add a6, sp, a6
+; LP64E-FPELIM-NEXT:    sd a5, 312(a6)
+; LP64E-FPELIM-NEXT:    lui a5, 24414
+; LP64E-FPELIM-NEXT:    add a5, sp, a5
+; LP64E-FPELIM-NEXT:    sd a4, 304(a5)
+; LP64E-FPELIM-NEXT:    lui a4, 24414
+; LP64E-FPELIM-NEXT:    add a4, sp, a4
+; LP64E-FPELIM-NEXT:    sd a3, 296(a4)
+; LP64E-FPELIM-NEXT:    lui a3, 24414
+; LP64E-FPELIM-NEXT:    add a3, sp, a3
+; LP64E-FPELIM-NEXT:    sd a2, 288(a3)
+; LP64E-FPELIM-NEXT:    lui a2, 24414
+; LP64E-FPELIM-NEXT:    add a2, sp, a2
+; LP64E-FPELIM-NEXT:    sd a1, 280(a2)
+; LP64E-FPELIM-NEXT:    lui a1, 24414
+; LP64E-FPELIM-NEXT:    addiw a1, a1, 284
+; LP64E-FPELIM-NEXT:    add a1, sp, a1
+; LP64E-FPELIM-NEXT:    sd a1, 8(sp)
+; LP64E-FPELIM-NEXT:    lui a1, 24414
+; LP64E-FPELIM-NEXT:    addiw a1, a1, 320
+; LP64E-FPELIM-NEXT:    add sp, sp, a1
+; LP64E-FPELIM-NEXT:    ret
+;
+; LP64E-WITHFP-LABEL: va_large_stack:
+; LP64E-WITHFP:       # %bb.0:
+; LP64E-WITHFP-NEXT:    addi sp, sp, -2040
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa_offset 2040
+; LP64E-WITHFP-NEXT:    sd ra, 1984(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    sd s0, 1976(sp) # 8-byte Folded Spill
+; LP64E-WITHFP-NEXT:    .cfi_offset ra, -56
+; LP64E-WITHFP-NEXT:    .cfi_offset s0, -64
+; LP64E-WITHFP-NEXT:    addi s0, sp, 1992
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa s0, 48
+; LP64E-WITHFP-NEXT:    lui a0, 24414
+; LP64E-WITHFP-NEXT:    addiw a0, a0, -1704
+; LP64E-WITHFP-NEXT:    sub sp, sp, a0
+; LP64E-WITHFP-NEXT:    mv a0, a1
+; LP64E-WITHFP-NEXT:    sd a5, 40(s0)
+; LP64E-WITHFP-NEXT:    sd a4, 32(s0)
+; LP64E-WITHFP-NEXT:    sd a3, 24(s0)
+; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
+; LP64E-WITHFP-NEXT:    sd a1, 8(s0)
+; LP64E-WITHFP-NEXT:    addi a1, s0, 12
+; LP64E-WITHFP-NEXT:    lui a2, 24414
+; LP64E-WITHFP-NEXT:    sub a2, s0, a2
+; LP64E-WITHFP-NEXT:    sd a1, -288(a2)
+; LP64E-WITHFP-NEXT:    lui a1, 24414
+; LP64E-WITHFP-NEXT:    addiw a1, a1, -1704
+; LP64E-WITHFP-NEXT:    add sp, sp, a1
+; LP64E-WITHFP-NEXT:    ld ra, 1984(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    ld s0, 1976(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    addi sp, sp, 2040
+; LP64E-WITHFP-NEXT:    ret
+  %large = alloca [ 100000000 x i8 ]
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va, align 4
+  %1 = load i32, ptr %argp.cur, align 4
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}


        

