[llvm] f4bc189 - [RISCV][GISel] Pass the IsFixed flag into CC_RISCV for outgoing arguments.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 5 13:08:26 PST 2023


Author: Craig Topper
Date: 2023-11-05T13:06:23-08:00
New Revision: f4bc18916a0d610441d340c746505e8b9a9e61cc

URL: https://github.com/llvm/llvm-project/commit/f4bc18916a0d610441d340c746505e8b9a9e61cc
DIFF: https://github.com/llvm/llvm-project/commit/f4bc18916a0d610441d340c746505e8b9a9e61cc.diff

LOG: [RISCV][GISel] Pass the IsFixed flag into CC_RISCV for outgoing arguments.

This is needed so that FP values are passed in GPRs, as required by
the variadic function ABI.

Added: 
    llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 4b0296f60d13012..da26423644140e7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -48,7 +48,7 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
     const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
 
     return RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
-                         LocInfo, Flags, State, /*IsFixed=*/true, IsRet,
+                         LocInfo, Flags, State, Info.IsFixed, IsRet,
                          Info.Ty, *Subtarget.getTargetLowering(),
                          /*FirstMaskArgument=*/std::nullopt);
   }

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
new file mode 100644
index 000000000000000..992c6bea4a22ac5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
@@ -0,0 +1,351 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ILP32 %s
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32D-ILP32 %s
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32f \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32D-ILP32F %s
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32d \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32D-ILP32D %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=LP64 %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64f \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=LP64F %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64d \
+; RUN:     -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=LP64D %s
+
+; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
+; lp64/lp64f/lp64d. Different CHECK lines are required for RV32D due to slight
+; codegen differences due to the way the f64 load operations are lowered.
+; The nounwind attribute is omitted for some of the tests, to check that CFI
+; directives are correctly generated.
+
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+
+declare void @notdead(ptr)
+
+declare i32 @va1(ptr %fmt, ...)
+
+define void @va1_caller() nounwind {
+  ; ILP32-LABEL: name: va1_caller
+  ; ILP32: bb.1 (%ir-block.0):
+  ; ILP32-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; ILP32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; ILP32-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; ILP32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; ILP32-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; ILP32-NEXT:   $x12 = COPY [[UV]](s32)
+  ; ILP32-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; ILP32-NEXT:   $x14 = COPY [[C1]](s32)
+  ; ILP32-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+  ; ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; ILP32-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32-LABEL: name: va1_caller
+  ; RV32D-ILP32: bb.1 (%ir-block.0):
+  ; RV32D-ILP32-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32D-ILP32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32D-ILP32-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32D-ILP32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32D-ILP32-NEXT:   $x12 = COPY [[UV]](s32)
+  ; RV32D-ILP32-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; RV32D-ILP32-NEXT:   $x14 = COPY [[C1]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+  ; RV32D-ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32F-LABEL: name: va1_caller
+  ; RV32D-ILP32F: bb.1 (%ir-block.0):
+  ; RV32D-ILP32F-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32D-ILP32F-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32D-ILP32F-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32D-ILP32F-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32D-ILP32F-NEXT:   $x12 = COPY [[UV]](s32)
+  ; RV32D-ILP32F-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; RV32D-ILP32F-NEXT:   $x14 = COPY [[C1]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+  ; RV32D-ILP32F-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32F-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32D-LABEL: name: va1_caller
+  ; RV32D-ILP32D: bb.1 (%ir-block.0):
+  ; RV32D-ILP32D-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32D-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32D-ILP32D-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32D-ILP32D-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32D-ILP32D-NEXT:   $x12 = COPY [[UV]](s32)
+  ; RV32D-ILP32D-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; RV32D-ILP32D-NEXT:   $x14 = COPY [[C1]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+  ; RV32D-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32D-NEXT:   PseudoRET
+  ;
+  ; LP64-LABEL: name: va1_caller
+  ; LP64: bb.1 (%ir-block.0):
+  ; LP64-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; LP64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; LP64-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; LP64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+  ; LP64-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; LP64-NEXT:   $x11 = COPY [[C]](s64)
+  ; LP64-NEXT:   $x12 = COPY [[ANYEXT]](s64)
+  ; LP64-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; LP64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; LP64-NEXT:   PseudoRET
+  ;
+  ; LP64F-LABEL: name: va1_caller
+  ; LP64F: bb.1 (%ir-block.0):
+  ; LP64F-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; LP64F-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; LP64F-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; LP64F-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+  ; LP64F-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; LP64F-NEXT:   $x11 = COPY [[C]](s64)
+  ; LP64F-NEXT:   $x12 = COPY [[ANYEXT]](s64)
+  ; LP64F-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; LP64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64F-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; LP64F-NEXT:   PseudoRET
+  ;
+  ; LP64D-LABEL: name: va1_caller
+  ; LP64D: bb.1 (%ir-block.0):
+  ; LP64D-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; LP64D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; LP64D-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; LP64D-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+  ; LP64D-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; LP64D-NEXT:   $x11 = COPY [[C]](s64)
+  ; LP64D-NEXT:   $x12 = COPY [[ANYEXT]](s64)
+  ; LP64D-NEXT:   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; LP64D-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64D-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; LP64D-NEXT:   PseudoRET
+  %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
+  ret void
+}
+
+; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
+; register pair (where the first register is even-numbered).
+
+declare i64 @va2(ptr %fmt, ...) nounwind
+
+define void @va2_caller() nounwind {
+  ; ILP32-LABEL: name: va2_caller
+  ; ILP32: bb.1 (%ir-block.0):
+  ; ILP32-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; ILP32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; ILP32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; ILP32-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; ILP32-NEXT:   $x12 = COPY [[UV]](s32)
+  ; ILP32-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; ILP32-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+  ; ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; ILP32-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; ILP32-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32-LABEL: name: va2_caller
+  ; RV32D-ILP32: bb.1 (%ir-block.0):
+  ; RV32D-ILP32-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32D-ILP32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32D-ILP32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32D-ILP32-NEXT:   $x12 = COPY [[UV]](s32)
+  ; RV32D-ILP32-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+  ; RV32D-ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32F-LABEL: name: va2_caller
+  ; RV32D-ILP32F: bb.1 (%ir-block.0):
+  ; RV32D-ILP32F-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32D-ILP32F-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32D-ILP32F-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32D-ILP32F-NEXT:   $x12 = COPY [[UV]](s32)
+  ; RV32D-ILP32F-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+  ; RV32D-ILP32F-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32F-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32F-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32D-LABEL: name: va2_caller
+  ; RV32D-ILP32D: bb.1 (%ir-block.0):
+  ; RV32D-ILP32D-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32D-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32D-ILP32D-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32D-ILP32D-NEXT:   $x12 = COPY [[UV]](s32)
+  ; RV32D-ILP32D-NEXT:   $x13 = COPY [[UV1]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+  ; RV32D-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32D-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32D-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoRET
+  ;
+  ; LP64-LABEL: name: va2_caller
+  ; LP64: bb.1 (%ir-block.0):
+  ; LP64-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; LP64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; LP64-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; LP64-NEXT:   $x11 = COPY [[C]](s64)
+  ; LP64-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+  ; LP64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64-NEXT:   PseudoRET
+  ;
+  ; LP64F-LABEL: name: va2_caller
+  ; LP64F: bb.1 (%ir-block.0):
+  ; LP64F-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; LP64F-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; LP64F-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; LP64F-NEXT:   $x11 = COPY [[C]](s64)
+  ; LP64F-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+  ; LP64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64F-NEXT:   PseudoRET
+  ;
+  ; LP64D-LABEL: name: va2_caller
+  ; LP64D: bb.1 (%ir-block.0):
+  ; LP64D-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; LP64D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; LP64D-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; LP64D-NEXT:   $x11 = COPY [[C]](s64)
+  ; LP64D-NEXT:   PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+  ; LP64D-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64D-NEXT:   PseudoRET
+ %1 = call i64 (ptr, ...) @va2(ptr undef, double 1.000000e+00)
+ ret void
+}
+
+; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
+; vararg double is passed in a4 and a5 (rather than a3 and a4).
+
+declare i64 @va3(i32 %a, i64 %b, ...) nounwind
+
+define void @va3_caller() nounwind {
+  ; ILP32-LABEL: name: va3_caller
+  ; ILP32: bb.1 (%ir-block.0):
+  ; ILP32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; ILP32-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; ILP32-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; ILP32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+  ; ILP32-NEXT:   [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+  ; ILP32-NEXT:   $x10 = COPY [[C]](s32)
+  ; ILP32-NEXT:   $x11 = COPY [[UV]](s32)
+  ; ILP32-NEXT:   $x12 = COPY [[UV1]](s32)
+  ; ILP32-NEXT:   $x14 = COPY [[UV2]](s32)
+  ; ILP32-NEXT:   $x15 = COPY [[UV3]](s32)
+  ; ILP32-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+  ; ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; ILP32-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; ILP32-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32-LABEL: name: va3_caller
+  ; RV32D-ILP32: bb.1 (%ir-block.0):
+  ; RV32D-ILP32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32D-ILP32-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; RV32D-ILP32-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV32D-ILP32-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+  ; RV32D-ILP32-NEXT:   [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+  ; RV32D-ILP32-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32D-ILP32-NEXT:   $x11 = COPY [[UV]](s32)
+  ; RV32D-ILP32-NEXT:   $x12 = COPY [[UV1]](s32)
+  ; RV32D-ILP32-NEXT:   $x14 = COPY [[UV2]](s32)
+  ; RV32D-ILP32-NEXT:   $x15 = COPY [[UV3]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+  ; RV32D-ILP32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32D-ILP32-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32F-LABEL: name: va3_caller
+  ; RV32D-ILP32F: bb.1 (%ir-block.0):
+  ; RV32D-ILP32F-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32D-ILP32F-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; RV32D-ILP32F-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV32D-ILP32F-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+  ; RV32D-ILP32F-NEXT:   [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+  ; RV32D-ILP32F-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32D-ILP32F-NEXT:   $x11 = COPY [[UV]](s32)
+  ; RV32D-ILP32F-NEXT:   $x12 = COPY [[UV1]](s32)
+  ; RV32D-ILP32F-NEXT:   $x14 = COPY [[UV2]](s32)
+  ; RV32D-ILP32F-NEXT:   $x15 = COPY [[UV3]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+  ; RV32D-ILP32F-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32F-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32F-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32D-ILP32F-NEXT:   PseudoRET
+  ;
+  ; RV32D-ILP32D-LABEL: name: va3_caller
+  ; RV32D-ILP32D: bb.1 (%ir-block.0):
+  ; RV32D-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32D-ILP32D-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; RV32D-ILP32D-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV32D-ILP32D-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+  ; RV32D-ILP32D-NEXT:   [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+  ; RV32D-ILP32D-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32D-ILP32D-NEXT:   $x11 = COPY [[UV]](s32)
+  ; RV32D-ILP32D-NEXT:   $x12 = COPY [[UV1]](s32)
+  ; RV32D-ILP32D-NEXT:   $x14 = COPY [[UV2]](s32)
+  ; RV32D-ILP32D-NEXT:   $x15 = COPY [[UV3]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+  ; RV32D-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32D-ILP32D-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32D-ILP32D-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32D-ILP32D-NEXT:   PseudoRET
+  ;
+  ; LP64-LABEL: name: va3_caller
+  ; LP64: bb.1 (%ir-block.0):
+  ; LP64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; LP64-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; LP64-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; LP64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; LP64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64-NEXT:   $x11 = COPY [[C1]](s64)
+  ; LP64-NEXT:   $x12 = COPY [[C2]](s64)
+  ; LP64-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; LP64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64-NEXT:   PseudoRET
+  ;
+  ; LP64F-LABEL: name: va3_caller
+  ; LP64F: bb.1 (%ir-block.0):
+  ; LP64F-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; LP64F-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; LP64F-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; LP64F-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; LP64F-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64F-NEXT:   $x11 = COPY [[C1]](s64)
+  ; LP64F-NEXT:   $x12 = COPY [[C2]](s64)
+  ; LP64F-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; LP64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64F-NEXT:   PseudoRET
+  ;
+  ; LP64D-LABEL: name: va3_caller
+  ; LP64D: bb.1 (%ir-block.0):
+  ; LP64D-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; LP64D-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
+  ; LP64D-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; LP64D-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; LP64D-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; LP64D-NEXT:   $x11 = COPY [[C1]](s64)
+  ; LP64D-NEXT:   $x12 = COPY [[C2]](s64)
+  ; LP64D-NEXT:   PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; LP64D-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; LP64D-NEXT:   PseudoRET
+ %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.000000e+00)
+ ret void
+}
+
+declare void @llvm.va_copy(ptr, ptr)




More information about the llvm-commits mailing list