[libc-commits] [clang] [libc] [llvm] [libc] Implement (v|f)printf on the GPU (PR #96369)
Joseph Huber via libc-commits
libc-commits at lists.llvm.org
Tue Jun 25 08:19:04 PDT 2024
https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/96369
>From ee06122c86d041b6a3d34b13a6cfd64a575566a3 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Fri, 21 Jun 2024 19:17:42 -0500
Subject: [PATCH 1/3] [LLVM] Fix incorrect alignment on AMDGPU variadics
Summary:
The variadics lowering for AMDGPU puts all the arguments into a void
pointer struct. The current logic dictates a minimum alignment of 4
regardless of the underlying type, which is incorrect in the following
case.
```c
void foo(int, ...);
void bar() {
int x;
void *p;
foo(0, x, p);
}
```
Here, because the minimum alignment is 4, we advance the buffer by only
4 after the `int`, so the subsequent 8-byte `void *` argument ends up
misaligned when we try to access it. We still want a minimum alignment
of 4, but it must be raised to 8 for arguments like this.
---
clang/lib/CodeGen/Targets/AMDGPU.cpp | 11 +-
clang/test/CodeGen/amdgpu-variadic-call.c | 32 +-
llvm/lib/Transforms/IPO/ExpandVariadics.cpp | 6 +-
.../CodeGen/AMDGPU/expand-variadic-call.ll | 574 +++++++++---------
4 files changed, 316 insertions(+), 307 deletions(-)
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 4d3275e17c386e..a169a7d920456d 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -121,7 +121,7 @@ void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty, AggValueSlot Slot) const {
const bool IsIndirect = false;
- const bool AllowHigherAlign = false;
+ const bool AllowHigherAlign = true;
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
@@ -212,13 +212,8 @@ ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
Ty = useFirstFieldIfTransparentUnion(Ty);
- if (Variadic) {
- return ABIArgInfo::getDirect(/*T=*/nullptr,
- /*Offset=*/0,
- /*Padding=*/nullptr,
- /*CanBeFlattened=*/false,
- /*Align=*/0);
- }
+ if (Variadic)
+ return ABIArgInfo::getDirect();
if (isAggregateTypeForABI(Ty)) {
// Records with non-trivial destructors/copy-constructors should not be
diff --git a/clang/test/CodeGen/amdgpu-variadic-call.c b/clang/test/CodeGen/amdgpu-variadic-call.c
index 17eda215211a2a..0529d6b3171c80 100644
--- a/clang/test/CodeGen/amdgpu-variadic-call.c
+++ b/clang/test/CodeGen/amdgpu-variadic-call.c
@@ -1,4 +1,3 @@
-// REQUIRES: amdgpu-registered-target
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
// RUN: %clang_cc1 -cc1 -std=c23 -triple amdgcn-amd-amdhsa -emit-llvm -O1 %s -o - | FileCheck %s
@@ -179,11 +178,9 @@ typedef struct
// CHECK-LABEL: define {{[^@]+}}@one_pair_f64
// CHECK-SAME: (i32 noundef [[F0:%.*]], double noundef [[F1:%.*]], double [[V0_COERCE0:%.*]], double [[V0_COERCE1:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[STRUCT_PAIR_F64:%.*]] poison, double [[V0_COERCE0]], 0
-// CHECK-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue [[STRUCT_PAIR_F64]] [[DOTFCA_0_INSERT]], double [[V0_COERCE1]], 1
-// CHECK-NEXT: tail call void (...) @sink_0([[STRUCT_PAIR_F64]] [[DOTFCA_1_INSERT]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], [[STRUCT_PAIR_F64]] [[DOTFCA_1_INSERT]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], [[STRUCT_PAIR_F64]] [[DOTFCA_1_INSERT]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (...) @sink_0(double [[V0_COERCE0]], double [[V0_COERCE1]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], double [[V0_COERCE0]], double [[V0_COERCE1]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], double [[V0_COERCE0]], double [[V0_COERCE1]]) #[[ATTR2]]
// CHECK-NEXT: ret void
//
void one_pair_f64(int f0, double f1, pair_f64 v0)
@@ -220,10 +217,9 @@ typedef union
// CHECK-SAME: (i32 noundef [[F0:%.*]], double noundef [[F1:%.*]], i32 [[V0_COERCE:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[V0_COERCE]] to float
-// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[UNION_UNION_F32_I32:%.*]] poison, float [[TMP0]], 0
-// CHECK-NEXT: tail call void (...) @sink_0([[UNION_UNION_F32_I32]] [[DOTFCA_0_INSERT]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], [[UNION_UNION_F32_I32]] [[DOTFCA_0_INSERT]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], [[UNION_UNION_F32_I32]] [[DOTFCA_0_INSERT]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (...) @sink_0(float [[TMP0]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], float [[TMP0]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], float [[TMP0]]) #[[ATTR2]]
// CHECK-NEXT: ret void
//
void one_pair_union_f32_i32(int f0, double f1, union_f32_i32 v0)
@@ -242,10 +238,9 @@ typedef union
// CHECK-LABEL: define {{[^@]+}}@one_pair_transparent_union_f32_i32
// CHECK-SAME: (i32 noundef [[F0:%.*]], double noundef [[F1:%.*]], i32 [[V0_COERCE:%.*]]) local_unnamed_addr #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[UNION_TRANSPARENT_UNION_F32_I32:%.*]] poison, i32 [[V0_COERCE]], 0
-// CHECK-NEXT: tail call void (...) @sink_0([[UNION_TRANSPARENT_UNION_F32_I32]] [[DOTFCA_0_INSERT]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], [[UNION_TRANSPARENT_UNION_F32_I32]] [[DOTFCA_0_INSERT]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], [[UNION_TRANSPARENT_UNION_F32_I32]] [[DOTFCA_0_INSERT]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (...) @sink_0(i32 [[V0_COERCE]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], i32 [[V0_COERCE]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], i32 [[V0_COERCE]]) #[[ATTR2]]
// CHECK-NEXT: ret void
//
void one_pair_transparent_union_f32_i32(int f0, double f1, transparent_union_f32_i32 v0)
@@ -277,12 +272,9 @@ void multiple_one(int f0, double f1, int v0, double v1)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[V2_COERCE]] to float
// CHECK-NEXT: [[CONV:%.*]] = fpext float [[V1]] to double
-// CHECK-NEXT: [[DOTFCA_0_INSERT16:%.*]] = insertvalue [[STRUCT_PAIR_F64:%.*]] poison, double [[V0_COERCE0]], 0
-// CHECK-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue [[STRUCT_PAIR_F64]] [[DOTFCA_0_INSERT16]], double [[V0_COERCE1]], 1
-// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[UNION_UNION_F32_I32:%.*]] poison, float [[TMP0]], 0
-// CHECK-NEXT: tail call void (...) @sink_0([[STRUCT_PAIR_F64]] [[DOTFCA_1_INSERT]], double noundef [[CONV]], [[UNION_UNION_F32_I32]] [[DOTFCA_0_INSERT]], i32 noundef [[V3]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], [[STRUCT_PAIR_F64]] [[DOTFCA_1_INSERT]], double noundef [[CONV]], [[UNION_UNION_F32_I32]] [[DOTFCA_0_INSERT]], i32 noundef [[V3]]) #[[ATTR2]]
-// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], [[STRUCT_PAIR_F64]] [[DOTFCA_1_INSERT]], double noundef [[CONV]], [[UNION_UNION_F32_I32]] [[DOTFCA_0_INSERT]], i32 noundef [[V3]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (...) @sink_0(double [[V0_COERCE0]], double [[V0_COERCE1]], double noundef [[CONV]], float [[TMP0]], i32 noundef [[V3]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (i32, ...) @sink_1(i32 noundef [[F0]], double [[V0_COERCE0]], double [[V0_COERCE1]], double noundef [[CONV]], float [[TMP0]], i32 noundef [[V3]]) #[[ATTR2]]
+// CHECK-NEXT: tail call void (double, i32, ...) @sink_2(double noundef [[F1]], i32 noundef [[F0]], double [[V0_COERCE0]], double [[V0_COERCE1]], double noundef [[CONV]], float [[TMP0]], i32 noundef [[V3]]) #[[ATTR2]]
// CHECK-NEXT: ret void
//
void multiple_two(int f0, double f1, pair_f64 v0, float v1, union_f32_i32 v2, int v3)
diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
index d340bc041ccdad..489e13410d3b15 100644
--- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
+++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
@@ -934,7 +934,11 @@ struct Amdgpu final : public VariadicABIInfo {
}
VAArgSlotInfo slotInfo(const DataLayout &DL, Type *Parameter) override {
- return {Align(4), false};
+ const unsigned MinAlign = 1;
+ Align A = DL.getABITypeAlign(Parameter);
+ if (A < MinAlign)
+ A = Align(MinAlign);
+ return {A, false};
}
};
diff --git a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
index ce55558dabaf13..e7450707ac94a0 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
@@ -1,50 +1,48 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: -p --function-signature
-; RUN: opt -S --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s
-; REQUIRES: amdgpu-registered-target
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
-target triple = "amdgcn-amd-amdhsa"
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-- --passes=expand-variadics \
+; RUN: --expand-variadics-override=lowering < %s | FileCheck %s
-; Check the variables are lowered to the locations this target expects
+%struct.libcS = type { i8, i16, i32, i64, float, double }
-; The types show the call frames
+; The types show the call frames.
; CHECK: %single_i32.vararg = type <{ i32 }>
; CHECK: %single_double.vararg = type <{ double }>
; CHECK: %single_v4f32.vararg = type <{ <4 x float> }>
; CHECK: %single_v8f32.vararg = type <{ <8 x float> }>
; CHECK: %single_v16f32.vararg = type <{ <16 x float> }>
; CHECK: %single_v32f32.vararg = type <{ <32 x float> }>
-; CHECK: %i32_double.vararg = type <{ i32, double }>
+; CHECK: %i32_double.vararg = type <{ i32, [4 x i8], double }>
; CHECK: %double_i32.vararg = type <{ double, i32 }>
-; CHECK: %i32_libcS.vararg = type <{ i32, %struct.libcS }>
+; CHECK: %i32_libcS.vararg = type <{ i32, [4 x i8], %struct.libcS }>
+; CHECK: %struct.libcS = type { i8, i16, i32, i64, float, double }
; CHECK: %libcS_i32.vararg = type <{ %struct.libcS, i32 }>
-; CHECK: %i32_v4f32.vararg = type <{ i32, <4 x float> }>
+; CHECK: %i32_v4f32.vararg = type <{ i32, [12 x i8], <4 x float> }>
; CHECK: %v4f32_i32.vararg = type <{ <4 x float>, i32 }>
-; CHECK: %i32_v8f32.vararg = type <{ i32, <8 x float> }>
+; CHECK: %i32_v8f32.vararg = type <{ i32, [28 x i8], <8 x float> }>
; CHECK: %v8f32_i32.vararg = type <{ <8 x float>, i32 }>
-; CHECK: %i32_v16f32.vararg = type <{ i32, <16 x float> }>
+; CHECK: %i32_v16f32.vararg = type <{ i32, [60 x i8], <16 x float> }>
; CHECK: %v16f32_i32.vararg = type <{ <16 x float>, i32 }>
-; CHECK: %i32_v32f32.vararg = type <{ i32, <32 x float> }>
+; CHECK: %i32_v32f32.vararg = type <{ i32, [124 x i8], <32 x float> }>
; CHECK: %v32f32_i32.vararg = type <{ <32 x float>, i32 }>
; CHECK: %fptr_single_i32.vararg = type <{ i32 }>
; CHECK: %fptr_libcS.vararg = type <{ %struct.libcS }>
-%struct.libcS = type { i8, i16, i32, i64, float, double }
-
@vararg_ptr = hidden addrspace(1) global ptr @vararg, align 8
define hidden void @copy(ptr noundef %va) {
-; CHECK-LABEL: define {{[^@]+}}@copy(ptr noundef %va) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %va.addr = alloca ptr, align 8, addrspace(5)
-; CHECK-NEXT: %cp = alloca ptr, align 8, addrspace(5)
-; CHECK-NEXT: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr
-; CHECK-NEXT: %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr
-; CHECK-NEXT: store ptr %va, ptr addrspace(5) %va.addr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp.ascast, ptr %va.addr.ascast, i32 8, i1 false)
-; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %cp, align 8
-; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp)
+; CHECK-LABEL: define hidden void @copy(
+; CHECK-SAME: ptr noundef [[VA:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VA_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: [[CP:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: [[VA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VA_ADDR]] to ptr
+; CHECK-NEXT: [[CP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CP]] to ptr
+; CHECK-NEXT: store ptr [[VA]], ptr addrspace(5) [[VA_ADDR]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[CP]])
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[CP_ASCAST]], ptr [[VA_ADDR_ASCAST]], i32 8, i1 false)
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr addrspace(5) [[CP]], align 8
+; CHECK-NEXT: call void @valist(ptr noundef [[TMP0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[CP]])
; CHECK-NEXT: ret void
;
entry:
@@ -70,15 +68,16 @@ declare hidden void @valist(ptr noundef)
declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture)
define hidden void @start_once(...) {
-; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %s = alloca ptr, align 8, addrspace(5)
-; CHECK-NEXT: %s.ascast = addrspacecast ptr addrspace(5) %s to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s)
-; CHECK-NEXT: store ptr %varargs, ptr %s.ascast, align 8
-; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s, align 8
-; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s)
+; CHECK-LABEL: define hidden void @start_once(
+; CHECK-SAME: ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[S:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: [[S_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[S]] to ptr
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[S]])
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[S_ASCAST]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr addrspace(5) [[S]], align 8
+; CHECK-NEXT: call void @valist(ptr noundef [[TMP0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[S]])
; CHECK-NEXT: ret void
;
entry:
@@ -98,22 +97,23 @@ declare void @llvm.va_start.p0(ptr)
declare void @llvm.va_end.p0(ptr)
define hidden void @start_twice(...) {
-; CHECK-LABEL: define {{[^@]+}}@start_twice(ptr %varargs) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %s0 = alloca ptr, align 8, addrspace(5)
-; CHECK-NEXT: %s1 = alloca ptr, align 8, addrspace(5)
-; CHECK-NEXT: %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr
-; CHECK-NEXT: %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1)
-; CHECK-NEXT: store ptr %varargs, ptr %s0.ascast, align 8
-; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s0, align 8
-; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: store ptr %varargs, ptr %s1.ascast, align 8
-; CHECK-NEXT: %1 = load ptr, ptr addrspace(5) %s1, align 8
-; CHECK-NEXT: call void @valist(ptr noundef %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0)
+; CHECK-LABEL: define hidden void @start_twice(
+; CHECK-SAME: ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[S0:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: [[S1:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT: [[S0_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[S0]] to ptr
+; CHECK-NEXT: [[S1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[S1]] to ptr
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[S0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[S1]])
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[S0_ASCAST]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr addrspace(5) [[S0]], align 8
+; CHECK-NEXT: call void @valist(ptr noundef [[TMP0]])
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[S1_ASCAST]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr addrspace(5) [[S1]], align 8
+; CHECK-NEXT: call void @valist(ptr noundef [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[S0]])
; CHECK-NEXT: ret void
;
entry:
@@ -137,15 +137,16 @@ entry:
}
define hidden void @single_i32(i32 noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @single_i32(
+; CHECK-SAME: i32 noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[SINGLE_I32_VARARG:%.*]], align 4, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[SINGLE_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -156,15 +157,16 @@ entry:
declare hidden void @vararg(...)
define hidden void @single_double(double noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %single_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8
-; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @single_double(
+; CHECK-SAME: double noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[SINGLE_DOUBLE_VARARG:%.*]], align 8, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[SINGLE_DOUBLE_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store double [[X]], ptr addrspace(5) [[TMP0]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -173,15 +175,16 @@ entry:
}
define hidden void @single_v4f32(<4 x float> noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %single_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16
-; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @single_v4f32(
+; CHECK-SAME: <4 x float> noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[SINGLE_V4F32_VARARG:%.*]], align 16, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[SINGLE_V4F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <4 x float> [[X]], ptr addrspace(5) [[TMP0]], align 16
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -190,15 +193,16 @@ entry:
}
define hidden void @single_v8f32(<8 x float> noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %single_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32
-; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @single_v8f32(
+; CHECK-SAME: <8 x float> noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[SINGLE_V8F32_VARARG:%.*]], align 32, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[SINGLE_V8F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <8 x float> [[X]], ptr addrspace(5) [[TMP0]], align 32
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -207,15 +211,16 @@ entry:
}
define hidden void @single_v16f32(<16 x float> noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %single_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64
-; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @single_v16f32(
+; CHECK-SAME: <16 x float> noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[SINGLE_V16F32_VARARG:%.*]], align 64, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[SINGLE_V16F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <16 x float> [[X]], ptr addrspace(5) [[TMP0]], align 64
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -224,15 +229,16 @@ entry:
}
define hidden void @single_v32f32(<32 x float> noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %single_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128
-; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @single_v32f32(
+; CHECK-SAME: <32 x float> noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[SINGLE_V32F32_VARARG:%.*]], align 128, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[SINGLE_V32F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <32 x float> [[X]], ptr addrspace(5) [[TMP0]], align 128
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -241,17 +247,18 @@ entry:
}
define hidden void @i32_double(i32 noundef %x, double noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = getelementptr inbounds %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store double %y, ptr addrspace(5) %1, align 8
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @i32_double(
+; CHECK-SAME: i32 noundef [[X:%.*]], double noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_DOUBLE_VARARG:%.*]], align 8, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[I32_DOUBLE_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[I32_DOUBLE_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store double [[Y]], ptr addrspace(5) [[TMP1]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -260,17 +267,18 @@ entry:
}
define hidden void @double_i32(double noundef %x, i32 noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8
-; CHECK-NEXT: %1 = getelementptr inbounds %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @double_i32(
+; CHECK-SAME: double noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[DOUBLE_I32_VARARG:%.*]], align 8, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[DOUBLE_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store double [[X]], ptr addrspace(5) [[TMP0]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[DOUBLE_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -279,23 +287,24 @@ entry:
}
define hidden void @i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32 %y.coerce2, i64 %y.coerce3, float %y.coerce4, double %y.coerce5) {
-; CHECK-LABEL: define {{[^@]+}}@i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32 %y.coerce2, i64 %y.coerce3, float %y.coerce4, double %y.coerce5) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %i32_libcS.vararg, align 4, addrspace(5)
-; CHECK-NEXT: %.fca.0.insert = insertvalue %struct.libcS poison, i8 %y.coerce0, 0
-; CHECK-NEXT: %.fca.1.insert = insertvalue %struct.libcS %.fca.0.insert, i16 %y.coerce1, 1
-; CHECK-NEXT: %.fca.2.insert = insertvalue %struct.libcS %.fca.1.insert, i32 %y.coerce2, 2
-; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %y.coerce3, 3
-; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %y.coerce4, 4
-; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %y.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = getelementptr inbounds %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @i32_libcS(
+; CHECK-SAME: i32 noundef [[X:%.*]], i8 [[Y_COERCE0:%.*]], i16 [[Y_COERCE1:%.*]], i32 [[Y_COERCE2:%.*]], i64 [[Y_COERCE3:%.*]], float [[Y_COERCE4:%.*]], double [[Y_COERCE5:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_VARARG:%.*]], align 8, addrspace(5)
+; CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS:%.*]] poison, i8 [[Y_COERCE0]], 0
+; CHECK-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_0_INSERT]], i16 [[Y_COERCE1]], 1
+; CHECK-NEXT: [[DOTFCA_2_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_1_INSERT]], i32 [[Y_COERCE2]], 2
+; CHECK-NEXT: [[DOTFCA_3_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_2_INSERT]], i64 [[Y_COERCE3]], 3
+; CHECK-NEXT: [[DOTFCA_4_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_3_INSERT]], float [[Y_COERCE4]], 4
+; CHECK-NEXT: [[DOTFCA_5_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_4_INSERT]], double [[Y_COERCE5]], 5
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 40, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[I32_LIBCS_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[I32_LIBCS_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store [[STRUCT_LIBCS]] [[DOTFCA_5_INSERT]], ptr addrspace(5) [[TMP1]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 40, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -310,23 +319,24 @@ entry:
}
define hidden void @libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 %x.coerce3, float %x.coerce4, double %x.coerce5, i32 noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 %x.coerce3, float %x.coerce4, double %x.coerce5, i32 noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %libcS_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: %.fca.0.insert = insertvalue %struct.libcS poison, i8 %x.coerce0, 0
-; CHECK-NEXT: %.fca.1.insert = insertvalue %struct.libcS %.fca.0.insert, i16 %x.coerce1, 1
-; CHECK-NEXT: %.fca.2.insert = insertvalue %struct.libcS %.fca.1.insert, i32 %x.coerce2, 2
-; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3
-; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4
-; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %0, align 8
-; CHECK-NEXT: %1 = getelementptr inbounds %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @libcS_i32(
+; CHECK-SAME: i8 [[X_COERCE0:%.*]], i16 [[X_COERCE1:%.*]], i32 [[X_COERCE2:%.*]], i64 [[X_COERCE3:%.*]], float [[X_COERCE4:%.*]], double [[X_COERCE5:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_VARARG:%.*]], align 8, addrspace(5)
+; CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS:%.*]] poison, i8 [[X_COERCE0]], 0
+; CHECK-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_0_INSERT]], i16 [[X_COERCE1]], 1
+; CHECK-NEXT: [[DOTFCA_2_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_1_INSERT]], i32 [[X_COERCE2]], 2
+; CHECK-NEXT: [[DOTFCA_3_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_2_INSERT]], i64 [[X_COERCE3]], 3
+; CHECK-NEXT: [[DOTFCA_4_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_3_INSERT]], float [[X_COERCE4]], 4
+; CHECK-NEXT: [[DOTFCA_5_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_4_INSERT]], double [[X_COERCE5]], 5
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[LIBCS_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store [[STRUCT_LIBCS]] [[DOTFCA_5_INSERT]], ptr addrspace(5) [[TMP0]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[LIBCS_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -341,17 +351,18 @@ entry:
}
define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = getelementptr inbounds %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store <4 x float> %y, ptr addrspace(5) %1, align 16
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @i32_v4f32(
+; CHECK-SAME: i32 noundef [[X:%.*]], <4 x float> noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_V4F32_VARARG:%.*]], align 16, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[I32_V4F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[I32_V4F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store <4 x float> [[Y]], ptr addrspace(5) [[TMP1]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -360,17 +371,18 @@ entry:
}
define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16
-; CHECK-NEXT: %1 = getelementptr inbounds %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @v4f32_i32(
+; CHECK-SAME: <4 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[V4F32_I32_VARARG:%.*]], align 16, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[V4F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <4 x float> [[X]], ptr addrspace(5) [[TMP0]], align 16
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[V4F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -379,17 +391,18 @@ entry:
}
define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = getelementptr inbounds %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store <8 x float> %y, ptr addrspace(5) %1, align 32
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @i32_v8f32(
+; CHECK-SAME: i32 noundef [[X:%.*]], <8 x float> noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_V8F32_VARARG:%.*]], align 32, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[I32_V8F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[I32_V8F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store <8 x float> [[Y]], ptr addrspace(5) [[TMP1]], align 32
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -398,17 +411,18 @@ entry:
}
define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32
-; CHECK-NEXT: %1 = getelementptr inbounds %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @v8f32_i32(
+; CHECK-SAME: <8 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[V8F32_I32_VARARG:%.*]], align 32, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[V8F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <8 x float> [[X]], ptr addrspace(5) [[TMP0]], align 32
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[V8F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -417,17 +431,18 @@ entry:
}
define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = getelementptr inbounds %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store <16 x float> %y, ptr addrspace(5) %1, align 64
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @i32_v16f32(
+; CHECK-SAME: i32 noundef [[X:%.*]], <16 x float> noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_V16F32_VARARG:%.*]], align 64, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[I32_V16F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[I32_V16F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store <16 x float> [[Y]], ptr addrspace(5) [[TMP1]], align 64
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -436,17 +451,18 @@ entry:
}
define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64
-; CHECK-NEXT: %1 = getelementptr inbounds %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @v16f32_i32(
+; CHECK-SAME: <16 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[V16F32_I32_VARARG:%.*]], align 64, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[V16F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <16 x float> [[X]], ptr addrspace(5) [[TMP0]], align 64
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[V16F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -455,17 +471,18 @@ entry:
}
define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
-; CHECK-NEXT: %1 = getelementptr inbounds %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store <32 x float> %y, ptr addrspace(5) %1, align 128
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @i32_v32f32(
+; CHECK-SAME: i32 noundef [[X:%.*]], <32 x float> noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_V32F32_VARARG:%.*]], align 128, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 256, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[I32_V32F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[I32_V32F32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store <32 x float> [[Y]], ptr addrspace(5) [[TMP1]], align 128
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 256, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -474,17 +491,18 @@ entry:
}
define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
-; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %0 = getelementptr inbounds %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128
-; CHECK-NEXT: %1 = getelementptr inbounds %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
-; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @v32f32_i32(
+; CHECK-SAME: <32 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[V32F32_I32_VARARG:%.*]], align 128, addrspace(5)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[V32F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <32 x float> [[X]], ptr addrspace(5) [[TMP0]], align 128
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[V32F32_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void @vararg(ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -493,16 +511,17 @@ entry:
}
define hidden void @fptr_single_i32(i32 noundef %x) {
-; CHECK-LABEL: define {{[^@]+}}@fptr_single_i32(i32 noundef %x) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: %0 = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %1 = getelementptr inbounds %fptr_single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store i32 %x, ptr addrspace(5) %1, align 4
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void %0(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @fptr_single_i32(
+; CHECK-SAME: i32 noundef [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_SINGLE_I32_VARARG:%.*]], align 4, addrspace(5)
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[FPTR_SINGLE_I32_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[X]], ptr addrspace(5) [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void [[TMP0]](ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -512,22 +531,23 @@ entry:
}
define hidden void @fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 %x.coerce3, float %x.coerce4, double %x.coerce5) {
-; CHECK-LABEL: define {{[^@]+}}@fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 %x.coerce3, float %x.coerce4, double %x.coerce5) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %vararg_buffer = alloca %fptr_libcS.vararg, align 4, addrspace(5)
-; CHECK-NEXT: %0 = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8
-; CHECK-NEXT: %.fca.0.insert = insertvalue %struct.libcS poison, i8 %x.coerce0, 0
-; CHECK-NEXT: %.fca.1.insert = insertvalue %struct.libcS %.fca.0.insert, i16 %x.coerce1, 1
-; CHECK-NEXT: %.fca.2.insert = insertvalue %struct.libcS %.fca.1.insert, i32 %x.coerce2, 2
-; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3
-; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4
-; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer)
-; CHECK-NEXT: %1 = getelementptr inbounds %fptr_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
-; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8
-; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
-; CHECK-NEXT: call void %0(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-LABEL: define hidden void @fptr_libcS(
+; CHECK-SAME: i8 [[X_COERCE0:%.*]], i16 [[X_COERCE1:%.*]], i32 [[X_COERCE2:%.*]], i64 [[X_COERCE3:%.*]], float [[X_COERCE4:%.*]], double [[X_COERCE5:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_LIBCS_VARARG:%.*]], align 8, addrspace(5)
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8
+; CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS:%.*]] poison, i8 [[X_COERCE0]], 0
+; CHECK-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_0_INSERT]], i16 [[X_COERCE1]], 1
+; CHECK-NEXT: [[DOTFCA_2_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_1_INSERT]], i32 [[X_COERCE2]], 2
+; CHECK-NEXT: [[DOTFCA_3_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_2_INSERT]], i64 [[X_COERCE3]], 3
+; CHECK-NEXT: [[DOTFCA_4_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_3_INSERT]], float [[X_COERCE4]], 4
+; CHECK-NEXT: [[DOTFCA_5_INSERT:%.*]] = insertvalue [[STRUCT_LIBCS]] [[DOTFCA_4_INSERT]], double [[X_COERCE5]], 5
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[FPTR_LIBCS_VARARG]], ptr addrspace(5) [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store [[STRUCT_LIBCS]] [[DOTFCA_5_INSERT]], ptr addrspace(5) [[TMP1]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(5) [[VARARG_BUFFER]] to ptr
+; CHECK-NEXT: call void [[TMP0]](ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
@@ -541,5 +561,3 @@ entry:
tail call void (...) %0(%struct.libcS %.fca.5.insert)
ret void
}
-
-
>From 25c2bad135e58849d552ad84d621b7f11bd0249c Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Mon, 17 Jun 2024 15:32:31 -0500
Subject: [PATCH 2/3] [NVPTX] Implement variadic functions using IR lowering
Summary:
This patch implements support for variadic functions for NVPTX targets.
The implementation here mainly follows what was done to implement it for
AMDGPU in https://github.com/llvm/llvm-project/pull/93362.
We change the NVPTX codegen to lower all variadic arguments to functions
by-value. This creates a flattened set of arguments that the IR lowering
pass converts into a struct with the proper alignment.
The behavior of this function was determined by iteratively checking
what the NVCC compiler generates for its output. See examples like
https://godbolt.org/z/KavfTGY93. I have noted the main methods that
NVIDIA uses to lower variadic functions.
1. All arguments are passed in a pointer to aggregate.
2. The minimum alignment for a plain argument is 4 bytes.
3. Alignment is dictated by the underlying type.
4. Structs are flattened and do not have their alignment changed.
5. NVPTX never passes any arguments indirectly, even very large ones.
This patch passes the tests in the `libc` project currently, including
support for `sprintf`.
---
clang/lib/Basic/Targets/NVPTX.h | 3 +-
clang/lib/CodeGen/Targets/NVPTX.cpp | 11 +-
clang/test/CodeGen/variadic-nvptx.c | 77 ++++
libc/test/src/__support/CMakeLists.txt | 21 +-
llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 2 +
llvm/lib/Transforms/IPO/ExpandVariadics.cpp | 43 +-
llvm/test/CodeGen/NVPTX/variadics-backend.ll | 427 ++++++++++++++++++
llvm/test/CodeGen/NVPTX/variadics-lowering.ll | 348 ++++++++++++++
8 files changed, 912 insertions(+), 20 deletions(-)
create mode 100644 clang/test/CodeGen/variadic-nvptx.c
create mode 100644 llvm/test/CodeGen/NVPTX/variadics-backend.ll
create mode 100644 llvm/test/CodeGen/NVPTX/variadics-lowering.ll
diff --git a/clang/lib/Basic/Targets/NVPTX.h b/clang/lib/Basic/Targets/NVPTX.h
index f476d49047c013..e30eaf808ca939 100644
--- a/clang/lib/Basic/Targets/NVPTX.h
+++ b/clang/lib/Basic/Targets/NVPTX.h
@@ -116,8 +116,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
}
BuiltinVaListKind getBuiltinVaListKind() const override {
- // FIXME: implement
- return TargetInfo::CharPtrBuiltinVaList;
+ return TargetInfo::VoidPtrBuiltinVaList;
}
bool isValidCPUName(StringRef Name) const override {
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 423485c9ca16e8..01a0b07856103a 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -203,8 +203,12 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ unsigned ArgumentsCount = 0;
for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
+ I.info = ArgumentsCount++ < FI.getNumRequiredArgs()
+ ? classifyArgumentType(I.type)
+ : ABIArgInfo::getDirect();
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -215,7 +219,10 @@ void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
RValue NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty, AggValueSlot Slot) const {
- llvm_unreachable("NVPTX does not support varargs");
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4),
+ /*AllowHigherAlign=*/true, Slot);
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
diff --git a/clang/test/CodeGen/variadic-nvptx.c b/clang/test/CodeGen/variadic-nvptx.c
new file mode 100644
index 00000000000000..f2f0768ae31ee7
--- /dev/null
+++ b/clang/test/CodeGen/variadic-nvptx.c
@@ -0,0 +1,77 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -emit-llvm -o - %s | FileCheck %s
+
+extern void varargs_simple(int, ...);
+
+// CHECK-LABEL: define dso_local void @foo(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[C:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[S:%.*]] = alloca i16, align 2
+// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[L:%.*]] = alloca i64, align 8
+// CHECK-NEXT: [[F:%.*]] = alloca float, align 4
+// CHECK-NEXT: [[D:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
+// CHECK-NEXT: [[V:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: store i8 1, ptr [[C]], align 1
+// CHECK-NEXT: store i16 1, ptr [[S]], align 2
+// CHECK-NEXT: store i32 1, ptr [[I]], align 4
+// CHECK-NEXT: store i64 1, ptr [[L]], align 8
+// CHECK-NEXT: store float 1.000000e+00, ptr [[F]], align 4
+// CHECK-NEXT: store double 1.000000e+00, ptr [[D]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[C]], align 1
+// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[S]], align 2
+// CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[L]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F]], align 4
+// CHECK-NEXT: [[CONV2:%.*]] = fpext float [[TMP4]] to double
+// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[D]], align 8
+// CHECK-NEXT: call void (i32, ...) @varargs_simple(i32 noundef 0, i32 noundef [[CONV]], i32 noundef [[CONV1]], i32 noundef [[TMP2]], i64 noundef [[TMP3]], double noundef [[CONV2]], double noundef [[TMP5]])
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[A]], ptr align 4 @__const.foo.a, i64 12, i1 false)
+// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[A]], i32 0, i32 0
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[A]], i32 0, i32 1
+// CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[A]], i32 0, i32 2
+// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
+// CHECK-NEXT: call void (i32, ...) @varargs_simple(i32 noundef 0, i32 [[TMP7]], i8 [[TMP9]], i32 [[TMP11]])
+// CHECK-NEXT: store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr [[V]], align 16
+// CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr [[V]], align 16
+// CHECK-NEXT: call void (i32, ...) @varargs_simple(i32 noundef 0, <4 x i32> noundef [[TMP12]])
+// CHECK-NEXT: ret void
+//
+void foo() {
+ char c = '\x1';
+ short s = 1;
+ int i = 1;
+ long l = 1;
+ float f = 1.f;
+ double d = 1.;
+ varargs_simple(0, c, s, i, l, f, d);
+
+ struct {int x; char c; int y;} a = {1, '\x1', 1};
+ varargs_simple(0, a);
+
+ typedef int __attribute__((ext_vector_type(4))) int4;
+ int4 v = {1, 1, 1, 1};
+ varargs_simple(0, v);
+}
+
+typedef struct {long x; long y;} S;
+extern void varargs_complex(S, S, ...);
+
+// CHECK-LABEL: define dso_local void @bar(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.bar.s, i64 16, i1 false)
+// CHECK-NEXT: call void (ptr, ptr, ...) @varargs_complex(ptr noundef byval([[STRUCT_S]]) align 8 [[S]], ptr noundef byval([[STRUCT_S]]) align 8 [[S]], i32 noundef 1, i64 noundef 1, double noundef 1.000000e+00)
+// CHECK-NEXT: ret void
+//
+void bar() {
+ S s = {1l, 1l};
+ varargs_complex(s, s, 1, 1l, 1.0);
+}
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index ce8413fed71721..c84ea86f5c4007 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -130,18 +130,15 @@ add_libc_test(
libc.src.__support.uint128
)
-# NVPTX does not support varargs currently.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
- add_libc_test(
- arg_list_test
- SUITE
- libc-support-tests
- SRCS
- arg_list_test.cpp
- DEPENDS
- libc.src.__support.arg_list
- )
-endif()
+add_libc_test(
+ arg_list_test
+ SUITE
+ libc-support-tests
+ SRCS
+ arg_list_test.cpp
+ DEPENDS
+ libc.src.__support.arg_list
+)
if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_libc_test(
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 152f200b9d0f36..097e29527eed9f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -33,6 +33,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
+#include "llvm/Transforms/IPO/ExpandVariadics.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
@@ -342,6 +343,7 @@ void NVPTXPassConfig::addIRPasses() {
}
addPass(createAtomicExpandLegacyPass());
+ addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
addPass(createNVPTXCtorDtorLoweringLegacyPass());
// === LSR and other generic IR passes ===
diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
index 489e13410d3b15..e8b479162c77ca 100644
--- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
+++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
@@ -456,8 +456,8 @@ bool ExpandVariadics::runOnFunction(Module &M, IRBuilder<> &Builder,
// Replace known calls to the variadic with calls to the va_list equivalent
for (User *U : make_early_inc_range(VariadicWrapper->users())) {
if (CallBase *CB = dyn_cast<CallBase>(U)) {
- Value *calledOperand = CB->getCalledOperand();
- if (VariadicWrapper == calledOperand)
+ Value *CalledOperand = CB->getCalledOperand();
+ if (VariadicWrapper == CalledOperand)
Changed |=
expandCall(M, Builder, CB, VariadicWrapper->getFunctionType(),
FixedArityReplacement);
@@ -942,6 +942,36 @@ struct Amdgpu final : public VariadicABIInfo {
}
};
+struct NVPTX final : public VariadicABIInfo {
+
+ bool enableForTarget() override { return true; }
+
+ bool vaListPassedInSSARegister() override { return true; }
+
+ Type *vaListType(LLVMContext &Ctx) override {
+ return PointerType::getUnqual(Ctx);
+ }
+
+ Type *vaListParameterType(Module &M) override {
+ return PointerType::getUnqual(M.getContext());
+ }
+
+ Value *initializeVaList(Module &M, LLVMContext &Ctx, IRBuilder<> &Builder,
+ AllocaInst *, Value *Buffer) override {
+ return Builder.CreateAddrSpaceCast(Buffer, vaListParameterType(M));
+ }
+
+ VAArgSlotInfo slotInfo(const DataLayout &DL, Type *Parameter) override {
+ // NVPTX expects natural alignment in all cases. The variadic call ABI will
+ // handle promoting types to their appropriate size and alignment.
+ const unsigned MinAlign = 1;
+ Align A = DL.getABITypeAlign(Parameter);
+ if (A < MinAlign)
+ A = Align(MinAlign);
+ return {A, false};
+ }
+};
+
struct Wasm final : public VariadicABIInfo {
bool enableForTarget() override {
@@ -971,8 +1001,8 @@ struct Wasm final : public VariadicABIInfo {
if (A < MinAlign)
A = Align(MinAlign);
- if (auto s = dyn_cast<StructType>(Parameter)) {
- if (s->getNumElements() > 1) {
+ if (auto *S = dyn_cast<StructType>(Parameter)) {
+ if (S->getNumElements() > 1) {
return {DL.getABITypeAlign(PointerType::getUnqual(Ctx)), true};
}
}
@@ -992,6 +1022,11 @@ std::unique_ptr<VariadicABIInfo> VariadicABIInfo::create(const Triple &T) {
return std::make_unique<Wasm>();
}
+ case Triple::nvptx:
+ case Triple::nvptx64: {
+ return std::make_unique<NVPTX>();
+ }
+
default:
return {};
}
diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
new file mode 100644
index 00000000000000..0e0c89d3e0214f
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
@@ -0,0 +1,427 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64-- -march=nvptx64 -mcpu=sm_52 -mattr=+ptx64 | FileCheck %s --check-prefix=CHECK-PTX
+; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64-- -march=nvptx64 -mcpu=sm_52 -mattr=+ptx64 | %ptxas-verify %}
+
+%struct.S1 = type { i32, i8, i64 }
+%struct.S2 = type { i64, i64 }
+
+@__const.bar.s1 = private unnamed_addr constant %struct.S1 { i32 1, i8 1, i64 1 }, align 8
+@__const.qux.s = private unnamed_addr constant %struct.S2 { i64 1, i64 1 }, align 8
+
+define dso_local i32 @variadics1(i32 noundef %first, ...) {
+; CHECK-PTX-LABEL: variadics1(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .reg .b32 %r<11>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<11>;
+; CHECK-PTX-NEXT: .reg .f64 %fd<7>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: ld.param.u32 %r1, [variadics1_param_0];
+; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics1_param_1];
+; CHECK-PTX-NEXT: ld.u32 %r2, [%rd1];
+; CHECK-PTX-NEXT: add.s32 %r3, %r1, %r2;
+; CHECK-PTX-NEXT: ld.u32 %r4, [%rd1+4];
+; CHECK-PTX-NEXT: add.s32 %r5, %r3, %r4;
+; CHECK-PTX-NEXT: ld.u32 %r6, [%rd1+8];
+; CHECK-PTX-NEXT: add.s32 %r7, %r5, %r6;
+; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 19;
+; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
+; CHECK-PTX-NEXT: ld.u64 %rd4, [%rd3];
+; CHECK-PTX-NEXT: cvt.u64.u32 %rd5, %r7;
+; CHECK-PTX-NEXT: add.s64 %rd6, %rd5, %rd4;
+; CHECK-PTX-NEXT: cvt.u32.u64 %r8, %rd6;
+; CHECK-PTX-NEXT: add.s64 %rd7, %rd3, 15;
+; CHECK-PTX-NEXT: and.b64 %rd8, %rd7, -8;
+; CHECK-PTX-NEXT: ld.f64 %fd1, [%rd8];
+; CHECK-PTX-NEXT: cvt.rn.f64.s32 %fd2, %r8;
+; CHECK-PTX-NEXT: add.rn.f64 %fd3, %fd2, %fd1;
+; CHECK-PTX-NEXT: cvt.rzi.s32.f64 %r9, %fd3;
+; CHECK-PTX-NEXT: add.s64 %rd9, %rd8, 15;
+; CHECK-PTX-NEXT: and.b64 %rd10, %rd9, -8;
+; CHECK-PTX-NEXT: ld.f64 %fd4, [%rd10];
+; CHECK-PTX-NEXT: cvt.rn.f64.s32 %fd5, %r9;
+; CHECK-PTX-NEXT: add.rn.f64 %fd6, %fd5, %fd4;
+; CHECK-PTX-NEXT: cvt.rzi.s32.f64 %r10, %fd6;
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r10;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %vlist = alloca ptr, align 8
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur, i64 4
+ store ptr %argp.next, ptr %vlist, align 8
+ %0 = load i32, ptr %argp.cur, align 4
+ %add = add nsw i32 %first, %0
+ %argp.cur1 = load ptr, ptr %vlist, align 8
+ %argp.next2 = getelementptr inbounds i8, ptr %argp.cur1, i64 4
+ store ptr %argp.next2, ptr %vlist, align 8
+ %1 = load i32, ptr %argp.cur1, align 4
+ %add3 = add nsw i32 %add, %1
+ %argp.cur4 = load ptr, ptr %vlist, align 8
+ %argp.next5 = getelementptr inbounds i8, ptr %argp.cur4, i64 4
+ store ptr %argp.next5, ptr %vlist, align 8
+ %2 = load i32, ptr %argp.cur4, align 4
+ %add6 = add nsw i32 %add3, %2
+ %argp.cur7 = load ptr, ptr %vlist, align 8
+ %3 = getelementptr inbounds i8, ptr %argp.cur7, i32 7
+ %argp.cur7.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %3, i64 -8)
+ %argp.next8 = getelementptr inbounds i8, ptr %argp.cur7.aligned, i64 8
+ store ptr %argp.next8, ptr %vlist, align 8
+ %4 = load i64, ptr %argp.cur7.aligned, align 8
+ %conv = sext i32 %add6 to i64
+ %add9 = add nsw i64 %conv, %4
+ %conv10 = trunc i64 %add9 to i32
+ %argp.cur11 = load ptr, ptr %vlist, align 8
+ %5 = getelementptr inbounds i8, ptr %argp.cur11, i32 7
+ %argp.cur11.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %5, i64 -8)
+ %argp.next12 = getelementptr inbounds i8, ptr %argp.cur11.aligned, i64 8
+ store ptr %argp.next12, ptr %vlist, align 8
+ %6 = load double, ptr %argp.cur11.aligned, align 8
+ %conv13 = sitofp i32 %conv10 to double
+ %add14 = fadd double %conv13, %6
+ %conv15 = fptosi double %add14 to i32
+ %argp.cur16 = load ptr, ptr %vlist, align 8
+ %7 = getelementptr inbounds i8, ptr %argp.cur16, i32 7
+ %argp.cur16.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %7, i64 -8)
+ %argp.next17 = getelementptr inbounds i8, ptr %argp.cur16.aligned, i64 8
+ store ptr %argp.next17, ptr %vlist, align 8
+ %8 = load double, ptr %argp.cur16.aligned, align 8
+ %conv18 = sitofp i32 %conv15 to double
+ %add19 = fadd double %conv18, %8
+ %conv20 = fptosi double %add19 to i32
+ call void @llvm.va_end.p0(ptr %vlist)
+ ret i32 %conv20
+}
+
+declare void @llvm.va_start.p0(ptr)
+
+declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
+
+declare void @llvm.va_end.p0(ptr)
+
+define dso_local i32 @foo() {
+; CHECK-PTX-LABEL: foo(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot1[40];
+; CHECK-PTX-NEXT: .reg .b64 %SP;
+; CHECK-PTX-NEXT: .reg .b64 %SPL;
+; CHECK-PTX-NEXT: .reg .b32 %r<4>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<5>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot1;
+; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-PTX-NEXT: mov.u64 %rd1, 4294967297;
+; CHECK-PTX-NEXT: st.u64 [%SP+0], %rd1;
+; CHECK-PTX-NEXT: mov.b32 %r1, 1;
+; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1;
+; CHECK-PTX-NEXT: mov.u64 %rd2, 1;
+; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd2;
+; CHECK-PTX-NEXT: mov.u64 %rd3, 4607182418800017408;
+; CHECK-PTX-NEXT: st.u64 [%SP+24], %rd3;
+; CHECK-PTX-NEXT: st.u64 [%SP+32], %rd3;
+; CHECK-PTX-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-PTX-NEXT: { // callseq 0, 0
+; CHECK-PTX-NEXT: .param .b32 param0;
+; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1;
+; CHECK-PTX-NEXT: .param .b64 param1;
+; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd4;
+; CHECK-PTX-NEXT: .param .b32 retval0;
+; CHECK-PTX-NEXT: call.uni (retval0),
+; CHECK-PTX-NEXT: variadics1,
+; CHECK-PTX-NEXT: (
+; CHECK-PTX-NEXT: param0,
+; CHECK-PTX-NEXT: param1
+; CHECK-PTX-NEXT: );
+; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0];
+; CHECK-PTX-NEXT: } // callseq 0
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %conv = sext i8 1 to i32
+ %conv1 = sext i16 1 to i32
+ %conv2 = fpext float 1.000000e+00 to double
+ %call = call i32 (i32, ...) @variadics1(i32 noundef 1, i32 noundef %conv, i32 noundef %conv1, i32 noundef 1, i64 noundef 1, double noundef %conv2, double noundef 1.000000e+00)
+ ret i32 %call
+}
+
+define dso_local i32 @variadics2(i32 noundef %first, ...) {
+; CHECK-PTX-LABEL: variadics2(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .local .align 2 .b8 __local_depot2[4];
+; CHECK-PTX-NEXT: .reg .b64 %SP;
+; CHECK-PTX-NEXT: .reg .b64 %SPL;
+; CHECK-PTX-NEXT: .reg .b16 %rs<6>;
+; CHECK-PTX-NEXT: .reg .b32 %r<7>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<11>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot2;
+; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-PTX-NEXT: ld.param.u32 %r1, [variadics2_param_0];
+; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics2_param_1];
+; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
+; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
+; CHECK-PTX-NEXT: ld.u32 %r2, [%rd3];
+; CHECK-PTX-NEXT: or.b64 %rd4, %rd3, 4;
+; CHECK-PTX-NEXT: ld.s8 %r3, [%rd4];
+; CHECK-PTX-NEXT: or.b64 %rd5, %rd3, 5;
+; CHECK-PTX-NEXT: or.b64 %rd6, %rd3, 7;
+; CHECK-PTX-NEXT: ld.u8 %rs1, [%rd6];
+; CHECK-PTX-NEXT: st.u8 [%SP+2], %rs1;
+; CHECK-PTX-NEXT: ld.u8 %rs2, [%rd5];
+; CHECK-PTX-NEXT: or.b64 %rd7, %rd3, 6;
+; CHECK-PTX-NEXT: ld.u8 %rs3, [%rd7];
+; CHECK-PTX-NEXT: shl.b16 %rs4, %rs3, 8;
+; CHECK-PTX-NEXT: or.b16 %rs5, %rs4, %rs2;
+; CHECK-PTX-NEXT: st.u16 [%SP+0], %rs5;
+; CHECK-PTX-NEXT: ld.u64 %rd8, [%rd3+8];
+; CHECK-PTX-NEXT: add.s32 %r4, %r1, %r2;
+; CHECK-PTX-NEXT: add.s32 %r5, %r4, %r3;
+; CHECK-PTX-NEXT: cvt.u64.u32 %rd9, %r5;
+; CHECK-PTX-NEXT: add.s64 %rd10, %rd9, %rd8;
+; CHECK-PTX-NEXT: cvt.u32.u64 %r6, %rd10;
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r6;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %vlist = alloca ptr, align 8
+ %s1.sroa.3 = alloca [3 x i8], align 1
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
+ %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
+ store ptr %argp.next, ptr %vlist, align 8
+ %s1.sroa.0.0.copyload = load i32, ptr %argp.cur.aligned, align 8
+ %s1.sroa.2.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 4
+ %s1.sroa.2.0.copyload = load i8, ptr %s1.sroa.2.0.argp.cur.aligned.sroa_idx, align 4
+ %s1.sroa.3.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 5
+ call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 %s1.sroa.3.0.argp.cur.aligned.sroa_idx, i64 3, i1 false)
+ %s1.sroa.31.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
+ %s1.sroa.31.0.copyload = load i64, ptr %s1.sroa.31.0.argp.cur.aligned.sroa_idx, align 8
+ %add = add nsw i32 %first, %s1.sroa.0.0.copyload
+ %conv = sext i8 %s1.sroa.2.0.copyload to i32
+ %add1 = add nsw i32 %add, %conv
+ %conv2 = sext i32 %add1 to i64
+ %add3 = add nsw i64 %conv2, %s1.sroa.31.0.copyload
+ %conv4 = trunc i64 %add3 to i32
+ call void @llvm.va_end.p0(ptr %vlist)
+ ret i32 %conv4
+}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+
+define dso_local i32 @bar() {
+; CHECK-PTX-LABEL: bar(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot3[24];
+; CHECK-PTX-NEXT: .reg .b64 %SP;
+; CHECK-PTX-NEXT: .reg .b64 %SPL;
+; CHECK-PTX-NEXT: .reg .b16 %rs<10>;
+; CHECK-PTX-NEXT: .reg .b32 %r<4>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<8>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot3;
+; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-PTX-NEXT: mov.u64 %rd1, __const_$_bar_$_s1;
+; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
+; CHECK-PTX-NEXT: ld.global.nc.u8 %rs1, [%rd2];
+; CHECK-PTX-NEXT: cvt.u16.u8 %rs2, %rs1;
+; CHECK-PTX-NEXT: st.u8 [%SP+2], %rs2;
+; CHECK-PTX-NEXT: add.s64 %rd3, %rd1, 5;
+; CHECK-PTX-NEXT: ld.global.nc.u8 %rs3, [%rd3];
+; CHECK-PTX-NEXT: cvt.u16.u8 %rs4, %rs3;
+; CHECK-PTX-NEXT: add.s64 %rd4, %rd1, 6;
+; CHECK-PTX-NEXT: ld.global.nc.u8 %rs5, [%rd4];
+; CHECK-PTX-NEXT: cvt.u16.u8 %rs6, %rs5;
+; CHECK-PTX-NEXT: shl.b16 %rs7, %rs6, 8;
+; CHECK-PTX-NEXT: or.b16 %rs8, %rs7, %rs4;
+; CHECK-PTX-NEXT: st.u16 [%SP+0], %rs8;
+; CHECK-PTX-NEXT: mov.b32 %r1, 1;
+; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1;
+; CHECK-PTX-NEXT: add.u64 %rd5, %SP, 8;
+; CHECK-PTX-NEXT: or.b64 %rd6, %rd5, 4;
+; CHECK-PTX-NEXT: mov.u16 %rs9, 1;
+; CHECK-PTX-NEXT: st.u8 [%rd6], %rs9;
+; CHECK-PTX-NEXT: mov.u64 %rd7, 1;
+; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd7;
+; CHECK-PTX-NEXT: { // callseq 1, 0
+; CHECK-PTX-NEXT: .param .b32 param0;
+; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1;
+; CHECK-PTX-NEXT: .param .b64 param1;
+; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd5;
+; CHECK-PTX-NEXT: .param .b32 retval0;
+; CHECK-PTX-NEXT: call.uni (retval0),
+; CHECK-PTX-NEXT: variadics2,
+; CHECK-PTX-NEXT: (
+; CHECK-PTX-NEXT: param0,
+; CHECK-PTX-NEXT: param1
+; CHECK-PTX-NEXT: );
+; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0];
+; CHECK-PTX-NEXT: } // callseq 1
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %s1.sroa.3 = alloca [3 x i8], align 1
+ %s1.sroa.0.0.copyload = load i32, ptr @__const.bar.s1, align 8
+ %s1.sroa.2.0.copyload = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
+ call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
+ %s1.sroa.31.0.copyload = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
+ %call = call i32 (i32, ...) @variadics2(i32 noundef 1, i32 %s1.sroa.0.0.copyload, i8 %s1.sroa.2.0.copyload, i64 %s1.sroa.31.0.copyload)
+ ret i32 %call
+}
+
+define dso_local i32 @variadics3(i32 noundef %first, ...) {
+; CHECK-PTX-LABEL: variadics3(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .reg .b32 %r<8>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<4>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics3_param_1];
+; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 15;
+; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -16;
+; CHECK-PTX-NEXT: ld.v4.u32 {%r1, %r2, %r3, %r4}, [%rd3];
+; CHECK-PTX-NEXT: add.s32 %r5, %r1, %r2;
+; CHECK-PTX-NEXT: add.s32 %r6, %r5, %r3;
+; CHECK-PTX-NEXT: add.s32 %r7, %r6, %r4;
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r7;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %vlist = alloca ptr, align 8
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %0 = getelementptr inbounds i8, ptr %argp.cur, i32 15
+ %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -16)
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
+ store ptr %argp.next, ptr %vlist, align 8
+ %1 = load <4 x i32>, ptr %argp.cur.aligned, align 16
+ call void @llvm.va_end.p0(ptr %vlist)
+ %2 = extractelement <4 x i32> %1, i64 0
+ %3 = extractelement <4 x i32> %1, i64 1
+ %add = add nsw i32 %2, %3
+ %4 = extractelement <4 x i32> %1, i64 2
+ %add1 = add nsw i32 %add, %4
+ %5 = extractelement <4 x i32> %1, i64 3
+ %add2 = add nsw i32 %add1, %5
+ ret i32 %add2
+}
+
+define dso_local i32 @baz() {
+; CHECK-PTX-LABEL: baz(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .local .align 16 .b8 __local_depot5[16];
+; CHECK-PTX-NEXT: .reg .b64 %SP;
+; CHECK-PTX-NEXT: .reg .b64 %SPL;
+; CHECK-PTX-NEXT: .reg .b32 %r<4>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot5;
+; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-PTX-NEXT: mov.b32 %r1, 1;
+; CHECK-PTX-NEXT: st.v4.u32 [%SP+0], {%r1, %r1, %r1, %r1};
+; CHECK-PTX-NEXT: add.u64 %rd1, %SP, 0;
+; CHECK-PTX-NEXT: { // callseq 2, 0
+; CHECK-PTX-NEXT: .param .b32 param0;
+; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1;
+; CHECK-PTX-NEXT: .param .b64 param1;
+; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd1;
+; CHECK-PTX-NEXT: .param .b32 retval0;
+; CHECK-PTX-NEXT: call.uni (retval0),
+; CHECK-PTX-NEXT: variadics3,
+; CHECK-PTX-NEXT: (
+; CHECK-PTX-NEXT: param0,
+; CHECK-PTX-NEXT: param1
+; CHECK-PTX-NEXT: );
+; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0];
+; CHECK-PTX-NEXT: } // callseq 2
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %call = call i32 (i32, ...) @variadics3(i32 noundef 1, <4 x i32> noundef <i32 1, i32 1, i32 1, i32 1>)
+ ret i32 %call
+}
+
+define dso_local i32 @variadics4(ptr noundef byval(%struct.S2) align 8 %first, ...) {
+; CHECK-PTX-LABEL: variadics4(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .reg .b32 %r<2>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<9>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics4_param_1];
+; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
+; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
+; CHECK-PTX-NEXT: ld.u64 %rd4, [%rd3];
+; CHECK-PTX-NEXT: ld.param.u64 %rd5, [variadics4_param_0];
+; CHECK-PTX-NEXT: ld.param.u64 %rd6, [variadics4_param_0+8];
+; CHECK-PTX-NEXT: add.s64 %rd7, %rd5, %rd6;
+; CHECK-PTX-NEXT: add.s64 %rd8, %rd7, %rd4;
+; CHECK-PTX-NEXT: cvt.u32.u64 %r1, %rd8;
+; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r1;
+; CHECK-PTX-NEXT: ret;
+entry:
+ %vlist = alloca ptr, align 8
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
+ %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
+ store ptr %argp.next, ptr %vlist, align 8
+ %1 = load i64, ptr %argp.cur.aligned, align 8
+ %x1 = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 0
+ %2 = load i64, ptr %x1, align 8
+ %y = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 1
+ %3 = load i64, ptr %y, align 8
+ %add = add nsw i64 %2, %3
+ %add2 = add nsw i64 %add, %1
+ %conv = trunc i64 %add2 to i32
+ call void @llvm.va_end.p0(ptr %vlist)
+ ret i32 %conv
+}
+
+define dso_local void @qux() {
+; CHECK-PTX-LABEL: qux(
+; CHECK-PTX: {
+; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot7[24];
+; CHECK-PTX-NEXT: .reg .b64 %SP;
+; CHECK-PTX-NEXT: .reg .b64 %SPL;
+; CHECK-PTX-NEXT: .reg .b32 %r<3>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<7>;
+; CHECK-PTX-EMPTY:
+; CHECK-PTX-NEXT: // %bb.0: // %entry
+; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot7;
+; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-PTX-NEXT: ld.global.nc.u64 %rd1, [__const_$_qux_$_s];
+; CHECK-PTX-NEXT: st.u64 [%SP+0], %rd1;
+; CHECK-PTX-NEXT: mov.u64 %rd2, __const_$_qux_$_s;
+; CHECK-PTX-NEXT: add.s64 %rd3, %rd2, 8;
+; CHECK-PTX-NEXT: ld.global.nc.u64 %rd4, [%rd3];
+; CHECK-PTX-NEXT: st.u64 [%SP+8], %rd4;
+; CHECK-PTX-NEXT: mov.u64 %rd5, 1;
+; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd5;
+; CHECK-PTX-NEXT: add.u64 %rd6, %SP, 16;
+; CHECK-PTX-NEXT: { // callseq 3, 0
+; CHECK-PTX-NEXT: .param .align 8 .b8 param0[16];
+; CHECK-PTX-NEXT: st.param.b64 [param0+0], %rd1;
+; CHECK-PTX-NEXT: st.param.b64 [param0+8], %rd4;
+; CHECK-PTX-NEXT: .param .b64 param1;
+; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd6;
+; CHECK-PTX-NEXT: .param .b32 retval0;
+; CHECK-PTX-NEXT: call.uni (retval0),
+; CHECK-PTX-NEXT: variadics4,
+; CHECK-PTX-NEXT: (
+; CHECK-PTX-NEXT: param0,
+; CHECK-PTX-NEXT: param1
+; CHECK-PTX-NEXT: );
+; CHECK-PTX-NEXT: ld.param.b32 %r1, [retval0+0];
+; CHECK-PTX-NEXT: } // callseq 3
+; CHECK-PTX-NEXT: ret;
+entry:
+ %s = alloca %struct.S2, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s, ptr align 8 @__const.qux.s, i64 16, i1 false)
+ %call = call i32 (ptr, ...) @variadics4(ptr noundef byval(%struct.S2) align 8 %s, i64 noundef 1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
new file mode 100644
index 00000000000000..e40fdff6c19cd3
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
@@ -0,0 +1,348 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=nvptx64-- --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s
+
+%struct.S1 = type { i32, i8, i64 }
+%struct.S2 = type { i64, i64 }
+
+@__const.bar.s1 = private unnamed_addr constant %struct.S1 { i32 1, i8 1, i64 1 }, align 8
+@__const.qux.s = private unnamed_addr constant %struct.S2 { i64 1, i64 1 }, align 8
+
+define dso_local i32 @variadics1(i32 noundef %first, ...) {
+; CHECK-LABEL: define dso_local i32 @variadics1(
+; CHECK-SAME: i32 noundef [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 4
+; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[FIRST]], [[TMP0]]
+; CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i64 4
+; CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGP_CUR1]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[TMP1]]
+; CHECK-NEXT: [[ARGP_CUR4:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR4]], i64 4
+; CHECK-NEXT: store ptr [[ARGP_NEXT5]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR4]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD3]], [[TMP2]]
+; CHECK-NEXT: [[ARGP_CUR7:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR7]], i32 7
+; CHECK-NEXT: [[ARGP_CUR7_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP3]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT8:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR7_ALIGNED]], i64 8
+; CHECK-NEXT: store ptr [[ARGP_NEXT8]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[ARGP_CUR7_ALIGNED]], align 8
+; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[ADD6]] to i64
+; CHECK-NEXT: [[ADD9:%.*]] = add nsw i64 [[CONV]], [[TMP4]]
+; CHECK-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32
+; CHECK-NEXT: [[ARGP_CUR11:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR11]], i32 7
+; CHECK-NEXT: [[ARGP_CUR11_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP5]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT12:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR11_ALIGNED]], i64 8
+; CHECK-NEXT: store ptr [[ARGP_NEXT12]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load double, ptr [[ARGP_CUR11_ALIGNED]], align 8
+; CHECK-NEXT: [[CONV13:%.*]] = sitofp i32 [[CONV10]] to double
+; CHECK-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], [[TMP6]]
+; CHECK-NEXT: [[CONV15:%.*]] = fptosi double [[ADD14]] to i32
+; CHECK-NEXT: [[ARGP_CUR16:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR16]], i32 7
+; CHECK-NEXT: [[ARGP_CUR16_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP7]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT17:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR16_ALIGNED]], i64 8
+; CHECK-NEXT: store ptr [[ARGP_NEXT17]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load double, ptr [[ARGP_CUR16_ALIGNED]], align 8
+; CHECK-NEXT: [[CONV18:%.*]] = sitofp i32 [[CONV15]] to double
+; CHECK-NEXT: [[ADD19:%.*]] = fadd double [[CONV18]], [[TMP8]]
+; CHECK-NEXT: [[CONV20:%.*]] = fptosi double [[ADD19]] to i32
+; CHECK-NEXT: ret i32 [[CONV20]]
+;
+entry:
+ %vlist = alloca ptr, align 8
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur, i64 4
+ store ptr %argp.next, ptr %vlist, align 8
+ %0 = load i32, ptr %argp.cur, align 4
+ %add = add nsw i32 %first, %0
+ %argp.cur1 = load ptr, ptr %vlist, align 8
+ %argp.next2 = getelementptr inbounds i8, ptr %argp.cur1, i64 4
+ store ptr %argp.next2, ptr %vlist, align 8
+ %1 = load i32, ptr %argp.cur1, align 4
+ %add3 = add nsw i32 %add, %1
+ %argp.cur4 = load ptr, ptr %vlist, align 8
+ %argp.next5 = getelementptr inbounds i8, ptr %argp.cur4, i64 4
+ store ptr %argp.next5, ptr %vlist, align 8
+ %2 = load i32, ptr %argp.cur4, align 4
+ %add6 = add nsw i32 %add3, %2
+ %argp.cur7 = load ptr, ptr %vlist, align 8
+ %3 = getelementptr inbounds i8, ptr %argp.cur7, i32 7
+ %argp.cur7.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %3, i64 -8)
+ %argp.next8 = getelementptr inbounds i8, ptr %argp.cur7.aligned, i64 8
+ store ptr %argp.next8, ptr %vlist, align 8
+ %4 = load i64, ptr %argp.cur7.aligned, align 8
+ %conv = sext i32 %add6 to i64
+ %add9 = add nsw i64 %conv, %4
+ %conv10 = trunc i64 %add9 to i32
+ %argp.cur11 = load ptr, ptr %vlist, align 8
+ %5 = getelementptr inbounds i8, ptr %argp.cur11, i32 7
+ %argp.cur11.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %5, i64 -8)
+ %argp.next12 = getelementptr inbounds i8, ptr %argp.cur11.aligned, i64 8
+ store ptr %argp.next12, ptr %vlist, align 8
+ %6 = load double, ptr %argp.cur11.aligned, align 8
+ %conv13 = sitofp i32 %conv10 to double
+ %add14 = fadd double %conv13, %6
+ %conv15 = fptosi double %add14 to i32
+ %argp.cur16 = load ptr, ptr %vlist, align 8
+ %7 = getelementptr inbounds i8, ptr %argp.cur16, i32 7
+ %argp.cur16.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %7, i64 -8)
+ %argp.next17 = getelementptr inbounds i8, ptr %argp.cur16.aligned, i64 8
+ store ptr %argp.next17, ptr %vlist, align 8
+ %8 = load double, ptr %argp.cur16.aligned, align 8
+ %conv18 = sitofp i32 %conv15 to double
+ %add19 = fadd double %conv18, %8
+ %conv20 = fptosi double %add19 to i32
+ call void @llvm.va_end.p0(ptr %vlist)
+ ret i32 %conv20
+}
+
+declare void @llvm.va_start.p0(ptr)
+
+declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
+
+declare void @llvm.va_end.p0(ptr)
+
+define dso_local i32 @foo() {
+; CHECK-LABEL: define dso_local i32 @foo() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FOO_VARARG:%.*]], align 8
+; CHECK-NEXT: [[CONV:%.*]] = sext i8 1 to i32
+; CHECK-NEXT: [[CONV1:%.*]] = sext i16 1 to i32
+; CHECK-NEXT: [[CONV2:%.*]] = fpext float 1.000000e+00 to double
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i32 [[CONV1]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
+; CHECK-NEXT: store i32 1, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 4
+; CHECK-NEXT: store i64 1, ptr [[TMP3]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 5
+; CHECK-NEXT: store double [[CONV2]], ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 6
+; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics1(i32 noundef 1, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret i32 [[CALL]]
+;
+entry:
+ %conv = sext i8 1 to i32
+ %conv1 = sext i16 1 to i32
+ %conv2 = fpext float 1.000000e+00 to double
+ %call = call i32 (i32, ...) @variadics1(i32 noundef 1, i32 noundef %conv, i32 noundef %conv1, i32 noundef 1, i64 noundef 1, double noundef %conv2, double noundef 1.000000e+00)
+ ret i32 %call
+}
+
+define dso_local i32 @variadics2(i32 noundef %first, ...) {
+; CHECK-LABEL: define dso_local i32 @variadics2(
+; CHECK-SAME: i32 noundef [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[S1_SROA_3:%.*]] = alloca [3 x i8], align 1
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
+; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
+; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[S1_SROA_0_0_COPYLOAD:%.*]] = load i32, ptr [[ARGP_CUR_ALIGNED]], align 8
+; CHECK-NEXT: [[S1_SROA_2_0_ARGP_CUR_ALIGNED_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 4
+; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr [[S1_SROA_2_0_ARGP_CUR_ALIGNED_SROA_IDX]], align 4
+; CHECK-NEXT: [[S1_SROA_3_0_ARGP_CUR_ALIGNED_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 5
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 [[S1_SROA_3_0_ARGP_CUR_ALIGNED_SROA_IDX]], i64 3, i1 false)
+; CHECK-NEXT: [[S1_SROA_31_0_ARGP_CUR_ALIGNED_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 8
+; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr [[S1_SROA_31_0_ARGP_CUR_ALIGNED_SROA_IDX]], align 8
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[FIRST]], [[S1_SROA_0_0_COPYLOAD]]
+; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[S1_SROA_2_0_COPYLOAD]] to i32
+; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[CONV]]
+; CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT: [[ADD3:%.*]] = add nsw i64 [[CONV2]], [[S1_SROA_31_0_COPYLOAD]]
+; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[ADD3]] to i32
+; CHECK-NEXT: ret i32 [[CONV4]]
+;
+entry:
+ %vlist = alloca ptr, align 8
+ %s1.sroa.3 = alloca [3 x i8], align 1
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
+ %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
+ store ptr %argp.next, ptr %vlist, align 8
+ %s1.sroa.0.0.copyload = load i32, ptr %argp.cur.aligned, align 8
+ %s1.sroa.2.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 4
+ %s1.sroa.2.0.copyload = load i8, ptr %s1.sroa.2.0.argp.cur.aligned.sroa_idx, align 4
+ %s1.sroa.3.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 5
+ call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 %s1.sroa.3.0.argp.cur.aligned.sroa_idx, i64 3, i1 false)
+ %s1.sroa.31.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
+ %s1.sroa.31.0.copyload = load i64, ptr %s1.sroa.31.0.argp.cur.aligned.sroa_idx, align 8
+ %add = add nsw i32 %first, %s1.sroa.0.0.copyload
+ %conv = sext i8 %s1.sroa.2.0.copyload to i32
+ %add1 = add nsw i32 %add, %conv
+ %conv2 = sext i32 %add1 to i64
+ %add3 = add nsw i64 %conv2, %s1.sroa.31.0.copyload
+ %conv4 = trunc i64 %add3 to i32
+ call void @llvm.va_end.p0(ptr %vlist)
+ ret i32 %conv4
+}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+
+define dso_local i32 @bar() {
+; CHECK-LABEL: define dso_local i32 @bar() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[S1_SROA_3:%.*]] = alloca [3 x i8], align 1
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAR_VARARG:%.*]], align 8
+; CHECK-NEXT: [[S1_SROA_0_0_COPYLOAD:%.*]] = load i32, ptr @__const.bar.s1, align 8
+; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
+; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 [[S1_SROA_0_0_COPYLOAD]], ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
+; CHECK-NEXT: store i8 [[S1_SROA_2_0_COPYLOAD]], ptr [[TMP1]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
+; CHECK-NEXT: store i64 [[S1_SROA_31_0_COPYLOAD]], ptr [[TMP2]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics2(i32 noundef 1, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret i32 [[CALL]]
+;
+entry:
+ %s1.sroa.3 = alloca [3 x i8], align 1
+ %s1.sroa.0.0.copyload = load i32, ptr @__const.bar.s1, align 8
+ %s1.sroa.2.0.copyload = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
+ call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
+ %s1.sroa.31.0.copyload = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
+ %call = call i32 (i32, ...) @variadics2(i32 noundef 1, i32 %s1.sroa.0.0.copyload, i8 %s1.sroa.2.0.copyload, i64 %s1.sroa.31.0.copyload)
+ ret i32 %call
+}
+
+define dso_local i32 @variadics3(i32 noundef %first, ...) {
+; CHECK-LABEL: define dso_local i32 @variadics3(
+; CHECK-SAME: i32 noundef [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
+; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
+; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
+; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARGP_CUR_ALIGNED]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[TMP1]], i64 1
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP1]], i64 2
+; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP1]], i64 3
+; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD1]], [[TMP5]]
+; CHECK-NEXT: ret i32 [[ADD2]]
+;
+entry:
+ %vlist = alloca ptr, align 8
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %0 = getelementptr inbounds i8, ptr %argp.cur, i32 15
+ %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -16)
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
+ store ptr %argp.next, ptr %vlist, align 8
+ %1 = load <4 x i32>, ptr %argp.cur.aligned, align 16
+ call void @llvm.va_end.p0(ptr %vlist)
+ %2 = extractelement <4 x i32> %1, i64 0
+ %3 = extractelement <4 x i32> %1, i64 1
+ %add = add nsw i32 %2, %3
+ %4 = extractelement <4 x i32> %1, i64 2
+ %add1 = add nsw i32 %add, %4
+ %5 = extractelement <4 x i32> %1, i64 3
+ %add2 = add nsw i32 %add1, %5
+ ret i32 %add2
+}
+
+define dso_local i32 @baz() {
+; CHECK-LABEL: define dso_local i32 @baz() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAZ_VARARG:%.*]], align 16
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[BAZ_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr [[TMP0]], align 16
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics3(i32 noundef 1, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret i32 [[CALL]]
+;
+entry:
+ %call = call i32 (i32, ...) @variadics3(i32 noundef 1, <4 x i32> noundef <i32 1, i32 1, i32 1, i32 1>)
+ ret i32 %call
+}
+
+define dso_local i32 @variadics4(ptr noundef byval(%struct.S2) align 8 %first, ...) {
+; CHECK-LABEL: define dso_local i32 @variadics4(
+; CHECK-SAME: ptr noundef byval([[STRUCT_S2:%.*]]) align 8 [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
+; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 8
+; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGP_CUR_ALIGNED]], align 8
+; CHECK-NEXT: [[X1:%.*]] = getelementptr inbounds [[STRUCT_S2]], ptr [[FIRST]], i32 0, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[X1]], align 8
+; CHECK-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_S2]], ptr [[FIRST]], i32 0, i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[Y]], align 8
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP2]], [[TMP3]]
+; CHECK-NEXT: [[ADD2:%.*]] = add nsw i64 [[ADD]], [[TMP1]]
+; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[ADD2]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %vlist = alloca ptr, align 8
+ call void @llvm.va_start.p0(ptr %vlist)
+ %argp.cur = load ptr, ptr %vlist, align 8
+ %0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
+ %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
+ store ptr %argp.next, ptr %vlist, align 8
+ %1 = load i64, ptr %argp.cur.aligned, align 8
+ %x1 = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 0
+ %2 = load i64, ptr %x1, align 8
+ %y = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 1
+ %3 = load i64, ptr %y, align 8
+ %add = add nsw i64 %2, %3
+ %add2 = add nsw i64 %add, %1
+ %conv = trunc i64 %add2 to i32
+ call void @llvm.va_end.p0(ptr %vlist)
+ ret i32 %conv
+}
+
+define dso_local void @qux() {
+; CHECK-LABEL: define dso_local void @qux() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 8
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[QUX_VARARG:%.*]], align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.qux.s, i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[QUX_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics4(ptr noundef byval([[STRUCT_S2]]) align 8 [[S]], ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %s = alloca %struct.S2, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s, ptr align 8 @__const.qux.s, i64 16, i1 false)
+ %call = call i32 (ptr, ...) @variadics4(ptr noundef byval(%struct.S2) align 8 %s, i64 noundef 1)
+ ret void
+}
>From c54d8d417b42c2de22f69468cf03096ec5ae1d4c Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Fri, 21 Jun 2024 19:10:40 -0500
Subject: [PATCH 3/3] [libc] Implement (v|f)printf on the GPU
Summary:
This patch implements the `printf` family of functions on the GPU using
the new variadic support. It adapts the old handling in the
`rpc_fprintf` placeholder, but adds an extra RPC call to get the size of
the buffer to copy. This prevents the GPU from needing to parse the
string. While it's theoretically possible for the pass to know the size
of the struct, it's prohibitively difficult to do while maintaining ABI
compatibility with NVIDIA's varargs.
Depends on https://github.com/llvm/llvm-project/pull/96015.
---
libc/config/gpu/entrypoints.txt | 19 ++---
libc/src/__support/arg_list.h | 3 +-
libc/src/gpu/rpc_fprintf.cpp | 5 +-
libc/src/stdio/CMakeLists.txt | 24 +-----
libc/src/stdio/generic/CMakeLists.txt | 25 +++++++
libc/src/stdio/{ => generic}/fprintf.cpp | 0
libc/src/stdio/{ => generic}/vfprintf.cpp | 0
libc/src/stdio/gpu/CMakeLists.txt | 48 ++++++++++++
libc/src/stdio/gpu/fprintf.cpp | 32 ++++++++
libc/src/stdio/gpu/printf.cpp | 30 ++++++++
libc/src/stdio/gpu/vfprintf.cpp | 29 ++++++++
libc/src/stdio/gpu/vfprintf_utils.h | 73 +++++++++++++++++++
libc/src/stdio/gpu/vprintf.cpp | 28 +++++++
.../integration/src/stdio/gpu/CMakeLists.txt | 2 +-
.../test/integration/src/stdio/gpu/printf.cpp | 43 ++++-------
libc/utils/gpu/server/rpc_server.cpp | 24 +++++-
16 files changed, 318 insertions(+), 67 deletions(-)
rename libc/src/stdio/{ => generic}/fprintf.cpp (100%)
rename libc/src/stdio/{ => generic}/vfprintf.cpp (100%)
create mode 100644 libc/src/stdio/gpu/fprintf.cpp
create mode 100644 libc/src/stdio/gpu/printf.cpp
create mode 100644 libc/src/stdio/gpu/vfprintf.cpp
create mode 100644 libc/src/stdio/gpu/vfprintf_utils.h
create mode 100644 libc/src/stdio/gpu/vprintf.cpp
diff --git a/libc/config/gpu/entrypoints.txt b/libc/config/gpu/entrypoints.txt
index 2217a696fc5d18..de1ca6bfd151f5 100644
--- a/libc/config/gpu/entrypoints.txt
+++ b/libc/config/gpu/entrypoints.txt
@@ -1,13 +1,3 @@
-if(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
- set(extra_entrypoints
- # stdio.h entrypoints
- libc.src.stdio.sprintf
- libc.src.stdio.snprintf
- libc.src.stdio.vsprintf
- libc.src.stdio.vsnprintf
- )
-endif()
-
set(TARGET_LIBC_ENTRYPOINTS
# assert.h entrypoints
libc.src.assert.__assert_fail
@@ -185,7 +175,14 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.errno.errno
# stdio.h entrypoints
- ${extra_entrypoints}
+ libc.src.stdio.printf
+ libc.src.stdio.vprintf
+ libc.src.stdio.fprintf
+ libc.src.stdio.vfprintf
+ libc.src.stdio.sprintf
+ libc.src.stdio.snprintf
+ libc.src.stdio.vsprintf
+ libc.src.stdio.vsnprintf
libc.src.stdio.feof
libc.src.stdio.ferror
libc.src.stdio.fseek
diff --git a/libc/src/__support/arg_list.h b/libc/src/__support/arg_list.h
index 0965e12afd562a..3a4e5ad0fab3c9 100644
--- a/libc/src/__support/arg_list.h
+++ b/libc/src/__support/arg_list.h
@@ -54,7 +54,8 @@ class MockArgList {
}
template <class T> LIBC_INLINE T next_var() {
- ++arg_counter;
+ arg_counter =
+ ((arg_counter + alignof(T) - 1) / alignof(T)) * alignof(T) + sizeof(T);
return T(arg_counter);
}
diff --git a/libc/src/gpu/rpc_fprintf.cpp b/libc/src/gpu/rpc_fprintf.cpp
index 7b0e60b59baf3f..659144d1330043 100644
--- a/libc/src/gpu/rpc_fprintf.cpp
+++ b/libc/src/gpu/rpc_fprintf.cpp
@@ -29,6 +29,9 @@ int fprintf_impl(::FILE *__restrict file, const char *__restrict format,
}
port.send_n(format, format_size);
+ port.recv([&](rpc::Buffer *buffer) {
+ args_size = static_cast<size_t>(buffer->data[0]);
+ });
port.send_n(args, args_size);
uint32_t ret = 0;
@@ -50,7 +53,7 @@ int fprintf_impl(::FILE *__restrict file, const char *__restrict format,
return ret;
}
-// TODO: This is a stand-in function that uses a struct pointer and size in
+// TODO: Delete this and port OpenMP to use `printf`.
// place of varargs. Once varargs support is added we will use that to
// implement the real version.
LLVM_LIBC_FUNCTION(int, rpc_fprintf,
diff --git a/libc/src/stdio/CMakeLists.txt b/libc/src/stdio/CMakeLists.txt
index a659d9e847a9ef..3c536a287b2c4a 100644
--- a/libc/src/stdio/CMakeLists.txt
+++ b/libc/src/stdio/CMakeLists.txt
@@ -159,17 +159,6 @@ add_entrypoint_object(
libc.src.stdio.printf_core.writer
)
-add_entrypoint_object(
- fprintf
- SRCS
- fprintf.cpp
- HDRS
- fprintf.h
- DEPENDS
- libc.src.__support.arg_list
- libc.src.stdio.printf_core.vfprintf_internal
-)
-
add_entrypoint_object(
vsprintf
SRCS
@@ -192,17 +181,6 @@ add_entrypoint_object(
libc.src.stdio.printf_core.writer
)
-add_entrypoint_object(
- vfprintf
- SRCS
- vfprintf.cpp
- HDRS
- vfprintf.h
- DEPENDS
- libc.src.__support.arg_list
- libc.src.stdio.printf_core.vfprintf_internal
-)
-
add_stdio_entrypoint_object(
fileno
SRCS
@@ -261,6 +239,7 @@ add_stdio_entrypoint_object(fputc)
add_stdio_entrypoint_object(putc)
add_stdio_entrypoint_object(putchar)
add_stdio_entrypoint_object(printf)
+add_stdio_entrypoint_object(fprintf)
add_stdio_entrypoint_object(fgetc)
add_stdio_entrypoint_object(fgetc_unlocked)
add_stdio_entrypoint_object(getc)
@@ -273,3 +252,4 @@ add_stdio_entrypoint_object(stdin)
add_stdio_entrypoint_object(stdout)
add_stdio_entrypoint_object(stderr)
add_stdio_entrypoint_object(vprintf)
+add_stdio_entrypoint_object(vfprintf)
diff --git a/libc/src/stdio/generic/CMakeLists.txt b/libc/src/stdio/generic/CMakeLists.txt
index 9cd4cfdae17f4c..e0a1c577efcb6a 100644
--- a/libc/src/stdio/generic/CMakeLists.txt
+++ b/libc/src/stdio/generic/CMakeLists.txt
@@ -396,6 +396,31 @@ add_entrypoint_object(
${printf_deps}
)
+add_entrypoint_object(
+ fprintf
+ SRCS
+ fprintf.cpp
+ HDRS
+ ../fprintf.h
+ DEPENDS
+ libc.src.__support.arg_list
+ libc.src.stdio.printf_core.vfprintf_internal
+ ${printf_deps}
+)
+
+add_entrypoint_object(
+ vfprintf
+ SRCS
+ vfprintf.cpp
+ HDRS
+ ../vfprintf.h
+ DEPENDS
+ libc.src.__support.arg_list
+ libc.src.stdio.printf_core.vfprintf_internal
+ ${printf_deps}
+)
+
+
add_entrypoint_object(
fgets
SRCS
diff --git a/libc/src/stdio/fprintf.cpp b/libc/src/stdio/generic/fprintf.cpp
similarity index 100%
rename from libc/src/stdio/fprintf.cpp
rename to libc/src/stdio/generic/fprintf.cpp
diff --git a/libc/src/stdio/vfprintf.cpp b/libc/src/stdio/generic/vfprintf.cpp
similarity index 100%
rename from libc/src/stdio/vfprintf.cpp
rename to libc/src/stdio/generic/vfprintf.cpp
diff --git a/libc/src/stdio/gpu/CMakeLists.txt b/libc/src/stdio/gpu/CMakeLists.txt
index 1b1e2a903cc0b9..280e0d3f6b00d6 100644
--- a/libc/src/stdio/gpu/CMakeLists.txt
+++ b/libc/src/stdio/gpu/CMakeLists.txt
@@ -10,6 +10,14 @@ add_header_library(
.stderr
)
+add_header_library(
+ vfprintf_utils
+ HDRS
+ vfprintf_utils.h
+ DEPENDS
+ .gpu_file
+)
+
add_entrypoint_object(
feof
SRCS
@@ -262,6 +270,46 @@ add_entrypoint_object(
.gpu_file
)
+add_entrypoint_object(
+ printf
+ SRCS
+ printf.cpp
+ HDRS
+ ../printf.h
+ DEPENDS
+ .vfprintf_utils
+)
+
+add_entrypoint_object(
+ vprintf
+ SRCS
+ vprintf.cpp
+ HDRS
+ ../vprintf.h
+ DEPENDS
+ .vfprintf_utils
+)
+
+add_entrypoint_object(
+ fprintf
+ SRCS
+ fprintf.cpp
+ HDRS
+ ../fprintf.h
+ DEPENDS
+ .vfprintf_utils
+)
+
+add_entrypoint_object(
+ vfprintf
+ SRCS
+ vfprintf.cpp
+ HDRS
+ ../vfprintf.h
+ DEPENDS
+ .vfprintf_utils
+)
+
add_entrypoint_object(
stdin
SRCS
diff --git a/libc/src/stdio/gpu/fprintf.cpp b/libc/src/stdio/gpu/fprintf.cpp
new file mode 100644
index 00000000000000..adae0203180a47
--- /dev/null
+++ b/libc/src/stdio/gpu/fprintf.cpp
@@ -0,0 +1,32 @@
+//===-- GPU Implementation of fprintf -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdio/fprintf.h"
+
+#include "src/__support/CPP/string_view.h"
+#include "src/__support/arg_list.h"
+#include "src/errno/libc_errno.h"
+#include "src/stdio/gpu/vfprintf_utils.h"
+
+#include <stdio.h>
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, fprintf,
+ (::FILE *__restrict stream, const char *__restrict format,
+ ...)) {
+ va_list vlist;
+ va_start(vlist, format);
+ cpp::string_view str_view(format);
+ int ret_val =
+ file::vfprintf_internal(stream, format, str_view.size() + 1, vlist);
+ va_end(vlist);
+ return ret_val;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdio/gpu/printf.cpp b/libc/src/stdio/gpu/printf.cpp
new file mode 100644
index 00000000000000..44905f24bad7da
--- /dev/null
+++ b/libc/src/stdio/gpu/printf.cpp
@@ -0,0 +1,30 @@
+//===-- GPU Implementation of printf --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdio/printf.h"
+
+#include "src/__support/CPP/string_view.h"
+#include "src/__support/arg_list.h"
+#include "src/errno/libc_errno.h"
+#include "src/stdio/gpu/vfprintf_utils.h"
+
+#include <stdio.h>
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, printf, (const char *__restrict format, ...)) {
+ va_list vlist;
+ va_start(vlist, format);
+ cpp::string_view str_view(format);
+ int ret_val =
+ file::vfprintf_internal(stdout, format, str_view.size() + 1, vlist);
+ va_end(vlist);
+ return ret_val;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdio/gpu/vfprintf.cpp b/libc/src/stdio/gpu/vfprintf.cpp
new file mode 100644
index 00000000000000..2ec65d9afcb977
--- /dev/null
+++ b/libc/src/stdio/gpu/vfprintf.cpp
@@ -0,0 +1,29 @@
+//===-- GPU Implementation of vfprintf ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdio/vfprintf.h"
+
+#include "src/__support/CPP/string_view.h"
+#include "src/__support/arg_list.h"
+#include "src/errno/libc_errno.h"
+#include "src/stdio/gpu/vfprintf_utils.h"
+
+#include <stdio.h>
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, vfprintf,
+ (::FILE *__restrict stream, const char *__restrict format,
+ va_list vlist)) {
+ cpp::string_view str_view(format);
+ int ret_val =
+ file::vfprintf_internal(stream, format, str_view.size() + 1, vlist);
+ return ret_val;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdio/gpu/vfprintf_utils.h b/libc/src/stdio/gpu/vfprintf_utils.h
new file mode 100644
index 00000000000000..e1a3b97b356733
--- /dev/null
+++ b/libc/src/stdio/gpu/vfprintf_utils.h
@@ -0,0 +1,73 @@
+//===--- GPU helper functions for printf using RPC ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/RPC/rpc_client.h"
+#include "src/__support/arg_list.h"
+#include "src/stdio/gpu/file.h"
+#include "src/string/string_utils.h"
+
+#include <stdio.h>
+
+namespace LIBC_NAMESPACE {
+namespace file {
+
+template <uint16_t opcode>
+LIBC_INLINE int vfprintf_impl(::FILE *__restrict file,
+ const char *__restrict format, size_t format_size,
+ va_list vlist) {
+ uint64_t mask = gpu::get_lane_mask();
+ rpc::Client::Port port = rpc::client.open<opcode>();
+
+ if constexpr (opcode == RPC_PRINTF_TO_STREAM) {
+ port.send([&](rpc::Buffer *buffer) {
+ buffer->data[0] = reinterpret_cast<uintptr_t>(file);
+ });
+ }
+
+ size_t args_size = 0;
+ port.send_n(format, format_size);
+ port.recv([&](rpc::Buffer *buffer) {
+ args_size = static_cast<size_t>(buffer->data[0]);
+ });
+ port.send_n(vlist, args_size);
+
+ uint32_t ret = 0;
+ for (;;) {
+ const char *str = nullptr;
+ port.recv([&](rpc::Buffer *buffer) {
+ ret = static_cast<uint32_t>(buffer->data[0]);
+ str = reinterpret_cast<const char *>(buffer->data[1]);
+ });
+ // If any lanes have a string argument it needs to be copied back.
+ if (!gpu::ballot(mask, str))
+ break;
+
+ uint64_t size = str ? internal::string_length(str) + 1 : 0;
+ port.send_n(str, size);
+ }
+
+ port.close();
+ return ret;
+}
+
+LIBC_INLINE int vfprintf_internal(::FILE *__restrict stream,
+ const char *__restrict format,
+ size_t format_size, va_list vlist) {
+ if (stream == stdout)
+ return vfprintf_impl<RPC_PRINTF_TO_STDOUT>(stream, format, format_size,
+ vlist);
+ else if (stream == stderr)
+ return vfprintf_impl<RPC_PRINTF_TO_STDERR>(stream, format, format_size,
+ vlist);
+ else
+ return vfprintf_impl<RPC_PRINTF_TO_STREAM>(stream, format, format_size,
+ vlist);
+}
+
+} // namespace file
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdio/gpu/vprintf.cpp b/libc/src/stdio/gpu/vprintf.cpp
new file mode 100644
index 00000000000000..ee5b89a1a33d1b
--- /dev/null
+++ b/libc/src/stdio/gpu/vprintf.cpp
@@ -0,0 +1,28 @@
+//===-- GPU Implementation of vprintf -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdio/vprintf.h"
+
+#include "src/__support/CPP/string_view.h"
+#include "src/__support/arg_list.h"
+#include "src/errno/libc_errno.h"
+#include "src/stdio/gpu/vfprintf_utils.h"
+
+#include <stdio.h>
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, vprintf,
+ (const char *__restrict format, va_list vlist)) {
+ cpp::string_view str_view(format);
+ int ret_val =
+ file::vfprintf_internal(stdout, format, str_view.size() + 1, vlist);
+ return ret_val;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/test/integration/src/stdio/gpu/CMakeLists.txt b/libc/test/integration/src/stdio/gpu/CMakeLists.txt
index 6327c45e1ea5a3..04fbd4706c5568 100644
--- a/libc/test/integration/src/stdio/gpu/CMakeLists.txt
+++ b/libc/test/integration/src/stdio/gpu/CMakeLists.txt
@@ -13,7 +13,7 @@ add_integration_test(
SRCS
printf.cpp
DEPENDS
- libc.src.gpu.rpc_fprintf
+ libc.src.stdio.fprintf
libc.src.stdio.fopen
LOADER_ARGS
--threads 32
diff --git a/libc/test/integration/src/stdio/gpu/printf.cpp b/libc/test/integration/src/stdio/gpu/printf.cpp
index 97ad4ace1dcacc..766c4f9439115d 100644
--- a/libc/test/integration/src/stdio/gpu/printf.cpp
+++ b/libc/test/integration/src/stdio/gpu/printf.cpp
@@ -9,8 +9,8 @@
#include "test/IntegrationTest/test.h"
#include "src/__support/GPU/utils.h"
-#include "src/gpu/rpc_fprintf.h"
#include "src/stdio/fopen.h"
+#include "src/stdio/fprintf.h"
using namespace LIBC_NAMESPACE;
@@ -20,68 +20,51 @@ TEST_MAIN(int argc, char **argv, char **envp) {
ASSERT_TRUE(file && "failed to open file");
// Check basic printing.
int written = 0;
- written = LIBC_NAMESPACE::rpc_fprintf(file, "A simple string\n", nullptr, 0);
+ written = LIBC_NAMESPACE::fprintf(file, "A simple string\n");
ASSERT_EQ(written, 16);
const char *str = "A simple string\n";
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%s", &str, sizeof(void *));
+ written = LIBC_NAMESPACE::fprintf(file, "%s", str);
ASSERT_EQ(written, 16);
// Check printing a different value with each thread.
uint64_t thread_id = gpu::get_thread_id();
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%8ld\n", &thread_id,
- sizeof(thread_id));
+ written = LIBC_NAMESPACE::fprintf(file, "%8ld\n", thread_id);
ASSERT_EQ(written, 9);
- struct {
- uint32_t x = 1;
- char c = 'c';
- double f = 1.0;
- } args1;
- written =
- LIBC_NAMESPACE::rpc_fprintf(file, "%d%c%.1f\n", &args1, sizeof(args1));
+ written = LIBC_NAMESPACE::fprintf(file, "%d%c%.1f\n", 1, 'c', 1.0);
ASSERT_EQ(written, 6);
- struct {
- uint32_t x = 1;
- const char *str = "A simple string\n";
- } args2;
- written =
- LIBC_NAMESPACE::rpc_fprintf(file, "%032b%s\n", &args2, sizeof(args2));
+ written = LIBC_NAMESPACE::fprintf(file, "%032b%s\n", 1, "A simple string\n");
ASSERT_EQ(written, 49);
// Check that the server correctly handles divergent numbers of arguments.
const char *format = gpu::get_thread_id() % 2 ? "%s" : "%20ld\n";
- written = LIBC_NAMESPACE::rpc_fprintf(file, format, &str, sizeof(void *));
+ written = LIBC_NAMESPACE::fprintf(file, format, str);
ASSERT_EQ(written, gpu::get_thread_id() % 2 ? 16 : 21);
format = gpu::get_thread_id() % 2 ? "%s" : str;
- written = LIBC_NAMESPACE::rpc_fprintf(file, format, &str, sizeof(void *));
+ written = LIBC_NAMESPACE::fprintf(file, format, str);
ASSERT_EQ(written, 16);
// Check that we handle null arguments correctly.
struct {
void *null = nullptr;
} args3;
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%p", &args3, sizeof(args3));
+ written = LIBC_NAMESPACE::fprintf(file, "%p", nullptr);
ASSERT_EQ(written, 9);
#ifndef LIBC_COPT_PRINTF_NO_NULLPTR_CHECKS
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%s", &args3, sizeof(args3));
+ written = LIBC_NAMESPACE::fprintf(file, "%s", nullptr);
ASSERT_EQ(written, 6);
#endif // LIBC_COPT_PRINTF_NO_NULLPTR_CHECKS
// Check for extremely abused variable width arguments
- struct {
- uint32_t x = 1;
- uint32_t y = 2;
- double f = 1.0;
- } args4;
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%**d", &args4, sizeof(args4));
+ written = LIBC_NAMESPACE::fprintf(file, "%**d", 1, 2, 1.0);
ASSERT_EQ(written, 4);
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%**d%6d", &args4, sizeof(args4));
+ written = LIBC_NAMESPACE::fprintf(file, "%**d%6d", 1, 2, 1.0);
ASSERT_EQ(written, 10);
- written = LIBC_NAMESPACE::rpc_fprintf(file, "%**.**f", &args4, sizeof(args4));
+ written = LIBC_NAMESPACE::fprintf(file, "%**.**f", 1, 2, 1.0);
ASSERT_EQ(written, 7);
return 0;
diff --git a/libc/utils/gpu/server/rpc_server.cpp b/libc/utils/gpu/server/rpc_server.cpp
index 095f3fa13ffad6..8813f670930102 100644
--- a/libc/utils/gpu/server/rpc_server.cpp
+++ b/libc/utils/gpu/server/rpc_server.cpp
@@ -44,7 +44,7 @@ template <uint32_t lane_size> void handle_printf(rpc::Server::Port &port) {
// Get the appropriate output stream to use.
if (port.get_opcode() == RPC_PRINTF_TO_STREAM)
port.recv([&](rpc::Buffer *buffer, uint32_t id) {
- files[id] = reinterpret_cast<FILE *>(buffer->data[0]);
+ files[id] = file::to_stream(buffer->data[0]);
});
else if (port.get_opcode() == RPC_PRINTF_TO_STDOUT)
std::fill(files, files + lane_size, stdout);
@@ -60,6 +60,28 @@ template <uint32_t lane_size> void handle_printf(rpc::Server::Port &port) {
// Receive the format string and arguments from the client.
port.recv_n(format, format_sizes,
[&](uint64_t size) { return new char[size]; });
+
+ // Parse the format string to get the expected size of the buffer.
+ for (uint32_t lane = 0; lane < lane_size; ++lane) {
+ if (!format[lane])
+ continue;
+
+ WriteBuffer wb(nullptr, 0);
+ Writer writer(&wb);
+
+ internal::MockArgList printf_args;
+ Parser<internal::MockArgList &> parser(
+ reinterpret_cast<const char *>(format[lane]), printf_args);
+
+ for (FormatSection cur_section = parser.get_next_section();
+ !cur_section.raw_string.empty();
+ cur_section = parser.get_next_section())
+ ;
+ args_sizes[lane] = printf_args.read_count();
+ }
+ port.send([&](rpc::Buffer *buffer, uint32_t id) {
+ buffer->data[0] = args_sizes[id];
+ });
port.recv_n(args, args_sizes, [&](uint64_t size) { return new char[size]; });
// Identify any arguments that are actually pointers to strings on the client.
More information about the libc-commits
mailing list