[clang] [llvm] [WIP] Expand variadic functions in IR (PR #89007)
via cfe-commits
cfe-commits at lists.llvm.org
Tue Apr 16 17:31:21 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-clang-codegen
Author: Jon Chesterfield (JonChesterfield)
<details>
<summary>Changes</summary>
Rewrite calls to variadic functions into calls to an equivalent non-variadic function that takes a va_list argument.
This makes calls to known variadic functions a zero-cost abstraction. The GPU targets use it as their backend implementation of variadic functions.
The pass runs at O1 or above on known functions for targets that have an ABI implementation. For targets that use it as codegen, it also runs from the backend on known and unknown functions. It interacts well with inlining, so it is scheduled shortly before the inliner.
Relative to the abandoned pull/81058, this is the whole end-to-end transform rather than a subset carved out for easier review. The feedback on that PR has been applied; in particular, this version makes no attempt to detect existing va_list functions, so in some cases it will make the IR worse in a fashion that the inliner completely reverts. The ABI abstraction has been reworked to be more orthogonal, which hopefully makes it clear that the switch isn't worth scattering across the target classes.
Marked WIP because vector types in structs on x64 might be misbehaving. Still testing this - it's solid enough to build a working clang and libxml2, and should meet the use cases in gpu libc; I'll put a patch up for the ROCm CI - but I'd like more test coverage and probably another architecture implemented before landing.
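For readers skimming the patch, here is a rough C-level sketch of the before/after shape. The helper name and the frame layout below are illustrative only, not what the pass emits; the real transform works on IR and follows the target's va_list ABI.

```c
// Hypothetical sketch of the expand-variadics rewrite.
// 'sink_valist' and the frame struct are illustrative names/layouts only.

void sink(int first, ...);              // known variadic callee
void sink_valist(int first, void *va);  // va_list-taking equivalent

// Before the pass: a direct call to the variadic function.
void caller_before(void) { sink(1, 2.0, "three"); }

// After the pass (conceptually): the variadic tail is packed into a frame
// laid out according to the target's va_list ABI and passed by pointer.
void caller_after(void) {
  struct {
    double a;
    const char *b;
  } frame = {2.0, "three"};
  sink_valist(1, &frame);
}
```

On targets where this is the codegen path, the inliner and later cleanups can usually fold such frames away again for known callees, which is what makes those calls cheap.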
---
Patch is 293.19 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/89007.diff
36 Files Affected:
- (modified) clang/lib/CodeGen/ABIInfoImpl.cpp (+11-2)
- (modified) clang/lib/CodeGen/Targets/AMDGPU.cpp (+7-1)
- (modified) clang/lib/CodeGen/Targets/NVPTX.cpp (+7-1)
- (added) clang/test/CodeGen/expand-variadic-call.c (+314)
- (added) clang/test/CodeGen/variadic-wrapper-removal.c (+85)
- (added) clang/test/CodeGenCXX/inline-then-fold-variadics.cpp (+247)
- (modified) llvm/include/llvm/InitializePasses.h (+1)
- (added) llvm/include/llvm/Transforms/IPO/ExpandVariadics.h (+43)
- (modified) llvm/lib/Passes/PassBuilder.cpp (+1)
- (modified) llvm/lib/Passes/PassBuilderPipelines.cpp (+4)
- (modified) llvm/lib/Passes/PassRegistry.def (+1)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def (+1)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (+3)
- (modified) llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp (+4)
- (modified) llvm/lib/Transforms/IPO/CMakeLists.txt (+1)
- (added) llvm/lib/Transforms/IPO/ExpandVariadics.cpp (+1056)
- (added) llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll (+499)
- (modified) llvm/test/CodeGen/AMDGPU/llc-pipeline.ll (+5)
- (modified) llvm/test/CodeGen/AMDGPU/unsupported-calls.ll (-19)
- (added) llvm/test/CodeGen/NVPTX/expand-variadic-call.ll (+468)
- (added) llvm/test/CodeGen/X86/expand-variadic-call-i386-darwin.ll (+449)
- (added) llvm/test/CodeGen/X86/expand-variadic-call-i386-linux.ll (+449)
- (added) llvm/test/CodeGen/X86/expand-variadic-call-i686-msvc.ll (+467)
- (added) llvm/test/CodeGen/X86/expand-variadic-call-x64-darwin.ll (+688)
- (added) llvm/test/CodeGen/X86/expand-variadic-call-x64-linux.ll (+688)
- (modified) llvm/test/Other/new-pm-defaults.ll (+1)
- (modified) llvm/test/Other/new-pm-thinlto-postlink-defaults.ll (+1)
- (modified) llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll (+1)
- (modified) llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll (+1-1)
- (modified) llvm/test/Other/new-pm-thinlto-prelink-defaults.ll (+1)
- (modified) llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll (+1)
- (modified) llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll (+1)
- (added) llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-outliner.ll (+86)
- (added) llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll (+225)
- (added) llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll (+121)
- (modified) llvm/utils/gn/secondary/llvm/lib/Transforms/IPO/BUILD.gn (+1)
``````````diff
diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp
index 3e34d82cb399ba..c4a1829ab8863f 100644
--- a/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -154,11 +154,20 @@ llvm::Value *CodeGen::emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
llvm::Value *Ptr,
CharUnits Align) {
// OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
+ Ptr = CGF.Builder.CreateAddrSpaceCast(Ptr, CGF.AllocaInt8PtrTy,
+ Ptr->getName() + ".addrcast");
llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
CGF.Builder.getInt8Ty(), Ptr, Align.getQuantity() - 1);
+
+ // ptrmask is sensitive to the bitwidth of the mask
+ unsigned IndexTypeSize =
+ CGF.CGM.getDataLayout().getIndexTypeSizeInBits(RoundUp->getType());
+ llvm::IntegerType *MaskType =
+ llvm::IntegerType::get(CGF.getLLVMContext(), IndexTypeSize);
+
return CGF.Builder.CreateIntrinsic(
- llvm::Intrinsic::ptrmask, {CGF.AllocaInt8PtrTy, CGF.IntPtrTy},
- {RoundUp, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())},
+ llvm::Intrinsic::ptrmask, {CGF.AllocaInt8PtrTy, MaskType},
+ {RoundUp, llvm::ConstantInt::get(MaskType, -Align.getQuantity())},
nullptr, Ptr->getName() + ".aligned");
}
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 44e86c0b40f686..e274741a6ee652 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -115,7 +115,13 @@ void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- llvm_unreachable("AMDGPU does not support varargs");
+ const bool IsIndirect = false;
+ const bool AllowHigherAlign = true;
+ // Would rather not naturally align values
+ // Splitting {char, short} into two separate arguments makes that difficult.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(1), AllowHigherAlign);
}
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 7dce5042c3dc20..d6c256e5e6f7b9 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -215,7 +215,13 @@ void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- llvm_unreachable("NVPTX does not support varargs");
+ // TODO: Work out to what extent this correlates with ptx
+ // doubles get passed with 8 byte alignment and C promotes smaller integer
+ // types to int. Printf doesn't really do structs so hard to guess what
+ // the right thing is for that.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4), true);
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
diff --git a/clang/test/CodeGen/expand-variadic-call.c b/clang/test/CodeGen/expand-variadic-call.c
new file mode 100644
index 00000000000000..baff54544207ea
--- /dev/null
+++ b/clang/test/CodeGen/expand-variadic-call.c
@@ -0,0 +1,314 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-cpu x86-64-v4 -std=c23 -O1 -ffreestanding -mllvm --expand-variadics-override=disable -emit-llvm -o - %s | FileCheck %s
+
+// This test sanity checks calling a variadic function with the expansion transform disabled.
+// The IR test cases {arch}/expand-variadic-call-*.ll correspond to IR generated from this test case.
+
+typedef __builtin_va_list va_list;
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#define va_start(ap, ...) __builtin_va_start(ap, 0)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
+
+// 32 bit x86 alignment uses getTypeStackAlign for special cases
+// Whitebox testing.
+// Needs a type >= 16 which is either a simd or a struct containing a simd
+// darwinvectorabi should force 4 bytes
+// linux vectors with align 16/32/64 return that alignment
+
+
+// Might want various copy/end style constructs in a separate test
+
+void vararg(...);
+void valist(va_list);
+
+// CHECK-LABEL: @copy(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CP:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[CP]]) #[[ATTR7:[0-9]+]]
+// CHECK-NEXT: call void @llvm.va_copy.p0(ptr nonnull [[CP]], ptr [[VA:%.*]])
+// CHECK-NEXT: call void @valist(ptr noundef nonnull [[CP]]) #[[ATTR8:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[CP]]) #[[ATTR7]]
+// CHECK-NEXT: ret void
+//
+void copy(va_list va)
+{
+ va_list cp;
+ va_copy(cp, va);
+ valist(cp);
+}
+
+// CHECK-LABEL: @start_once(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[S:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[S]]) #[[ATTR7]]
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[S]])
+// CHECK-NEXT: call void @valist(ptr noundef nonnull [[S]]) #[[ATTR8]]
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[S]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[S]]) #[[ATTR7]]
+// CHECK-NEXT: ret void
+//
+void start_once(...)
+{
+ va_list s;
+ va_start(s);
+ valist(s);
+ va_end(s);
+}
+
+// CHECK-LABEL: @start_twice(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[S0:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: [[S1:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[S0]]) #[[ATTR7]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[S1]]) #[[ATTR7]]
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[S0]])
+// CHECK-NEXT: call void @valist(ptr noundef nonnull [[S0]]) #[[ATTR8]]
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[S0]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[S1]])
+// CHECK-NEXT: call void @valist(ptr noundef nonnull [[S1]]) #[[ATTR8]]
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[S1]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[S1]]) #[[ATTR7]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[S0]]) #[[ATTR7]]
+// CHECK-NEXT: ret void
+//
+void start_twice(...)
+{
+ va_list s0,s1;
+ va_start(s0);
+ valist(s0);
+ va_end(s0);
+ va_start(s1);
+ valist(s1);
+ va_end(s1);
+}
+
+// vectors with alignment 16/32/64 are natively aligned on linux x86
+// v32f32 would be a m1024 type, larger than x64 defines at time of writing
+typedef int i32;
+typedef float v4f32 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float v8f32 __attribute__((__vector_size__(32), __aligned__(32)));
+typedef float v16f32 __attribute__((__vector_size__(64), __aligned__(64)));
+typedef float v32f32 __attribute__((__vector_size__(128), __aligned__(128)));
+
+
+// Pass a single value to wrapped() via vararg(...)
+// CHECK-LABEL: @single_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void single_i32(i32 x)
+{
+ vararg(x);
+}
+
+
+// CHECK-LABEL: @single_double(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(double noundef [[X:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void single_double(double x)
+{
+ vararg(x);
+}
+
+// CHECK-LABEL: @single_v4f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(<4 x float> noundef [[X:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void single_v4f32(v4f32 x)
+{
+ vararg(x);
+}
+
+// CHECK-LABEL: @single_v8f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(<8 x float> noundef [[X:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void single_v8f32(v8f32 x)
+{
+ vararg(x);
+}
+
+// CHECK-LABEL: @single_v16f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(<16 x float> noundef [[X:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void single_v16f32(v16f32 x)
+{
+ vararg(x);
+}
+
+// CHECK-LABEL: @single_v32f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <32 x float>, align 128
+// CHECK-NEXT: [[X:%.*]] = load <32 x float>, ptr [[TMP0:%.*]], align 128, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT: store <32 x float> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 128, !tbaa [[TBAA2]]
+// CHECK-NEXT: tail call void (...) @vararg(ptr noundef nonnull byval(<32 x float>) align 128 [[INDIRECT_ARG_TEMP]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void single_v32f32(v32f32 x)
+{
+ vararg(x);
+}
+
+
+
+// CHECK-LABEL: @i32_double(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]], double noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void i32_double(i32 x, double y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @double_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(double noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void double_i32(double x, i32 y)
+{
+ vararg(x, y);
+}
+
+
+// A struct used by libc variadic tests
+
+typedef struct {
+ char c;
+ short s;
+ int i;
+ long l;
+ float f;
+ double d;
+} libcS;
+
+// CHECK-LABEL: @i32_libcS(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]], ptr noundef nonnull byval([[STRUCT_LIBCS:%.*]]) align 8 [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void i32_libcS(i32 x, libcS y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @libcS_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(ptr noundef nonnull byval([[STRUCT_LIBCS:%.*]]) align 8 [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void libcS_i32(libcS x, i32 y)
+{
+ vararg(x, y);
+}
+
+
+// CHECK-LABEL: @i32_v4f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]], <4 x float> noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void i32_v4f32(i32 x, v4f32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @v4f32_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(<4 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void v4f32_i32(v4f32 x, i32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @i32_v8f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]], <8 x float> noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void i32_v8f32(i32 x, v8f32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @v8f32_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(<8 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void v8f32_i32(v8f32 x, i32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @i32_v16f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]], <16 x float> noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void i32_v16f32(i32 x, v16f32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @v16f32_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void (...) @vararg(<16 x float> noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void v16f32_i32(v16f32 x, i32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @i32_v32f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <32 x float>, align 128
+// CHECK-NEXT: [[Y:%.*]] = load <32 x float>, ptr [[TMP0:%.*]], align 128, !tbaa [[TBAA2]]
+// CHECK-NEXT: store <32 x float> [[Y]], ptr [[INDIRECT_ARG_TEMP]], align 128, !tbaa [[TBAA2]]
+// CHECK-NEXT: tail call void (...) @vararg(i32 noundef [[X:%.*]], ptr noundef nonnull byval(<32 x float>) align 128 [[INDIRECT_ARG_TEMP]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void i32_v32f32(i32 x, v32f32 y)
+{
+ vararg(x, y);
+}
+
+// CHECK-LABEL: @v32f32_i32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <32 x float>, align 128
+// CHECK-NEXT: [[X:%.*]] = load <32 x float>, ptr [[TMP0:%.*]], align 128, !tbaa [[TBAA2]]
+// CHECK-NEXT: store <32 x float> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 128, !tbaa [[TBAA2]]
+// CHECK-NEXT: tail call void (...) @vararg(ptr noundef nonnull byval(<32 x float>) align 128 [[INDIRECT_ARG_TEMP]], i32 noundef [[Y:%.*]]) #[[ATTR8]]
+// CHECK-NEXT: ret void
+//
+void v32f32_i32(v32f32 x, i32 y)
+{
+ vararg(x, y);
+}
+
+#if __AMDGPU__ || __NVPTX__
+
+
+void (*volatile vararg_ptr)(...) = &vararg;
+
+void indirect_single_i32(i32 x)
+{
+ vararg_ptr(x);
+}
+
+
+#endif
diff --git a/clang/test/CodeGen/variadic-wrapper-removal.c b/clang/test/CodeGen/variadic-wrapper-removal.c
new file mode 100644
index 00000000000000..13499612f0757f
--- /dev/null
+++ b/clang/test/CodeGen/variadic-wrapper-removal.c
@@ -0,0 +1,85 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -triple i386-unknown-linux-gnu -O1 -emit-llvm -o - %s | opt --passes='module(expand-variadics,inline)' -S | FileCheck %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu -O1 -emit-llvm -o - %s | opt --passes='module(expand-variadics,inline)' -S | FileCheck %s
+
+// neither arm arch is implemented yet, leaving it here as a reminder
+// armv6 is a ptr as far as the struct is concerned, but possibly also a [1 x i32] passed by value
+// that seems insistent, maybe leave 32 bit arm alone for now
+// aarch64 is a struct of five things passed using byval memcpy
+
+// R-N: %clang_cc1 -triple=armv6-none--eabi -O1 -emit-llvm -o - %s | opt --passes=expand-variadics -S | FileCheck %s
+// R-N: %clang_cc1 -triple=aarch64-none-linux-gnu -O1 -emit-llvm -o - %s | opt --passes=expand-variadics -S | FileCheck %s
+
+
+
+// expand-variadics rewrites calls to variadic functions into calls to
+// equivalent functions that take a va_list argument. A property of the
+// implementation is that said "equivalent function" may be a pre-existing one.
+// This is equivalent to inlining a sufficiently simple variadic wrapper.
+
+#include <stdarg.h>
+
+typedef int FILE; // close enough for this test
+
+// fprintf is sometimes implemented as a call to vfprintf. That fits the
+// pattern the transform pass recognises - given an implementation of fprintf
+// in the IR module, calls to it can be rewritten into calls into vfprintf.
+
+// CHECK-LABEL: define{{.*}} i32 @fprintf(
+// CHECK-LABEL: define{{.*}} i32 @call_fprintf(
+// CHECK-NOT: @fprintf
+// CHECK: @vfprintf
+int vfprintf(FILE *restrict f, const char *restrict fmt, va_list ap);
+int fprintf(FILE *restrict f, const char *restrict fmt, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, fmt);
+ ret = vfprintf(f, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+int call_fprintf(FILE *f)
+{
+ int x = 42;
+ double y = 3.14;
+ return fprintf(f, "int %d dbl %g\n", x, y);
+}
+
+// Void return type is also OK
+
+// CHECK-LABEL: define{{.*}} void @no_result(
+// CHECK-LABEL: define{{.*}} void @call_no_result(
+// CHECK-NOT: @no_result
+// CHECK: @vno_result
+void vno_result(const char * fmt, va_list);
+void no_result(const char * fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vno_result(fmt, ap);
+ va_end(ap);
+}
+void call_no_result(FILE *f)
+{
+ int x = 101;
+ no_result("", x);
+}
+
+// The vaend in the forwarding implementation is optional where it's a no-op
+
+// CHECK-LABEL: define{{.*}} i32 @no_vaend(
+// CHECK-LABEL: define{{.*}} i32 @call_no_vaend(
+// CHECK-NOT: @no_vaend
+// CHECK: @vno_vaend
+int vno_vaend(int x, va_list);
+int no_vaend(int x, ...)
+{
+ va_list ap;
+ va_start(ap, x);
+ return vno_vaend(x, ap);
+}
+int call_no_vaend(int x)
+{
+ return no_vaend(x, 10, 2.5f);
+}
diff --git a/clang/test/CodeGenCXX/inline-then-fold-variadics.cpp b/clang/test/CodeGenCXX/inline-then-fold-variadics.cpp
new file mode 100644
index 00000000000000..c7b5863b2632a6
--- /dev/null
+++ b/clang/test/CodeGenCXX/inline-then-fold-variadics.cpp
@@ -0,0 +1,247 @@
+// RUN: %clang_cc1 -triple i386-unknown-linux-gnu -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s --check-prefixes=CHECK,X86Linux
+
+
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s --check-prefixes=CHECK,X64SystemV
+
+
+// RUN: %clang_cc1 -triple i386-apple-darwin -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s --check-prefixes=CHECK,X86Darwin
+
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s --check-prefixes=CHECK,X64SystemV
+
+
+// The clang test suite has _lots_ of windows related triples in it
+// 'x86_64-pc-windows-msvc|i686-windows-msvc|thumbv7-windows|aarch64-windows|i686-windows|x86_64-windows|x86_64-unknown-windows-msvc|i386-windows-pc|x86_64--windows-msvc|i686--windows-msvc|x86_64-unknown-windows-gnu|i686-unknown-windows-msvc|i686-unknown-windows-gnu|arm64ec-pc-windows-msvc19.20.0|i686-pc-windows-msvc19.14.0|i686-pc-windows|x86_64--windows-gnu|i686--windows-gnu|thumbv7--windows|i386-windows|x86_64-unknown-windows-pc|i686--windows|x86_64--windows|i686-w64-windows-gnu'
+
+// Might be detecting an inconsistency - maybe different alignment
+// Presently failing on an unusual calling convention
+
+// i686 windows emits suboptimal codegen. sroa removes a field from a struct which misaligns a field which blocks store/load forwarding
+// RUN: %clang_cc1 -triple i686-windows-msvc -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s --check-prefixes=CHECK,X86Windows
+
+
+// 64 bit windows va_arg passes most type indirectly but the call instruction uses the types by value
+// ___: %clang_cc1 -triple x86_64-pc-windows-msvc -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s --check-prefixes=CHECK
+
+
+
+// amdgpu emits a sequence of addrspace casts that aren't folded yet
+// todo: match it anyway
+// R-N: %clang_cc1 -triple amdgcn-amd-amdhsa -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s
+
+// Requires the instcombine patch that hasn't landed yet
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -Wno-varargs -O1 -emit-llvm -o - %s | opt --passes=expand-variadics | opt -S -O1 | FileCheck %s
+
+
+
+
+
+// Not yet implemented on arm
+// Also there are various x86 variants that should be in the triple
+
+// Checks for consistency between clang and expand-va-intrinics
+// 1. Use clang to lower va_arg
+// 2. Use expand-variadics to lower the rest of the variadic operations
+// 3. Use opt -O1 to simplify the functions to ret %arg
+// The simplification to ret %arg will fail when the two are not consistent, modulo bugs elsewhere.
+
+#include <stdarg.h>
+
+template <typename X, typename Y>
+static X first(...) {
+ va_list va;
+ __builtin_va_start(va, 0);
+ X r = va_arg(va, X);
+ va_end(va);
+ return r;
+}
+
+template <typename X, typename Y>
+static Y second(...) {
+ va_list va;
+ __builtin_va_start(va, 0);
+ va_arg(va, X);
+ Y r = va_arg(va, Y);
+ va_end(va);
+ return r;
+}
+
+typedef float float4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float float8 __attribute__((__vector_size__(32), __aligned__(32)));
+typedef float float16 __attribute__((__vector_size__(64), __aligned__(64)));
+typedef float float32 __attribute__((__vector_size__(128), __aligned__(128)));
+
+
+extern "C"
+{
+// CHECK-LABEL: define{{.*}} i32 @first_i32_i32(i32{{.*}} %x, i32{{.*}} %y)
+// CHECK: entry:
+// CHECK: ret i32 %x
+int first_i32_i32(int x, int y)
+{
+ return first<int,in...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/89007