[llvm] [LLVM] Add 'ExpandVariadicsPass' to LTO default pipeline (PR #100479)

Joseph Huber via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 25 04:56:01 PDT 2024


https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/100479

>From 2a6590e3a55785ce44e9bebb881987d7571cd743 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Wed, 24 Jul 2024 17:15:12 -0500
Subject: [PATCH] [LLVM] Add 'ExpandVariadicsPass' to LTO default pipeline

Summary:
This pass expands variadic functions into non-variadic function calls
according to the target ABI. Currently, this is used as the lowering for
the NVPTX, AMDGPU, and WASM targets.

This pass is currently run only late in each target's backend. However,
during LTO we want to run it before the inliner pass so that the
expanded functions can be inlined using standard heuristics. The pass
is a no-op for unsupported targets, so it has no effect on code for
targets that do not already use it.
---
 llvm/lib/Passes/PassBuilderPipelines.cpp      |  4 ++
 llvm/test/Other/new-pm-lto-defaults.ll        |  1 +
 llvm/test/Transforms/PhaseOrdering/varargs.ll | 48 +++++++++++++++++++
 3 files changed, 53 insertions(+)
 create mode 100644 llvm/test/Transforms/PhaseOrdering/varargs.ll

diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 6f36bdad780ae3..757b20dcd6693a 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -48,6 +48,7 @@
 #include "llvm/Transforms/IPO/DeadArgumentElimination.h"
 #include "llvm/Transforms/IPO/ElimAvailExtern.h"
 #include "llvm/Transforms/IPO/EmbedBitcodePass.h"
+#include "llvm/Transforms/IPO/ExpandVariadics.h"
 #include "llvm/Transforms/IPO/ForceFunctionAttrs.h"
 #include "llvm/Transforms/IPO/FunctionAttrs.h"
 #include "llvm/Transforms/IPO/GlobalDCE.h"
@@ -1874,6 +1875,9 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
   MPM.addPass(createModuleToFunctionPassAdaptor(std::move(PeepholeFPM),
                                                 PTO.EagerlyInvalidateAnalyses));
 
+  // Lower variadic functions for supported targets prior to inlining.
+  MPM.addPass(ExpandVariadicsPass(ExpandVariadicsMode::Optimize));
+
   // Note: historically, the PruneEH pass was run first to deduce nounwind and
   // generally clean up exception handling overhead. It isn't clear this is
   // valuable as the inliner doesn't currently care whether it is inlining an
diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll
index d451d2897f673c..1b345780476b71 100644
--- a/llvm/test/Other/new-pm-lto-defaults.ll
+++ b/llvm/test/Other/new-pm-lto-defaults.ll
@@ -29,6 +29,7 @@
 
 ; CHECK-EP: Running pass: NoOpModulePass
 ; CHECK-O: Running pass: CrossDSOCFIPass
+; CHECK-O-NEXT: Running pass: ExpandVariadicsPass
 ; CHECK-O-NEXT: Running pass: OpenMPOptPass
 ; CHECK-O-NEXT: Running pass: GlobalDCEPass
 ; CHECK-O-NEXT: Running pass: InferFunctionAttrsPass
diff --git a/llvm/test/Transforms/PhaseOrdering/varargs.ll b/llvm/test/Transforms/PhaseOrdering/varargs.ll
new file mode 100644
index 00000000000000..dd6a41fcc96012
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/varargs.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-- -S -passes='lto<O2>' < %s | FileCheck %s
+target triple = "amdgcn-amd-amdhsa"
+
+; We use the ExpandVariadics pass to lower variadic functions so they can be
+; inlined.
+
+define i32 @foo() {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    ret i32 6
+;
+entry:
+  %call = tail call i32 (i32, ...) @vararg(i32 poison, i32 noundef 1, i32 noundef 2, i32 noundef 3)
+  ret i32 %call
+}
+
+define internal i32 @vararg(i32 %first, ...) {
+entry:
+  %vlist = alloca ptr, align 8, addrspace(5)
+  %vlist.ascast = addrspacecast ptr addrspace(5) %vlist to ptr
+  call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vlist)
+  call void @llvm.va_start.p0(ptr %vlist.ascast)
+  %vlist.promoted = load ptr, ptr addrspace(5) %vlist, align 8
+  %argp.next = getelementptr inbounds i8, ptr %vlist.promoted, i64 4
+  store ptr %argp.next, ptr addrspace(5) %vlist, align 8
+  %0 = load i32, ptr %vlist.promoted, align 4
+  %argp.next.1 = getelementptr inbounds i8, ptr %vlist.promoted, i64 8
+  store ptr %argp.next.1, ptr addrspace(5) %vlist, align 8
+  %1 = load i32, ptr %argp.next, align 4
+  %add.1 = add nsw i32 %1, %0
+  %argp.next.2 = getelementptr inbounds i8, ptr %vlist.promoted, i64 12
+  store ptr %argp.next.2, ptr addrspace(5) %vlist, align 8
+  %2 = load i32, ptr %argp.next.1, align 4
+  %add.2 = add nsw i32 %2, %add.1
+  call void @llvm.va_end.p0(ptr %vlist.ascast)
+  call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vlist)
+  ret i32 %add.2
+}
+
+declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture)
+
+declare void @llvm.va_start.p0(ptr)
+
+declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture)
+
+declare void @llvm.va_end.p0(ptr)



More information about the llvm-commits mailing list