[llvm] llvm-reduce: Add values to return reduction (PR #132686)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 24 23:19:33 PDT 2025
https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/132686
>From c43fa0f99d9524b6ffb867000f273a495b233ae3 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 19 Mar 2025 15:45:54 +0700
Subject: [PATCH 1/3] llvm-reduce: Add values to return reduction
In void functions, try to replace instruction uses
with a new non-void return. If the return type matches
the instruction, also try to directly return it.
This handles most of the cases, but doesn't try to handle
all of the weird exception related terminators.
Also doesn't try to replace argument uses, although it could. We
could also handle cases where we can insert a simple cast to an
original return value. I didn't think too hard about where to put this
in the default pass order. In many cases it obviates the need for most
of the CFG folds, but I've left it near the end initially.
I also think this is too aggressive about removing dead code, and
should leave existing dead code alone. I'm also not sure why we have
both "removeUnreachableBlocks" and "EliminateUnreachableBlocks" in Utils.
Fixes #66039, fixes #107327
---
llvm/include/llvm/IR/Function.h | 8 +
llvm/lib/IR/Function.cpp | 82 ++
.../tools/llvm-reduce/reduce-operands-fp.ll | 2 +-
.../tools/llvm-reduce/reduce-operands-int.ll | 2 +-
.../tools/llvm-reduce/reduce-operands-ptr.ll | 2 +-
.../llvm-reduce/reduce-values-to-return.ll | 959 ++++++++++++++++++
llvm/tools/llvm-reduce/CMakeLists.txt | 1 +
llvm/tools/llvm-reduce/DeltaManager.cpp | 2 +
.../deltas/ReduceValuesToReturn.cpp | 245 +++++
.../llvm-reduce/deltas/ReduceValuesToReturn.h | 18 +
10 files changed, 1318 insertions(+), 3 deletions(-)
create mode 100644 llvm/test/tools/llvm-reduce/reduce-values-to-return.ll
create mode 100644 llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
create mode 100644 llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.h
diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
index f17b7140ca29c..6d4a53da7ff22 100644
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -1048,6 +1048,14 @@ class LLVM_ABI Function : public GlobalObject, public ilist_node<Function> {
void setValueSubclassDataBit(unsigned Bit, bool On);
};
+namespace CallingConv {
+
+// TODO: Need similar function for support of argument in position. General
+// version on FunctionType + Attributes + CallingConv::ID?
+LLVM_READNONE
+bool supportsNonVoidReturnType(CallingConv::ID CC);
+} // namespace CallingConv
+
/// Check whether null pointer dereferencing is considered undefined behavior
/// for a given function or an address space.
/// Null pointer access in non-zero address space is not considered undefined.
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index 3644fab913b10..79308e4787b67 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -1177,3 +1177,85 @@ bool llvm::NullPointerIsDefined(const Function *F, unsigned AS) {
return false;
}
+
+/// Return true if a function using calling convention \p CC may legally be
+/// given a non-void return type (e.g. kernels and entry points may not).
+bool llvm::CallingConv::supportsNonVoidReturnType(CallingConv::ID CC) {
+ switch (CC) {
+ case CallingConv::C:
+ case CallingConv::Fast:
+ case CallingConv::Cold:
+ case CallingConv::GHC:
+ case CallingConv::HiPE:
+ case CallingConv::AnyReg:
+ case CallingConv::PreserveMost:
+ case CallingConv::PreserveAll:
+ case CallingConv::Swift:
+ case CallingConv::CXX_FAST_TLS:
+ case CallingConv::Tail:
+ case CallingConv::CFGuard_Check:
+ case CallingConv::SwiftTail:
+ case CallingConv::PreserveNone:
+ case CallingConv::X86_StdCall:
+ case CallingConv::X86_FastCall:
+ case CallingConv::ARM_APCS:
+ case CallingConv::ARM_AAPCS:
+ case CallingConv::ARM_AAPCS_VFP:
+ case CallingConv::MSP430_INTR:
+ case CallingConv::X86_ThisCall:
+ case CallingConv::PTX_Device:
+ case CallingConv::SPIR_FUNC:
+ case CallingConv::Intel_OCL_BI:
+ case CallingConv::X86_64_SysV:
+ case CallingConv::Win64:
+ case CallingConv::X86_VectorCall:
+ case CallingConv::DUMMY_HHVM:
+ case CallingConv::DUMMY_HHVM_C:
+ case CallingConv::X86_INTR:
+ case CallingConv::AVR_INTR:
+ case CallingConv::AVR_SIGNAL:
+ case CallingConv::AVR_BUILTIN:
+ return true;
+ case CallingConv::AMDGPU_KERNEL:
+ case CallingConv::SPIR_KERNEL:
+ case CallingConv::AMDGPU_CS_Chain:
+ case CallingConv::AMDGPU_CS_ChainPreserve:
+ return false;
+ case CallingConv::AMDGPU_VS:
+ case CallingConv::AMDGPU_HS:
+ case CallingConv::AMDGPU_GS:
+ case CallingConv::AMDGPU_PS:
+ case CallingConv::AMDGPU_CS:
+ case CallingConv::AMDGPU_LS:
+ case CallingConv::AMDGPU_ES:
+ case CallingConv::MSP430_BUILTIN:
+ case CallingConv::AArch64_VectorCall:
+ case CallingConv::AArch64_SVE_VectorCall:
+ case CallingConv::WASM_EmscriptenInvoke:
+ case CallingConv::AMDGPU_Gfx:
+ case CallingConv::M68k_INTR:
+ case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
+ case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
+ case CallingConv::M68k_RTD:
+ case CallingConv::GRAAL:
+ case CallingConv::ARM64EC_Thunk_X64:
+ case CallingConv::ARM64EC_Thunk_Native:
+ case CallingConv::RISCV_VectorCall:
+ case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
+ case CallingConv::RISCV_VLSCall_32:
+ case CallingConv::RISCV_VLSCall_64:
+ case CallingConv::RISCV_VLSCall_128:
+ case CallingConv::RISCV_VLSCall_256:
+ case CallingConv::RISCV_VLSCall_512:
+ case CallingConv::RISCV_VLSCall_1024:
+ case CallingConv::RISCV_VLSCall_2048:
+ case CallingConv::RISCV_VLSCall_4096:
+ case CallingConv::RISCV_VLSCall_8192:
+ case CallingConv::RISCV_VLSCall_16384:
+ case CallingConv::RISCV_VLSCall_32768:
+ case CallingConv::RISCV_VLSCall_65536:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/llvm/test/tools/llvm-reduce/reduce-operands-fp.ll b/llvm/test/tools/llvm-reduce/reduce-operands-fp.ll
index b547c819bf0de..e10a3f8c010ce 100644
--- a/llvm/test/tools/llvm-reduce/reduce-operands-fp.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-operands-fp.ll
@@ -27,7 +27,7 @@
; CHECK-INTERESTINGNESS: = fadd <2 x float>
; CHECK-INTERESTINGNESS: = fadd <2 x float>
-; CHECK-LABEL: define void @foo(
+; CHECK-LABEL: define {{(void|<2 x float>)}} @foo(
; ONE: %fadd0 = fadd float %arg0, 1.000000e+00
diff --git a/llvm/test/tools/llvm-reduce/reduce-operands-int.ll b/llvm/test/tools/llvm-reduce/reduce-operands-int.ll
index 397a1595ca6b2..66ad30832aabc 100644
--- a/llvm/test/tools/llvm-reduce/reduce-operands-int.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-operands-int.ll
@@ -22,7 +22,7 @@
; CHECK-INTERESTINGNESS: = add <2 x i32>
; CHECK-INTERESTINGNESS: = add <2 x i32>
-; CHECK-LABEL: define void @foo(
+; CHECK-LABEL: define {{(void|<2 x i32>)}} @foo(
; ONE: %add0 = add i32 %arg0, 1
diff --git a/llvm/test/tools/llvm-reduce/reduce-operands-ptr.ll b/llvm/test/tools/llvm-reduce/reduce-operands-ptr.ll
index 3e163b30f6b38..4669cf76074d4 100644
--- a/llvm/test/tools/llvm-reduce/reduce-operands-ptr.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-operands-ptr.ll
@@ -8,7 +8,7 @@
; RUN: llvm-reduce --abort-on-invalid-reduction --test FileCheck --test-arg --check-prefixes=CHECK-INTERESTINGNESS --test-arg %s --test-arg --input-file %s -o %t
; RUN: FileCheck --check-prefixes=CHECK,ZERO %s < %t
-; CHECK-LABEL: define void @foo(
+; CHECK-LABEL: define {{(void|ptr)}} @foo(
; ONE: load i32, ptr %a0
; ONE: load i32, ptr @g
diff --git a/llvm/test/tools/llvm-reduce/reduce-values-to-return.ll b/llvm/test/tools/llvm-reduce/reduce-values-to-return.ll
new file mode 100644
index 0000000000000..0c36db8ebc278
--- /dev/null
+++ b/llvm/test/tools/llvm-reduce/reduce-values-to-return.ll
@@ -0,0 +1,959 @@
+; Test that llvm-reduce can move intermediate values by inserting
+; early returns
+;
+; RUN: llvm-reduce --abort-on-invalid-reduction --delta-passes=values-to-return --test FileCheck --test-arg --check-prefixes=CHECK,INTERESTING --test-arg %s --test-arg --input-file %s -o %t
+; RUN: FileCheck --check-prefixes=CHECK,RESULT %s < %t
+
+ at gv = global i32 0, align 4
+ at gv_struct = global { i32, float } zeroinitializer, align 4
+ at gv_array = global [3 x i32] zeroinitializer, align 4
+ at gv_empty_struct = global { } zeroinitializer, align 4
+
+; CHECK: @global.func.user = global ptr @store_instruction_to_return_with_uses
+ at global.func.user = global ptr @store_instruction_to_return_with_uses
+
+; INTERESTING-LABEL: @store_instruction_to_return_with_uses(
+; INTERESTING-NEXT: = load
+
+; RESULT-LABEL: define i32 @store_instruction_to_return_with_uses(ptr %arg) {
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @store_instruction_to_return_with_uses(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: define void @user(
+; INTERESTING: call
+
+; RESULT-LABEL: define void @user(
+; RESULT-NEXT: call i32 @store_instruction_to_return_with_uses(ptr %a, ptr %b)
+; RESULT-NEXT: ret void
+; RESULT-NEXT: }
+define void @user(ptr %a, ptr %b) {
+ call void @store_instruction_to_return_with_uses(ptr %a, ptr %b)
+ ret void
+}
+
+; INTERESTING-LABEL: @store_instruction_to_return_no_uses(
+; INTERESTING: = load i32
+
+; RESULT-LABEL: define i32 @store_instruction_to_return_no_uses(
+; RESULT-NEXT: %load = load i32
+; RESULT-NEXT: ret i32 %load
+define void @store_instruction_to_return_no_uses(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @store_instruction_to_return_preserve_attrs(
+; INTERESTING: = load
+
+; RESULT: ; Function Attrs: nounwind
+; RESULT-NEXT: define weak i32 @store_instruction_to_return_preserve_attrs(ptr byref(i32) %arg) #0 {
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define weak void @store_instruction_to_return_preserve_attrs(ptr byref(i32) %arg) nounwind "some-attr" {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @store_instruction_to_return_preserve_addrspace(
+; INTERESTING: = load
+
+; RESULT-LABEL: define i32 @store_instruction_to_return_preserve_addrspace(ptr %arg) addrspace(1) {
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @store_instruction_to_return_preserve_addrspace(ptr %arg) addrspace(1) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @store_instruction_to_return_no_uses_unreachable(
+; INTERESTING: = load
+
+; RESULT-LABEL: define i32 @store_instruction_to_return_no_uses_unreachable(ptr %arg) {
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @store_instruction_to_return_no_uses_unreachable(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ unreachable
+}
+
+; INTERESTING-LABEL: @store_instruction_to_return_with_non_callee_use(
+; INTERESTING: = load
+
+; RESULT-LABEL: define i32 @store_instruction_to_return_with_non_callee_use(ptr %arg) {
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @store_instruction_to_return_with_non_callee_use(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+declare void @takes_fptr(ptr)
+
+; CHECK: @non_callee_user(
+; CHECK: ret void
+define void @non_callee_user(ptr %a, ptr %b) {
+ call void @takes_fptr(ptr @store_instruction_to_return_with_non_callee_use)
+ ret void
+}
+
+declare i32 @convergent_call() convergent
+
+; CHECK-LABEL: @no_return_token_def(
+; CHECK: call token
+; RESULT: ret void
+define void @no_return_token_def(ptr %arg) convergent {
+ %t = call token @llvm.experimental.convergence.entry()
+ ret void
+}
+
+; INTERESTING-LABEL: @no_return_token_def_other(
+; INTERESTING: call token
+
+; RESULT-LABEL: define i32 @no_return_token_def_other(
+; RESULT: call token
+; RESULT: call i32
+; RESULT: ret i32
+define void @no_return_token_def_other(ptr %arg) convergent {
+ %t = call token @llvm.experimental.convergence.entry()
+ %call = call i32 @convergent_call() [ "convergencectrl"(token %t) ]
+ store i32 %call, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @store_instruction_to_return_variadic_func(
+; INTERESTING: = load
+
+; RESULT-LABEL: define i32 @store_instruction_to_return_variadic_func(ptr %arg, ...)
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @store_instruction_to_return_variadic_func(ptr %arg, ...) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; Has a callsite use that is invoking the function with a non-void
+; return type, that does not match the new return type.
+
+; INTERESTING-LABEL: @inst_to_return_has_nonvoid_wrong_type_caller(
+
+; RESULT-LABEL: define void @inst_to_return_has_nonvoid_wrong_type_caller(
+; RESULT-NEXT: %load = load i32, ptr %arg
+; RESULT-NEXT: store i32 %load, ptr @gv
+; RESULT-NEXT: ret void
+define void @inst_to_return_has_nonvoid_wrong_type_caller(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @wrong_callsite_return_type(
+
+; RESULT-LABEL: define i64 @wrong_callsite_return_type(
+; RESULT-NEXT: %ret = call i64 @inst_to_return_has_nonvoid_wrong_type_caller(ptr %arg)
+; RESULT-NEXT: ret i64 %ret
+define i64 @wrong_callsite_return_type(ptr %arg) {
+ %ret = call i64 @inst_to_return_has_nonvoid_wrong_type_caller(ptr %arg)
+ ret i64 %ret
+}
+
+; INTERESTING-LABEL: @inst_to_return_already_has_new_type_caller(
+
+; RESULT-LABEL: define i32 @inst_to_return_already_has_new_type_caller(
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @inst_to_return_already_has_new_type_caller(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; Callsite has UB signature mismatch, but the return type happens to
+; match the new return type.
+;
+; INTERESTING-LABEL: @callsite_already_new_return_type(
+
+; RESULT-LABEL: define i32 @callsite_already_new_return_type(
+; RESULT-NEXT: %ret = call i32 @inst_to_return_already_has_new_type_caller(ptr %arg)
+; RESULT-NEXT: ret i32 %ret
+define i32 @callsite_already_new_return_type(ptr %arg) {
+ %ret = call i32 @inst_to_return_already_has_new_type_caller(ptr %arg)
+ ret i32 %ret
+}
+
+; INTERESTING-LABEL: @non_void_no_op(
+; INTERESTING: = load
+; INTERESTING: ret
+
+; RESULT-LABEL: define ptr @non_void_no_op(
+; RESULT: ret ptr null
+define ptr @non_void_no_op(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret ptr null
+}
+
+; INTERESTING-LABEL: @non_void_same_type_use(
+; INTERESTING: = load
+; INTERESTING: ret
+
+; RESULT-LABEL: define i32 @non_void_same_type_use(
+; RESULT-NEXT: %load = load i32, ptr %arg
+; RESULT-NEXT: ret i32 %load
+define i32 @non_void_same_type_use(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret i32 0
+}
+
+; INTERESTING-LABEL: @non_void_bitcastable_type_use(
+; INTERESTING: = load
+; INTERESTING: ret
+
+; RESULT-LABEL: define i32 @non_void_bitcastable_type_use(
+; RESULT-NEXT: %load = load float, ptr %arg
+; RESULT-NEXT: store float %load,
+; RESULT-NEXT: ret i32 0
+define i32 @non_void_bitcastable_type_use(ptr %arg) {
+ %load = load float, ptr %arg
+ store float %load, ptr @gv
+ ret i32 0
+}
+
+; INTERESTING-LABEL: @form_return_struct(
+; INTERESTING: = load { i32, float }
+
+; RESULT-LABEL: define { i32, float } @form_return_struct(ptr %arg) {
+; RESULT-NEXT: %load = load { i32, float }, ptr %arg, align 4
+; RESULT-NEXT: ret { i32, float } %load
+define void @form_return_struct(ptr %arg) {
+ %load = load { i32, float }, ptr %arg
+ store { i32, float } %load, ptr @gv_struct
+ ret void
+}
+
+; INTERESTING-LABEL: define void @return_struct_user(
+; INTERESTING-NEXT: call
+; RESULT: call { i32, float } @form_return_struct(ptr %arg)
+define void @return_struct_user(ptr %arg) {
+ call void @form_return_struct(ptr %arg)
+ ret void
+}
+
+; INTERESTING-LABEL: @form_return_array(
+; INTERESTING: = load
+
+; RESULT-LABEL: define [3 x i32] @form_return_array(
+; RESULT-NEXT: %load = load [3 x i32]
+; RESULT-NEXT: ret [3 x i32] %load
+define void @form_return_array(ptr %arg) {
+ %load = load [3 x i32], ptr %arg
+ store [3 x i32] %load, ptr @gv_array
+ ret void
+}
+
+; CHECK-LABEL: @return_array_user(
+; RESULT: call [3 x i32] @form_return_array(ptr %arg)
+define void @return_array_user(ptr %arg) {
+ call void @form_return_array(ptr %arg)
+ ret void
+}
+
+; INTERESTING-LABEL: @form_return_empty_struct(
+; INTERESTING: = load
+
+; RESULT: define {} @form_return_empty_struct(
+; RESULT-NEXT: %load = load {}
+; RESULT-NEXT: ret {} %load
+define void @form_return_empty_struct(ptr %arg) {
+ %load = load { }, ptr %arg
+ store { } %load, ptr @gv_empty_struct
+ ret void
+}
+
+; CHECK-LABEL: define void @return_empty_struct_user(
+; RESULT: call {} @form_return_empty_struct(ptr %arg)
+define void @return_empty_struct_user(ptr %arg) {
+ call void @form_return_empty_struct(ptr %arg)
+ ret void
+}
+
+define target("sometarget.sometype") @target_type_func() {
+ ret target("sometarget.sometype") poison
+}
+
+define void @target_type_user(target("sometarget.sometype") %a) {
+ ret void
+}
+
+; INTERESTING-LABEL: @form_return_target_ty(
+; INTERESTING: call target("sometarget.sometype") @target_type_func()
+
+; RESULT: define target("sometarget.sometype") @form_return_target_ty(
+; RESULT-NEXT: %call = call target("sometarget.sometype") @target_type_func()
+; RESULT-NEXT: ret target("sometarget.sometype") %call
+define void @form_return_target_ty(ptr %arg) {
+ %call = call target("sometarget.sometype") @target_type_func()
+ call void @target_type_user(target("sometarget.sometype") %call)
+ ret void
+}
+
+; CHECK-LABEL: define void @return_target_ty_user(
+; RESULT-NEXT: %1 = call target("sometarget.sometype") @form_return_target_ty(ptr %arg)
+; RESULT-NEXT: ret void
+define void @return_target_ty_user(ptr %arg) {
+ call void @form_return_target_ty(ptr %arg)
+ ret void
+}
+
+; Make sure an invalid reduction isn't attempted for a function with
+; an sret argument
+
+; CHECK-LABEL: @no_sret_nonvoid_return
+define void @no_sret_nonvoid_return(ptr sret(i32) %out.sret, ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr %out.sret
+ ret void
+}
+
+; Test a calling convention where it's illegal to use a non-void
+; return. No invalid reduction should be introduced.
+
+; INTERESTING-LABEL: @no_void_return_callingconv(
+; INTERESTING: = load i32
+
+; RESULT-LABEL: define amdgpu_kernel void @no_void_return_callingconv(
+; RESULT-NEXT: %load = load i32
+; RESULT-NEXT: store i32 %load
+; RESULT-NEXT: ret void
+define amdgpu_kernel void @no_void_return_callingconv(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @keep_first_of_3(
+; INTERESTING: %load0 = load i32, ptr %arg0
+; INTERESTING: ret
+
+; RESULT-LABEL: define i32 @keep_first_of_3(
+; RESULT-NEXT: %load0 = load i32, ptr %arg0, align 4
+; RESULT-NEXT: ret i32 %load0
+define void @keep_first_of_3(ptr %arg0, ptr %arg1, ptr %arg2) {
+ %load0 = load i32, ptr %arg0
+ %load1 = load i32, ptr %arg1
+ %load2 = load i32, ptr %arg2
+ store i32 %load0, ptr @gv
+ store i32 %load1, ptr @gv
+ store i32 %load2, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @keep_second_of_3(
+; INTERESTING: %load1 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @keep_second_of_3(
+; RESULT-NEXT: %load0 = load i32, ptr %arg0
+; RESULT-NEXT: %load1 = load i32, ptr %arg1
+; RESULT-NEXT: ret i32 %load1
+define void @keep_second_of_3(ptr %arg0, ptr %arg1, ptr %arg2) {
+ %load0 = load i32, ptr %arg0
+ %load1 = load i32, ptr %arg1
+ %load2 = load i32, ptr %arg2
+ store i32 %load0, ptr @gv
+ store i32 %load1, ptr @gv
+ store i32 %load2, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @keep_third_of_3(
+; INTERESTING: %load2 = load i32, ptr %arg2
+
+; RESULT-LABEL: define i32 @keep_third_of_3(
+; RESULT-NEXT: %load0 = load i32, ptr %arg0, align 4
+; RESULT-NEXT: %load1 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: %load2 = load i32, ptr %arg2, align 4
+; RESULT-NEXT: ret i32 %load2
+define void @keep_third_of_3(ptr %arg0, ptr %arg1, ptr %arg2) {
+ %load0 = load i32, ptr %arg0
+ %load1 = load i32, ptr %arg1
+ %load2 = load i32, ptr %arg2
+ store i32 %load0, ptr @gv
+ store i32 %load1, ptr @gv
+ store i32 %load2, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @keep_first_2_of_3(
+; INTERESTING: %load0 = load i32, ptr %arg0
+; INTERESTING: %load1 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @keep_first_2_of_3(
+; RESULT-NEXT: %load0 = load i32, ptr %arg0
+; RESULT-NEXT: %load1 = load i32, ptr %arg1
+; RESULT-NEXT: ret i32 %load1
+define void @keep_first_2_of_3(ptr %arg0, ptr %arg1, ptr %arg2) {
+ %load0 = load i32, ptr %arg0
+ %load1 = load i32, ptr %arg1
+ %load2 = load i32, ptr %arg2
+ store i32 %load0, ptr @gv
+ store i32 %load1, ptr @gv
+ store i32 %load2, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @keep_second_of_3_already_ret_constexpr(
+; INTERESTING: %load1 = load i32, ptr %arg1
+; INTERESTING: ret
+
+; RESULT-LABEL: define i32 @keep_second_of_3_already_ret_constexpr(
+; RESULT-NEXT: %load0 = load i32, ptr %arg0, align 4
+; RESULT-NEXT: %load1 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load1
+define i32 @keep_second_of_3_already_ret_constexpr(ptr %arg0, ptr %arg1, ptr %arg2) {
+ %load0 = load i32, ptr %arg0
+ %load1 = load i32, ptr %arg1
+ %load2 = load i32, ptr %arg2
+ store i32 %load0, ptr @gv
+ store i32 %load1, ptr @gv
+ store i32 %load2, ptr @gv
+ ret i32 ptrtoint (ptr @gv to i32)
+}
+
+; INTERESTING-LABEL: @self_recursive(
+; INTERESTING: %load = load i32, ptr %arg
+
+; RESULT-LABEL: define i32 @self_recursive(
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @self_recursive(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ call void @self_recursive(ptr %arg)
+ ret void
+}
+
+; INTERESTING-LABEL: @has_invoke_user(
+
+; RESULT-LABEL: define i32 @has_invoke_user(
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: ret i32 %load
+define void @has_invoke_user(ptr %arg) {
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; INTERESTING-LABEL: @invoker(
+; RESULT: %0 = invoke i32 @has_invoke_user(ptr %arg)
+define void @invoker(ptr %arg) personality ptr @__gxx_personality_v0 {
+bb:
+ invoke void @has_invoke_user(ptr %arg)
+ to label %bb3 unwind label %bb1
+
+bb1:
+ landingpad { ptr, i32 }
+ catch ptr null
+ br label %bb3
+
+bb3:
+ ret void
+}
+
+; INTERESTING-LABEL: @return_from_nonentry_block(
+
+; RESULT-LABEL: define i32 @return_from_nonentry_block(
+; RESULT: br i1 %arg0, label %bb0, label %bb1
+
+; RESULT: bb0:
+; RESULT-NEXT: %load = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load
+
+; RESULT: bb1:
+; RESULT-NEXT: unreachable
+define void @return_from_nonentry_block(i1 %arg0, ptr %arg1) {
+entry:
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %load = load i32, ptr %arg1
+ store i32 %load, ptr @gv
+ ret void
+
+bb1:
+ unreachable
+}
+
+; INTERESTING-LABEL: @multi_void_return(
+; INTERESTING: %load = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @multi_void_return(i1 %arg0, ptr %arg1) {
+; RESULT-NEXT: entry:
+; RESULT-NEXT: br i1 %arg0, label %bb0, label %bb1
+
+; RESULT: bb0:
+; RESULT-NEXT: %load = load i32, ptr %arg1
+; RESULT-NEXT: ret i32 %load
+
+; RESULT: bb1:
+; RESULT-NEXT: ret i32 0
+define void @multi_void_return(i1 %arg0, ptr %arg1) {
+entry:
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %load = load i32, ptr %arg1
+ store i32 %load, ptr @gv
+ ret void
+
+bb1:
+ ret void
+}
+
+; INTERESTING-LABEL: @multi_void_return_dominates_all(
+; INTERESTING: %load = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @multi_void_return_dominates_all(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load
+; RESULT-NEXT: }
+define void @multi_void_return_dominates_all(i1 %arg0, ptr %arg1) {
+entry:
+ %load = load i32, ptr %arg1
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ store i32 %load, ptr @gv
+ ret void
+
+bb1:
+ ret void
+}
+
+; INTERESTING-LABEL: @multi_unreachable_dominates_all(
+; INTERESTING: %load = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @multi_unreachable_dominates_all(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load
+; RESULT-NEXT: }
+define void @multi_unreachable_dominates_all(i1 %arg0, ptr %arg1) {
+entry:
+ %load = load i32, ptr %arg1
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ store i32 %load, ptr @gv
+ unreachable
+
+bb1:
+ unreachable
+}
+
+; We want to mutate %bb0 to return %load0, and not break the ret in
+; %bb1
+
+; INTERESTING-LABEL: @multi_nonvoid_return(
+; INTERESTING: %other = load i32, ptr %arg2
+; INTERESTING: br i1 %arg0
+; INTERESTING: %load = load i32, ptr %arg1
+
+
+; RESULT-LABEL: define i32 @multi_nonvoid_return(
+
+; RESULT: entry:
+; RESULT-NEXT: %other = load i32, ptr %arg2
+; RESULT-NEXT: br i1 %arg0, label %bb0, label %bb1
+
+; RESULT: bb0:
+; RESULT-NEXT: %load = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load
+
+; RESULT: bb1:
+; RESULT-NEXT: ret i32 99
+define i32 @multi_nonvoid_return(i1 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %other = load i32, ptr %arg2
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %load = load i32, ptr %arg1
+ store i32 %load, ptr @gv
+ ret i32 %other
+
+bb1:
+ ret i32 99
+}
+
+; TODO: Could handle this better if we avoided eliminating code that
+; was already dead
+
+; INTERESTING-LABEL: @interesting_in_unreachable_code(
+; INTERESTING: %load = load i32, ptr %arg
+
+; RESULT-LABEL: define void @interesting_in_unreachable_code(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: ret void
+
+; RESULT: bb: ; No predecessors!
+; RESULT-NEXT: %load = load i32, ptr %arg, align 4
+; RESULT-NEXT: store i32 %load,
+; RESULT-NEXT: ret void
+define void @interesting_in_unreachable_code(ptr %arg) {
+entry:
+ ret void
+
+bb:
+ %load = load i32, ptr %arg
+ store i32 %load, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @use_in_successor_phi(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @use_in_successor_phi(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load0
+; RESULT-NEXT: }
+define void @use_in_successor_phi(i1 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %load0 = load i32, ptr %arg1
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32 [ %load0, %entry ], [ %load1, %bb1 ]
+ store i32 %phi, ptr @gv
+ br label %bb2
+
+bb1:
+ %load1 = load i32, ptr %arg2
+ br label %bb0
+
+bb2:
+ ret void
+}
+
+; INTERESTING-LABEL: @use_in_successor_phi_repeated(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @use_in_successor_phi_repeated(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load0
+; RESULT-NEXT: }
+define void @use_in_successor_phi_repeated(i1 %arg0, ptr %arg1, ptr %arg2, i32 %switch.val) {
+entry:
+ %load0 = load i32, ptr %arg1
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32 [ %load0, %entry ], [ %load1, %bb1 ], [ %load1, %bb1 ]
+ store i32 %phi, ptr @gv
+ br label %bb2
+
+bb1:
+ %load1 = load i32, ptr %arg2
+ switch i32 %switch.val, label %bb2 [
+ i32 1, label %bb0
+ i32 2, label %bb0
+ ]
+
+bb2:
+ ret void
+}
+
+; INTERESTING-LABEL: @replace_cond_br_with_ret(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @replace_cond_br_with_ret(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load0
+define void @replace_cond_br_with_ret(i1 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %load0 = load i32, ptr %arg1
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ store i32 %load0, ptr %arg2
+ ret void
+
+bb1:
+ store i32 %load0, ptr %arg2
+ ret void
+}
+
+; INTERESTING-LABEL: @replace_switch_with_ret(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @replace_switch_with_ret(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load0
+; RESULT-NEXT: }
+define void @replace_switch_with_ret(i32 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %load0 = load i32, ptr %arg1
+ switch i32 %arg0, label %bb2 [
+ i32 1, label %bb0
+ i32 2, label %bb1
+ ]
+
+bb0:
+ store i32 9, ptr %arg2
+ ret void
+
+bb1:
+ store i32 10, ptr %arg2
+ unreachable
+
+bb2:
+ ret void
+}
+
+; INTERESTING-LABEL: @replace_uncond_br_with_ret(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @replace_uncond_br_with_ret(
+; RESULT-NEXT: entry:
+; RESULT: %load0 = load i32, ptr %arg1, align 4
+; RESULT-NEXT: ret i32 %load0
+; RESULT-NEXT: }
+define void @replace_uncond_br_with_ret(i1 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %load0 = load i32, ptr %arg1
+ br label %bb0
+
+bb0:
+ store i32 %load0, ptr %arg2
+ ret void
+}
+
+; INTERESTING-LABEL: @replace_uncond_br_with_ret_with_phi(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @replace_uncond_br_with_ret_with_phi(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1
+; RESULT-NEXT: ret i32 %load0
+; RESULT-NEXT: }
+define void @replace_uncond_br_with_ret_with_phi(i1 %arg0, ptr %arg1, ptr %arg2, ptr %arg3) {
+entry:
+ %load0 = load i32, ptr %arg1
+ br label %bb0
+
+bb0:
+ %phi = phi i32 [ %load0, %entry ]
+ store i32 %phi, ptr %arg2
+ store i32 %load0, ptr %arg3
+ ret void
+}
+
+; INTERESTING-LABEL: @use_tail_instr_in_successor_phi(
+; INTERESTING: %load0 = load i32, ptr %arg1
+
+; RESULT-LABEL: define i32 @use_tail_instr_in_successor_phi(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1
+; RESULT-NEXT: ret i32 %load0
+; RESULT-NEXT: }
+define void @use_tail_instr_in_successor_phi(i1 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %load0 = load i32, ptr %arg1
+ %load1 = load i32, ptr %arg2
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32 [ %load0, %entry ], [ %load1, %bb1 ]
+ store i32 %phi, ptr @gv
+ br label %bb2
+
+bb1:
+ br label %bb0
+
+bb2:
+ ret void
+}
+
+; INTERESTING-LABEL: @use_before_instr_in_successor_phi(
+; INTERESTING: %load1 = load i32, ptr %arg2
+
+; RESULT-LABEL: define i32 @use_before_instr_in_successor_phi(
+; RESULT-NEXT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg1
+; RESULT-NEXT: %load1 = load i32, ptr %arg2
+; RESULT-NEXT: ret i32 %load1
+; RESULT-NEXT: }
+define void @use_before_instr_in_successor_phi(i1 %arg0, ptr %arg1, ptr %arg2) {
+entry:
+ %load0 = load i32, ptr %arg1
+ %load1 = load i32, ptr %arg2
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32 [ %load0, %entry ], [ %load1, %bb1 ]
+ store i32 %phi, ptr @gv
+ br label %bb2
+
+bb1:
+ br label %bb0
+
+bb2:
+ ret void
+}
+
+declare i32 @maybe_throwing_callee(i32)
+declare void @thrown()
+declare void @did_not_throw(i32)
+
+; TODO: Handle invokes properly
+; INTERESTING-LABEL: @reduce_invoke_use(
+; INTERESTING: call void @did_not_throw(i32 %invoke)
+
+; RESULT-LABEL: define { ptr, i32 } @reduce_invoke_use(
+
+; RESULT: %invoke = invoke i32 @maybe_throwing_callee
+
+; RESULT: bb1: ; preds = %bb
+; RESULT-NEXT: %landing = landingpad { ptr, i32 }
+; RESULT-NEXT: catch ptr null
+; RESULT-NEXT: ret { ptr, i32 } %landing
+
+; RESULT: bb4: ; preds = %bb3
+; RESULT-NEXT: ret { ptr, i32 } zeroinitializer
+define void @reduce_invoke_use(i32 %arg) personality ptr @__gxx_personality_v0 {
+bb:
+ %invoke = invoke i32 @maybe_throwing_callee(i32 %arg)
+ to label %bb3 unwind label %bb1
+
+bb1: ; preds = %bb
+ %landing = landingpad { ptr, i32 }
+ catch ptr null
+ call void @thrown()
+ br label %bb4
+
+bb3: ; preds = %bb
+ call void @did_not_throw(i32 %invoke)
+ br label %bb4
+
+bb4: ; preds = %bb3, %bb1
+ ret void
+}
+
+; We can replace the branch in %bb0 with a return, but bb2 will still
+; be reachable after
+
+; INTERESTING-LABEL: @successor_block_not_dead_after_ret(
+; INTERESTING: %load0 = load i32, ptr %arg2
+
+; RESULT-LABEL: define i32 @successor_block_not_dead_after_ret(
+; RESULT: entry:
+; RESULT-NEXT: br i1 %arg0, label %bb0, label %bb2
+
+; RESULT: bb0: ; preds = %entry
+; RESULT-NEXT: %load0 = load i32, ptr %arg2, align 4
+; RESULT-NEXT: ret i32 %load0
+
+; RESULT: bb2: ; preds = %entry
+; RESULT-NEXT: %phi = phi i32 [ %arg4, %entry ]
+; RESULT-NEXT: ret i32 %phi
+; RESULT-NEXT: }
+define void @successor_block_not_dead_after_ret(i1 %arg0, i1 %arg1, ptr %arg2, ptr %arg3, i32 %arg4, i32 %arg5) {
+entry:
+ br i1 %arg0, label %bb0, label %bb2
+
+bb0:
+ %load0 = load i32, ptr %arg2
+ store i32 %load0, ptr @gv
+ br i1 %arg1, label %bb1, label %bb2
+
+bb1:
+ %load1 = load i32, ptr %arg3
+ store i32 %load1, ptr @gv
+ br label %bb0
+
+bb2:
+ %phi = phi i32 [ %arg4, %entry ], [ %arg5, %bb0 ]
+ store i32 %phi, ptr @gv
+ ret void
+}
+
+; INTERESTING-LABEL: @successor_block_self_loop_phi(
+; INTERESTING: %load0 = load i32, ptr %arg2
+
+; RESULT-LABEL: define i32 @successor_block_self_loop_phi(
+; RESULT: entry:
+; RESULT-NEXT: br i1 %arg0, label %bb0, label %bb1
+
+; RESULT: bb0: ; preds = %entry
+; RESULT-NEXT: %phi = phi i32 [ 12, %entry ]
+; RESULT-NEXT: %load0 = load i32, ptr %arg2, align 4
+; RESULT-NEXT: ret i32 %load0
+
+; RESULT: bb1: ; preds = %entry
+; RESULT-NEXT: ret i32 0
+; RESULT-NEXT: }
+define void @successor_block_self_loop_phi(i1 %arg0, i1 %arg1, ptr %arg2) {
+entry:
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32 [ 12, %entry ], [ %load0, %bb0 ]
+ %load0 = load i32, ptr %arg2
+ store i32 %phi, ptr @gv
+ br i1 %arg1, label %bb0, label %bb1
+
+bb1:
+ ret void
+}
+
+; INTERESTING-LABEL: @successor_block_self_loop_phi_2(
+; INTERESTING: %phi1 = phi i32
+
+; RESULT-LABEL: define i32 @successor_block_self_loop_phi_2(
+; RESULT: entry:
+; RESULT-NEXT: %load0 = load i32, ptr %arg2
+; RESULT-NEXT: br i1 %arg0, label %bb0, label %bb1
+
+; RESULT: bb0: ; preds = %entry
+; RESULT-NEXT: %phi0 = phi i32 [ 12, %entry ]
+; RESULT-NEXT: %phi1 = phi i32 [ %arg4, %entry ]
+; RESULT-NEXT: ret i32 %phi1
+
+; RESULT-NOT: bb
+
+; RESULT: bb1: ; preds = %entry
+; RESULT-NEXT: ret i32 0
+; RESULT-NEXT: }
+define void @successor_block_self_loop_phi_2(i1 %arg0, i1 %arg1, ptr %arg2, ptr %arg3, i32 %arg4) {
+entry:
+ %load0 = load i32, ptr %arg2
+ br i1 %arg0, label %bb0, label %bb1
+
+bb0:
+ %phi0 = phi i32 [ 12, %entry ], [ %load0, %bb0 ]
+ %phi1 = phi i32 [ %arg4, %entry ], [ %load1, %bb0 ]
+ %load1 = load i32, ptr %arg3
+ store i32 %phi0, ptr @gv
+ store i32 %phi1, ptr @gv
+ br i1 %arg1, label %bb0, label %bb1
+
+bb1:
+ ret void
+}
diff --git a/llvm/tools/llvm-reduce/CMakeLists.txt b/llvm/tools/llvm-reduce/CMakeLists.txt
index b8ad6f71b41e5..a43d2d082ff3e 100644
--- a/llvm/tools/llvm-reduce/CMakeLists.txt
+++ b/llvm/tools/llvm-reduce/CMakeLists.txt
@@ -54,6 +54,7 @@ add_llvm_tool(llvm-reduce
deltas/ReduceInstructionsMIR.cpp
deltas/ReduceInstructionFlagsMIR.cpp
deltas/ReduceIRReferences.cpp
+ deltas/ReduceValuesToReturn.cpp
deltas/ReduceVirtualRegisters.cpp
deltas/ReduceRegisterMasks.cpp
deltas/ReduceRegisterDefs.cpp
diff --git a/llvm/tools/llvm-reduce/DeltaManager.cpp b/llvm/tools/llvm-reduce/DeltaManager.cpp
index 624b5306bc71b..e299c1bd262cd 100644
--- a/llvm/tools/llvm-reduce/DeltaManager.cpp
+++ b/llvm/tools/llvm-reduce/DeltaManager.cpp
@@ -47,6 +47,7 @@
#include "deltas/ReduceRegisterUses.h"
#include "deltas/ReduceSpecialGlobals.h"
#include "deltas/ReduceUsingSimplifyCFG.h"
+#include "deltas/ReduceValuesToReturn.h"
#include "deltas/ReduceVirtualRegisters.h"
#include "deltas/RunIRPasses.h"
#include "deltas/SimplifyInstructions.h"
@@ -110,6 +111,7 @@ static cl::list<std::string>
DELTA_PASS("attributes", reduceAttributesDeltaPass) \
DELTA_PASS("module-data", reduceModuleDataDeltaPass) \
DELTA_PASS("opcodes", reduceOpcodesDeltaPass) \
+ DELTA_PASS("values-to-return", reduceValuesToReturnDeltaPass) \
DELTA_PASS("volatile", reduceVolatileInstructionsDeltaPass) \
DELTA_PASS("atomic-ordering", reduceAtomicOrderingDeltaPass) \
DELTA_PASS("syncscopes", reduceAtomicSyncScopesDeltaPass) \
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
new file mode 100644
index 0000000000000..6ac498bb31a30
--- /dev/null
+++ b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
@@ -0,0 +1,245 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Try to reduce a function by inserting new return instructions. Try to insert
+// an early return for each instruction value at that point. This requires
+// mutating the return type, or finding instructions with a compatible type.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "llvm-reduce"
+
+#include "ReduceValuesToReturn.h"
+
+#include "Delta.h"
+#include "Utils.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+
+using namespace llvm;
+
+/// Return true if it is legal to emit a copy of the function with a non-void
+/// return type.
+static bool canUseNonVoidReturnType(const Function &F) {
+ // Functions with sret arguments must return void.
+ return !F.hasStructRetAttr() &&
+ CallingConv::supportsNonVoidReturnType(F.getCallingConv());
+}
+
+/// Return true if it's legal to replace a function return type to use \p Ty.
+static bool isReallyValidReturnType(Type *Ty) {
+ return FunctionType::isValidReturnType(Ty) && !Ty->isTokenTy() &&
+ Ty->isFirstClassType();
+}
+
+/// Insert a ret inst after \p NewRetValue, which returns the value it produces.
+static void rewriteFuncWithReturnType(Function &OldF,
+ Instruction *NewRetValue) {
+ Type *NewRetTy = NewRetValue->getType();
+ FunctionType *OldFuncTy = OldF.getFunctionType();
+
+ FunctionType *NewFuncTy =
+ FunctionType::get(NewRetTy, OldFuncTy->params(), OldFuncTy->isVarArg());
+
+ LLVMContext &Ctx = OldF.getContext();
+
+ BasicBlock *NewRetBlock = NewRetValue->getParent();
+
+ // Hack up any return values in other blocks, we can't leave them as ret void.
+ if (OldFuncTy->getReturnType()->isVoidTy()) {
+ for (BasicBlock &OtherRetBB : OldF) {
+ if (&OtherRetBB != NewRetBlock) {
+ auto *OrigRI = dyn_cast<ReturnInst>(OtherRetBB.getTerminator());
+ if (!OrigRI)
+ continue;
+
+ OrigRI->eraseFromParent();
+ ReturnInst::Create(Ctx, getDefaultValue(NewRetTy), &OtherRetBB);
+ }
+ }
+ }
+
+ // Now prune any CFG edges we have to deal with.
+ //
+ // Use KeepOneInputPHIs in case the instruction we are using for the return is
+ // that phi.
+ // TODO: Could avoid this with fancier iterator management.
+ for (BasicBlock *Succ : successors(NewRetBlock))
+ Succ->removePredecessor(NewRetBlock, /*KeepOneInputPHIs=*/true);
+
+ // Now delete the tail of this block, in reverse to delete uses before defs.
+ for (Instruction &I : make_early_inc_range(make_range(
+ NewRetBlock->rbegin(), NewRetValue->getIterator().getReverse()))) {
+
+ Value *Replacement = getDefaultValue(I.getType());
+ I.replaceAllUsesWith(Replacement);
+ I.eraseFromParent();
+ }
+
+ ReturnInst::Create(Ctx, NewRetValue, NewRetBlock);
+
+ // TODO: We may be eliminating blocks that were originally unreachable. We
+ // probably ought to only be pruning blocks that became dead directly as a
+ // result of our pruning here.
+ EliminateUnreachableBlocks(OldF);
+
+ Function *NewF =
+ Function::Create(NewFuncTy, OldF.getLinkage(), OldF.getAddressSpace(), "",
+ OldF.getParent());
+
+ NewF->removeFromParent();
+ OldF.getParent()->getFunctionList().insertAfter(OldF.getIterator(), NewF);
+ NewF->takeName(&OldF);
+ NewF->copyAttributesFrom(&OldF);
+
+ // Adjust the callsite uses to the new return type. We pre-filtered cases
+ // where the original call type was incorrectly non-void.
+ for (User *U : make_early_inc_range(OldF.users())) {
+ if (auto *CB = dyn_cast<CallBase>(U)) {
+ if (CB->getType()->isVoidTy()) {
+ FunctionType *CallType = CB->getFunctionType();
+
+ // The callsite may not match the new function type, in an undefined
+ // behavior way. Only mutate the local return type.
+ FunctionType *NewCallType = FunctionType::get(
+ NewRetTy, CallType->params(), CallType->isVarArg());
+
+ CB->mutateType(NewRetTy);
+ CB->setCalledFunction(NewCallType, NewF);
+ } else {
+ assert(CB->getType() == NewRetTy &&
+ "only handle exact return type match non-void returns");
+ }
+ }
+ }
+
+ // Preserve the parameters of OldF.
+ ValueToValueMapTy VMap;
+ for (auto Z : zip_first(OldF.args(), NewF->args())) {
+ Argument &OldArg = std::get<0>(Z);
+ Argument &NewArg = std::get<1>(Z);
+
+ NewArg.setName(OldArg.getName()); // Copy the name over...
+ VMap[&OldArg] = &NewArg; // Add mapping to VMap
+ }
+
+ SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
+ CloneFunctionInto(NewF, &OldF, VMap,
+ CloneFunctionChangeType::LocalChangesOnly, Returns, "",
+ /*CodeInfo=*/nullptr);
+ OldF.replaceAllUsesWith(NewF);
+ OldF.eraseFromParent();
+}
+
+// Check if all the callsites of the void function are void, or happen to
+// incorrectly use the new return type.
+//
+// TODO: We could make better effort to handle call type mismatches.
+static bool canReplaceFuncUsers(const Function &F, Type *NewRetTy) {
+ for (const Use &U : F.uses()) {
+ const CallBase *CB = dyn_cast<CallBase>(U.getUser());
+ if (!CB)
+ continue;
+
+  // Normal pointer uses are trivially replaceable.
+ if (!CB->isCallee(&U))
+ continue;
+
+ // We can trivially replace the correct void call sites.
+ if (CB->getType()->isVoidTy())
+ continue;
+
+ // We can trivially replace the call if the return type happened to match
+ // the new return type.
+ if (CB->getType() == NewRetTy)
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Cannot replace callsite with wrong type: " << *CB
+ << '\n');
+ return false;
+ }
+
+ return true;
+}
+
+/// Return true if it's worthwhile replacing the non-void return value of \p BB
+/// with \p Replacement
+static bool shouldReplaceNonVoidReturnValue(const BasicBlock &BB,
+ Value *Replacement) {
+ if (const auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
+ return RI->getReturnValue() != Replacement;
+ return true;
+}
+
+static bool canHandleSuccessors(const BasicBlock &BB) {
+ // TODO: Handle invoke and other exotic terminators
+ if (!isa<ReturnInst, UnreachableInst, BranchInst, SwitchInst>(
+ BB.getTerminator()))
+ return false;
+
+ for (const BasicBlock *Succ : successors(&BB)) {
+ if (!Succ->canSplitPredecessors())
+ return false;
+ }
+
+ return true;
+}
+
+static bool tryForwardingValuesToReturn(
+ Function &F, Oracle &O,
+ std::vector<std::pair<Function *, Instruction *>> &FuncsToReplace) {
+
+ // TODO: Should this try to forward arguments to the return value before
+ // instructions?
+
+ // TODO: Should we try to expand returns to aggregate for function that
+ // already have a return value?
+ Type *RetTy = F.getReturnType();
+
+ for (BasicBlock &BB : F) {
+ if (!canHandleSuccessors(BB))
+ continue;
+
+ for (Instruction &I : BB) {
+ if (!isReallyValidReturnType(I.getType()))
+ continue;
+
+ if ((RetTy->isVoidTy() ||
+ (RetTy == I.getType() && shouldReplaceNonVoidReturnValue(BB, &I))) &&
+ canReplaceFuncUsers(F, I.getType()) && !O.shouldKeep()) {
+ FuncsToReplace.emplace_back(&F, &I);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void reduceValuesToReturn(Oracle &O, ReducerWorkItem &WorkItem) {
+ Module &Program = WorkItem.getModule();
+
+ // We're going to chaotically hack on the other users of the function in other
+ // functions, so we need to collect a worklist of returns to replace.
+ std::vector<std::pair<Function *, Instruction *>> FuncsToReplace;
+
+ for (Function &F : Program.functions()) {
+ if (!F.isDeclaration() && canUseNonVoidReturnType(F))
+ tryForwardingValuesToReturn(F, O, FuncsToReplace);
+ }
+
+ for (auto [F, I] : FuncsToReplace)
+ rewriteFuncWithReturnType(*F, I);
+}
+
+void llvm::reduceValuesToReturnDeltaPass(TestRunner &Test) {
+ runDeltaPass(Test, reduceValuesToReturn,
+ "Converting values to function return value");
+}
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.h b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.h
new file mode 100644
index 0000000000000..eac6480cb6c04
--- /dev/null
+++ b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.h
@@ -0,0 +1,18 @@
+//===- ReduceValuesToReturn.h - Specialized Delta Pass ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_LLVM_REDUCE_DELTAS_REDUCEVALUESTORETURN_H
+#define LLVM_TOOLS_LLVM_REDUCE_DELTAS_REDUCEVALUESTORETURN_H
+
+#include "TestRunner.h"
+
+namespace llvm {
+void reduceValuesToReturnDeltaPass(TestRunner &Test);
+} // namespace llvm
+
+#endif
>From c658ad37b4014d24dca04d91691f5ed642c6c575 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 25 Mar 2025 12:42:11 +0700
Subject: [PATCH 2/3] Fix non-void return typed non-callee uses
---
...-values-to-return-nonvoid-noncallee-use.ll | 27 +++++++++++++++++++
.../deltas/ReduceValuesToReturn.cpp | 5 ++--
2 files changed, 30 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/tools/llvm-reduce/reduce-values-to-return-nonvoid-noncallee-use.ll
diff --git a/llvm/test/tools/llvm-reduce/reduce-values-to-return-nonvoid-noncallee-use.ll b/llvm/test/tools/llvm-reduce/reduce-values-to-return-nonvoid-noncallee-use.ll
new file mode 100644
index 0000000000000..215ea97a8be91
--- /dev/null
+++ b/llvm/test/tools/llvm-reduce/reduce-values-to-return-nonvoid-noncallee-use.ll
@@ -0,0 +1,27 @@
+; Make sure we don't break on non-callee uses of functions with a
+; non-void return type.
+
+; RUN: llvm-reduce --abort-on-invalid-reduction --delta-passes=values-to-return --test FileCheck --test-arg --check-prefix=INTERESTING --test-arg %s --test-arg --input-file %s -o %t
+; RUN: FileCheck --check-prefix=RESULT %s < %t
+
+; INTERESTING-LABEL: @interesting(
+; INTERESTING: %inttoptr = inttoptr i64
+
+; RESULT-LABEL: define ptr @interesting(i64 %arg) {
+; RESULT-NEXT: %inttoptr = inttoptr i64 %arg to ptr
+; RESULT-NEXT: ret ptr %inttoptr
+define void @interesting(i64 %arg) {
+ %inttoptr = inttoptr i64 %arg to ptr
+ %load = load i32, ptr %inttoptr
+ ret void
+}
+
+declare i32 @func(ptr)
+
+; RESULT-LABEL: define i32 @caller() {
+; RESULT-NEXT: %call = call i32 @func(ptr @interesting)
+; RESULT-NEXT: ret i32 %call
+define void @caller() {
+ %call = call i32 @func(ptr @interesting)
+ ret void
+}
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
index 6ac498bb31a30..4c8ce6ebe0d94 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
@@ -102,7 +102,8 @@ static void rewriteFuncWithReturnType(Function &OldF,
// Adjust the callsite uses to the new return type. We pre-filtered cases
// where the original call type was incorrectly non-void.
for (User *U : make_early_inc_range(OldF.users())) {
- if (auto *CB = dyn_cast<CallBase>(U)) {
+ if (auto *CB = dyn_cast<CallBase>(U);
+ CB && CB->getCalledOperand() == &OldF) {
if (CB->getType()->isVoidTy()) {
FunctionType *CallType = CB->getFunctionType();
@@ -115,7 +116,7 @@ static void rewriteFuncWithReturnType(Function &OldF,
CB->setCalledFunction(NewCallType, NewF);
} else {
assert(CB->getType() == NewRetTy &&
- "only handle exact return type match non-void returns");
+ "only handle exact return type match with non-void returns");
}
}
}
>From 6b471c971db1ff6c0fb5fadb52381382e6578714 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 25 Mar 2025 13:19:13 +0700
Subject: [PATCH 3/3] Add const
---
llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
index 4c8ce6ebe0d94..0c1affcac3698 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceValuesToReturn.cpp
@@ -173,7 +173,7 @@ static bool canReplaceFuncUsers(const Function &F, Type *NewRetTy) {
/// Return true if it's worthwhile replacing the non-void return value of \p BB
/// with \p Replacement
static bool shouldReplaceNonVoidReturnValue(const BasicBlock &BB,
- Value *Replacement) {
+ const Value *Replacement) {
if (const auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
return RI->getReturnValue() != Replacement;
return true;
More information about the llvm-commits
mailing list