[llvm] 8373cee - [CGP] Extend `dupRetToEnableTailCallOpts` to known intrinsics
Antonio Frighetto via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 12 05:20:22 PST 2024
Author: Antonio Frighetto
Date: 2024-02-12T14:17:02+01:00
New Revision: 8373ceef8f2ee377d6daf884e2f3ea11408a7fe2
URL: https://github.com/llvm/llvm-project/commit/8373ceef8f2ee377d6daf884e2f3ea11408a7fe2
DIFF: https://github.com/llvm/llvm-project/commit/8373ceef8f2ee377d6daf884e2f3ea11408a7fe2.diff
LOG: [CGP] Extend `dupRetToEnableTailCallOpts` to known intrinsics
Hint at further tail call optimization opportunities when the examined
returned value is the return value of a known intrinsic or library
function and appears as the first argument of that call.
Fixes: https://github.com/llvm/llvm-project/issues/75455.
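For illustration, here is a minimal IR sketch of the phi-based shape this
change makes eligible (the function and value names are made up for this
note, not taken from the patch; it mirrors the memset_tailc test updated
below). The memset result is unused, but the value flowing into the return
phi is the call's first argument, so the ret can be duplicated into
%if.then and the memset emitted as a tail call:

  define ptr @zero_and_return(ptr %dst, i64 %n, ptr %other) {
  entry:
    %cmp = icmp eq i64 %n, 0
    br i1 %cmp, label %return, label %if.then

  if.then:
    ; memset returns void at the IR level; the pointer being "returned"
    ; is its first argument, %dst.
    tail call void @llvm.memset.p0.i64(ptr %dst, i8 0, i64 %n, i1 false)
    br label %return

  return:
    %phi = phi ptr [ %dst, %if.then ], [ %other, %entry ]
    ret ptr %phi
  }

  declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)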
Added:
Modified:
llvm/lib/CodeGen/CodeGenPrepare.cpp
llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 09c4922d8822cc..32a25b49b4e4b0 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2522,8 +2522,40 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
return false;
}
+static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
+ const CallInst *CI) {
+ assert(CI && CI->use_empty());
+
+ if (const auto *II = dyn_cast<IntrinsicInst>(CI))
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ return true;
+ default:
+ return false;
+ }
+
+ LibFunc LF;
+ Function *Callee = CI->getCalledFunction();
+ if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
+ switch (LF) {
+ case LibFunc_strcpy:
+ case LibFunc_strncpy:
+ case LibFunc_strcat:
+ case LibFunc_strncat:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
/// Look for opportunities to duplicate return instructions to the predecessor
-/// to enable tail call optimizations. The case it is currently looking for is:
+/// to enable tail call optimizations. The case it is currently looking for is
+/// the following one. Known intrinsics or library functions that may be tail
+/// called are taken into account as well.
/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
@@ -2580,8 +2612,6 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
}
PN = dyn_cast<PHINode>(V);
- if (!PN)
- return false;
}
if (PN && PN->getParent() != BB)
@@ -2620,8 +2650,30 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
// Make sure the phi value is indeed produced by the tail call.
if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
TLI->mayBeEmittedAsTailCall(CI) &&
- attributesPermitTailCall(F, CI, RetI, *TLI))
+ attributesPermitTailCall(F, CI, RetI, *TLI)) {
TailCallBBs.push_back(PredBB);
+ } else {
+ // Consider the cases in which the phi value is indirectly produced by
+ // the tail call, for example when encountering memset(), memmove(),
+ // strcpy(), whose return value may have been optimized out. In such
+ // cases, the value needs to be the first function argument.
+ //
+ // bb0:
+ // tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
+ // br label %return
+ // return:
+ // %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
+ if (PredBB && PredBB->getSingleSuccessor() == BB)
+ CI = dyn_cast_or_null<CallInst>(
+ PredBB->getTerminator()->getPrevNonDebugInstruction(true));
+
+ if (CI && CI->use_empty() &&
+ isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
+ IncomingVal == CI->getArgOperand(0) &&
+ TLI->mayBeEmittedAsTailCall(CI) &&
+ attributesPermitTailCall(F, CI, RetI, *TLI))
+ TailCallBBs.push_back(PredBB);
+ }
}
} else {
SmallPtrSet<BasicBlock *, 4> VisitedBBs;
@@ -2631,8 +2683,14 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
CallInst *CI = dyn_cast<CallInst>(I);
if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
- attributesPermitTailCall(F, CI, RetI, *TLI))
- TailCallBBs.push_back(Pred);
+ attributesPermitTailCall(F, CI, RetI, *TLI)) {
+ // Either we return void or the return value must be the first
+ // argument of a known intrinsic or library function.
+ if (!V || (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
+ V == CI->getArgOperand(0))) {
+ TailCallBBs.push_back(Pred);
+ }
+ }
}
}
}
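The second hunk above handles the non-phi shape: if a predecessor of the
return block ends in an otherwise unused call to one of these routines and
the returned value is that call's first argument, the ret can likewise be
duplicated into the predecessor. A sketch of that shape (again with
illustrative names, not code taken from the patch):

  define ptr @copy_and_return(ptr %dst, ptr %src, i1 %do_copy) {
  entry:
    br i1 %do_copy, label %if.then, label %return

  if.then:
    ; The strcpy result is unused; the value returned below is the
    ; call's first argument, %dst.
    %unused = call ptr @strcpy(ptr %dst, ptr %src)
    br label %return

  return:
    ret ptr %dst
  }

  declare ptr @strcpy(ptr, ptr)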
diff --git a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
index c48087da500e0e..401ed9f7bc5a9e 100644
--- a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
+++ b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
@@ -188,18 +188,14 @@ return:
define ptr @memset_tailc(ptr %ret_val, i64 %sz) nounwind {
; CHECK-LABEL: memset_tailc:
; CHECK: ## %bb.0: ## %entry
-; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: testq %rdi, %rdi
-; CHECK-NEXT: je LBB4_2
-; CHECK-NEXT: ## %bb.1: ## %if.then
+; CHECK-NEXT: je LBB4_1
+; CHECK-NEXT: ## %bb.2: ## %if.then
; CHECK-NEXT: movq %rsi, %rdx
-; CHECK-NEXT: movq %rbx, %rdi
; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq _memset
-; CHECK-NEXT: LBB4_2: ## %return
-; CHECK-NEXT: movq %rbx, %rax
-; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: jmp _memset ## TAILCALL
+; CHECK-NEXT: LBB4_1: ## %return
+; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
entry:
%cmp = icmp eq ptr %ret_val, null
@@ -216,21 +212,15 @@ return:
define ptr @memcpy_tailc(ptr %ret_val, i64 %sz, ptr %src) nounwind {
; CHECK-LABEL: memcpy_tailc:
; CHECK: ## %bb.0: ## %entry
-; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: testq %rsi, %rsi
; CHECK-NEXT: je LBB5_1
; CHECK-NEXT: ## %bb.2: ## %if.then
; CHECK-NEXT: movq %rsi, %rax
-; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: movq %rdx, %rsi
; CHECK-NEXT: movq %rax, %rdx
-; CHECK-NEXT: callq _memcpy
-; CHECK-NEXT: jmp LBB5_3
-; CHECK-NEXT: LBB5_1:
-; CHECK-NEXT: movq %rdx, %rbx
-; CHECK-NEXT: LBB5_3: ## %return
-; CHECK-NEXT: movq %rbx, %rax
-; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: jmp _memcpy ## TAILCALL
+; CHECK-NEXT: LBB5_1: ## %return
+; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
%cmp = icmp eq i64 %sz, 0
@@ -251,25 +241,25 @@ define ptr @strcpy_legal_and_baz_illegal(ptr %arg, i64 %sz, ptr %2) nounwind {
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: movq %rdx, %r14
+; CHECK-NEXT: movq %rdx, %rbx
; CHECK-NEXT: movq %rsi, %r15
-; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movq %rdi, %r14
; CHECK-NEXT: movq %rsi, %rdi
; CHECK-NEXT: callq _malloc
; CHECK-NEXT: testq %r15, %r15
-; CHECK-NEXT: je LBB6_2
-; CHECK-NEXT: ## %bb.1: ## %if.then
+; CHECK-NEXT: je LBB6_1
+; CHECK-NEXT: ## %bb.2: ## %if.then
; CHECK-NEXT: movq %rax, %rdi
-; CHECK-NEXT: movq %r14, %rsi
-; CHECK-NEXT: movq %rax, %rbx
-; CHECK-NEXT: callq _strcpy
-; CHECK-NEXT: jmp LBB6_3
-; CHECK-NEXT: LBB6_2: ## %if.else
-; CHECK-NEXT: movq %rbx, %rdi
-; CHECK-NEXT: movq %r14, %rsi
+; CHECK-NEXT: movq %rbx, %rsi
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: jmp _strcpy ## TAILCALL
+; CHECK-NEXT: LBB6_1: ## %if.else
+; CHECK-NEXT: movq %r14, %rdi
+; CHECK-NEXT: movq %rbx, %rsi
; CHECK-NEXT: callq _baz
-; CHECK-NEXT: LBB6_3: ## %return
-; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: movq %r14, %rax
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
; CHECK-NEXT: popq %r15