[llvm] [x86] Enable indirect tail calls with more arguments (PR #137643)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 28 07:49:41 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Hans Wennborg (zmodem)
X86ISelDAGToDAG's `isCalleeLoad` / `moveBelowOrigChain` machinery tries to move the callee load node next to the call so the two can be folded, but it previously allowed only a single CopyToReg node in between.
This patch makes it look through multiple CopyToReg nodes, while taking care to perform the transformation only when the load and call can actually be folded.
Fixes #136848
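
Concretely (taken from the updated `cfguard-checks.ll` and new `fold-call-4.ll` tests below), the callee address no longer has to be materialized into a scratch register first:

```
; before: load the target, then jump through a register
movq __guard_dispatch_icall_fptr(%rip), %rdx
rex64 jmpq *%rdx
; after: the load is folded into the indirect tail call
rex64 jmpq *__guard_dispatch_icall_fptr(%rip)
```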
---
Full diff: https://github.com/llvm/llvm-project/pull/137643.diff
3 Files Affected:
- (modified) llvm/lib/Target/X86/X86ISelDAGToDAG.cpp (+46-13)
- (modified) llvm/test/CodeGen/X86/cfguard-checks.ll (+1-2)
- (added) llvm/test/CodeGen/X86/fold-call-4.ll (+13)
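
For readers skimming the diff below: the core of the `isCalleeLoad` change is that the chain walk now steps through any number of single-use CopyToReg nodes instead of giving up after one. A minimal, self-contained sketch of that walk, in plain C++ rather than the real SelectionDAG API (the node kinds and fields here are simplified stand-ins):

```cpp
// Simplified stand-ins for SelectionDAG nodes; not the LLVM API.
enum class Kind { CalleeLoad, CopyToReg, Store, Other };

struct Node {
  Kind kind;
  Node *chainOperand; // the incoming chain (operand 0 in the real DAG)
  unsigned numUses;   // uses of this node's chain result
};

// Sketch of the new isCalleeLoad loop: starting below callseq_start, keep
// stepping through single-use CopyToReg nodes; succeed on reaching the
// callee load, and bail out on stores (no alias analysis here) or when
// the chain branches.
bool chainReachesCalleeLoad(Node *chain, Node *callee) {
  while (chain && chain->chainOperand) {
    if (chain->kind == Kind::Store)
      return false; // can't move a load across a store
    if (chain->chainOperand == callee)
      return true;  // found the load right below us
    if (chain->chainOperand->kind == Kind::CopyToReg &&
        chain->chainOperand->numUses == 1) {
      chain = chain->chainOperand; // look past one register copy
      continue;
    }
    return false;   // anything else: the chain branches or is opaque
  }
  return false;
}
```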
``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 01118beb9cf5e..7d6359f701368 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -890,6 +890,12 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
LD->getExtensionType() != ISD::NON_EXTLOAD)
return false;
+ // If the load's outgoing chain has more than one use, we can't (currently)
+ // move the load since we'd most likely create a loop. TODO: Maybe it could
+ // work if moveBelowOrigChain() updated *all* the chain users.
+ if (!Callee.getValue(1).hasOneUse())
+ return false;
+
// Now let's find the callseq_start.
while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
if (!Chain.hasOneUse())
@@ -897,20 +903,31 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
Chain = Chain.getOperand(0);
}
- if (!Chain.getNumOperands())
- return false;
- // Since we are not checking for AA here, conservatively abort if the chain
- // writes to memory. It's not safe to move the callee (a load) across a store.
- if (isa<MemSDNode>(Chain.getNode()) &&
- cast<MemSDNode>(Chain.getNode())->writeMem())
+ while (true) {
+ if (!Chain.getNumOperands())
+ return false;
+ // Since we are not checking for AA here, conservatively abort if the chain
+ // writes to memory. It's not safe to move the callee (a load) across a store.
+ if (isa<MemSDNode>(Chain.getNode()) &&
+ cast<MemSDNode>(Chain.getNode())->writeMem())
+ return false;
+
+ if (Chain.getOperand(0).getNode() == Callee.getNode())
+ return true;
+ if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
+ Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
+ Callee.getValue(1).hasOneUse())
+ return true;
+
+ // Look past CopyToRegs. We only walk one path, so the chain mustn't branch.
+ if (Chain.getOperand(0).getOpcode() == ISD::CopyToReg &&
+ Chain.getOperand(0).getValue(0).hasOneUse()) {
+ Chain = Chain.getOperand(0);
+ continue;
+ }
+
return false;
- if (Chain.getOperand(0).getNode() == Callee.getNode())
- return true;
- if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
- Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
- Callee.getValue(1).hasOneUse())
- return true;
- return false;
+ }
}
static bool isEndbrImm64(uint64_t Imm) {
@@ -1353,6 +1370,22 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
(N->getOpcode() == X86ISD::TC_RETURN &&
(Subtarget->is64Bit() ||
!getTargetMachine().isPositionIndependent())))) {
+
+ if (N->getOpcode() == X86ISD::TC_RETURN) {
+ // There needs to be enough non-callee-saved GPRs available to compute
+ // the load address if folded into the tailcall. See how the
+ // X86tcret_6regs and X86tcret_1reg classes are used and defined.
+ unsigned NumRegs = 0;
+ for (unsigned I = 3, E = N->getNumOperands(); I != E; ++I) {
+ if (isa<RegisterSDNode>(N->getOperand(I)))
+ ++NumRegs;
+ }
+ if (!Subtarget->is64Bit() && NumRegs > 1)
+ continue;
+ if (NumRegs > 6)
+ continue;
+ }
+
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
diff --git a/llvm/test/CodeGen/X86/cfguard-checks.ll b/llvm/test/CodeGen/X86/cfguard-checks.ll
index a727bbbfdcbe3..db19efaf910a3 100644
--- a/llvm/test/CodeGen/X86/cfguard-checks.ll
+++ b/llvm/test/CodeGen/X86/cfguard-checks.ll
@@ -210,8 +210,7 @@ entry:
; X64-LABEL: vmptr_thunk:
; X64: movq (%rcx), %rax
; X64-NEXT: movq 8(%rax), %rax
- ; X64-NEXT: movq __guard_dispatch_icall_fptr(%rip), %rdx
- ; X64-NEXT: rex64 jmpq *%rdx # TAILCALL
+ ; X64-NEXT: rex64 jmpq *__guard_dispatch_icall_fptr(%rip) # TAILCALL
; X64-NOT: callq
}
diff --git a/llvm/test/CodeGen/X86/fold-call-4.ll b/llvm/test/CodeGen/X86/fold-call-4.ll
new file mode 100644
index 0000000000000..708e05a0bfff0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fold-call-4.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+; The callee address computation should get folded into the call.
+; CHECK-LABEL: f:
+; CHECK-NOT: mov
+; CHECK: jmpq *(%rdi,%rsi,8)
+define void @f(ptr %table, i64 %idx, i64 %aux1, i64 %aux2, i64 %aux3) {
+entry:
+ %arrayidx = getelementptr inbounds ptr, ptr %table, i64 %idx
+ %funcptr = load ptr, ptr %arrayidx, align 8
+ tail call void %funcptr(ptr %table, i64 %idx, i64 %aux1, i64 %aux2, i64 %aux3)
+ ret void
+}
``````````
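
The other half of the patch is the new guard in `PreprocessISelDAG`: folding the load into a `TC_RETURN` means the address registers must still be available at the jump, so there have to be enough non-callee-saved GPRs left over after the call's register arguments. A rough sketch of that check, under the same simplified-model caveat as above (the 1-register 32-bit and 6-register 64-bit limits mirror the `X86tcret_1reg` / `X86tcret_6regs` patterns mentioned in the comment):

```cpp
// Sketch of the TC_RETURN guard: given the number of register operands on
// the tail call, decide whether to skip the load-folding transformation.
bool worthFoldingTailCallLoad(unsigned numRegisterOperands, bool is64Bit) {
  if (!is64Bit && numRegisterOperands > 1)
    return false; // 32-bit: X86tcret_1reg allows at most one register
  if (numRegisterOperands > 6)
    return false; // 64-bit: X86tcret_6regs allows at most six
  return true;
}
```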
https://github.com/llvm/llvm-project/pull/137643