[llvm] r281223 - [X86] Copy imp-uses when folding tailcall into conditional branch.
Ahmed Bougacha via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 12 09:05:28 PDT 2016
Author: ab
Date: Mon Sep 12 11:05:27 2016
New Revision: 281223
URL: http://llvm.org/viewvc/llvm-project?rev=281223&view=rev
Log:
[X86] Copy imp-uses when folding tailcall into conditional branch.
r280832 added 32-bit support for emitting conditional tail-calls, but
dropped the imp-use parameter registers from the folded instruction.
This went unnoticed until r281113 added 64-bit support, because the bug
is only exposed when parameters are passed in registers.
Don't drop the imp-used parameters.
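For illustration, here is a rough sketch of the fixed builder sequence
(paraphrasing the hunk below; buildCondTailCall is a hypothetical
wrapper for this note, not a function in the tree):

  #include "llvm/CodeGen/MachineInstrBuilder.h"
  using namespace llvm;

  // Hypothetical helper mirroring the tail of
  // X86InstrInfo::replaceBranchWithTailCall after this fix.
  static void buildCondTailCall(MachineInstrBuilder &MIB,
                                MachineInstr &TailCall,
                                const MachineOperand &Cond) {
    MIB->addOperand(TailCall.getOperand(0)); // Destination.
    MIB.addImm(0);                           // Stack offset (not used).
    MIB->addOperand(Cond);                   // Condition.
    // The old code copied only operand 2, the regmask; implicit uses
    // of parameter registers (e.g. %rdi, %rsi) on the original tail
    // call were lost. copyImplicitOps() copies the regmask and the
    // imp-use registers together.
    MIB.copyImplicitOps(TailCall);
  }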
Added:
llvm/trunk/test/CodeGen/X86/tail-call-conditional.mir
Modified:
llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=281223&r1=281222&r2=281223&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Mon Sep 12 11:05:27 2016
@@ -4176,7 +4176,7 @@ void X86InstrInfo::replaceBranchWithTail
MIB->addOperand(TailCall.getOperand(0)); // Destination.
MIB.addImm(0); // Stack offset (not used).
MIB->addOperand(BranchCond[0]); // Condition.
- MIB->addOperand(TailCall.getOperand(2)); // Regmask.
+ MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
I->eraseFromParent();
}
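For reference, MachineInstrBuilder::copyImplicitOps forwards to
MachineInstr::copyImplicitOps, which re-adds every operand past the
descriptor's explicit ones that is an implicit register or a regmask.
Roughly (a paraphrased sketch, not the exact upstream source; see
lib/CodeGen/MachineInstr.cpp for the real implementation):

  // Src is the original TCRETURNdi64, Dst the new TCRETURNdi64cc.
  for (unsigned i = Src.getDesc().getNumOperands(),
                e = Src.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = Src.getOperand(i);
    // Keep implicit register operands and regmasks; skip explicit ops.
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      Dst.addOperand(MF, MO);
  }

This is why both the csr_64 regmask and the implicit %rdi/%rsi uses
survive the fold in the test below.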
Added: llvm/trunk/test/CodeGen/X86/tail-call-conditional.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tail-call-conditional.mir?rev=281223&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tail-call-conditional.mir (added)
+++ llvm/trunk/test/CodeGen/X86/tail-call-conditional.mir Mon Sep 12 11:05:27 2016
@@ -0,0 +1,84 @@
+# RUN: llc -mtriple x86_64-- -verify-machineinstrs -run-pass branch-folder -o - %s | FileCheck %s
+
+# Check the TCRETURNdi64cc optimization.
+
+--- |
+ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+ define i64 @test(i64 %arg, i8* %arg1) optsize {
+ %tmp = icmp ult i64 %arg, 100
+ br i1 %tmp, label %1, label %4
+
+ %tmp3 = icmp ult i64 %arg, 10
+ br i1 %tmp3, label %2, label %3
+
+ %tmp5 = tail call i64 @f1(i8* %arg1, i64 %arg)
+ ret i64 %tmp5
+
+ %tmp7 = tail call i64 @f2(i8* %arg1, i64 %arg)
+ ret i64 %tmp7
+
+ ret i64 123
+ }
+
+ declare i64 @f1(i8*, i64)
+ declare i64 @f2(i8*, i64)
+
+...
+---
+name: test
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0:
+ successors: %bb.1, %bb.4
+ liveins: %rdi, %rsi
+
+ %rax = COPY %rdi
+ CMP64ri8 %rax, 99, implicit-def %eflags
+ JA_1 %bb.4, implicit %eflags
+ JMP_1 %bb.1
+
+ ; CHECK: bb.1:
+ ; CHECK-NEXT: successors: %bb.2({{[^)]+}}){{$}}
+ ; CHECK-NEXT: liveins: %rax, %rsi
+ ; CHECK-NEXT: {{^ $}}
+ ; CHECK-NEXT: %rdi = COPY %rsi
+ ; CHECK-NEXT: %rsi = COPY %rax
+ ; CHECK-NEXT: CMP64ri8 %rax, 9, implicit-def %eflags
+ ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit %rsp, implicit %rsp, implicit %rdi, implicit %rsi
+
+ bb.1:
+ successors: %bb.2, %bb.3
+ liveins: %rax, %rsi
+
+ CMP64ri8 %rax, 9, implicit-def %eflags
+ JA_1 %bb.3, implicit %eflags
+ JMP_1 %bb.2
+
+ bb.2:
+ liveins: %rax, %rsi
+
+ %rdi = COPY %rsi
+ %rsi = COPY %rax
+
+ TCRETURNdi64 @f1, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+
+ ; CHECK: bb.2:
+ ; CHECK-NEXT: liveins: %rax, %rsi, %rdi, %rsi
+ ; CHECK-NEXT: {{^ $}}
+ ; CHECK-NEXT: TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+
+ bb.3:
+ liveins: %rax, %rsi
+
+ %rdi = COPY %rsi
+ %rsi = COPY %rax
+ TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+
+ bb.4:
+ dead %eax = MOV32ri64 123, implicit-def %rax
+ RET 0, %rax
+
+...
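The decisive assertion is the TCRETURNdi64cc CHECK line in bb.1: with
this fix, the folded conditional tail-call carries "implicit %rdi,
implicit %rsi" for the two arguments alongside the csr_64 regmask,
matching the implicit operands of the original unconditional
TCRETURNdi64 it was folded from.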