[llvm] r322917 - AArch64: Omit callframe setup/destroy when not necessary
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 18 18:45:38 PST 2018
Author: matze
Date: Thu Jan 18 18:45:38 2018
New Revision: 322917
URL: http://llvm.org/viewvc/llvm-project?rev=322917&view=rev
Log:
AArch64: Omit callframe setup/destroy when not necessary
Do not create CALLSEQ_START/CALLSEQ_END when there is no callframe to
setup and the callframe size is 0.
- Fixes an invalid callframe nesting for byval arguments, which would
look like this before this patch (as in `big-byval.ll`):
...
ADJCALLSTACKDOWN 32768, 0, ... # Setup for extfunc
...
ADJCALLSTACKDOWN 0, 0, ... # setup for memcpy
...
BL &memcpy ...
ADJCALLSTACKUP 0, 0, ... # destroy for memcpy
...
BL &extfunc
ADJCALLSTACKUP 32768, 0, ... # destroy for extfunc
- Saves us two instructions in the common case of zero-sized stackframes.
- Remove an unnecessary scheduling barrier (hence the small unittest
changes).
Differential Revision: https://reviews.llvm.org/D42006
Added:
llvm/trunk/test/CodeGen/AArch64/big-byval.ll
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll
llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
llvm/trunk/test/CodeGen/AArch64/func-calls.ll
llvm/trunk/test/CodeGen/AArch64/nontemporal.ll
llvm/trunk/test/CodeGen/AArch64/swifterror.ll
llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Thu Jan 18 18:45:38 2018
@@ -3343,9 +3343,15 @@ AArch64TargetLowering::LowerCall(CallLow
assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
}
+ // We can omit callseq_start/callseq_end if there is no callframe to setup.
+ // Do not omit for patchpoints as SelectionDAGBuilder::visitPatchpoint()
+ // currently expects it.
+ bool OmitCallSeq = NumBytes == 0 && !CLI.IsPatchPoint;
+ assert((!IsSibCall || OmitCallSeq) && "Should not get callseq for sibcalls");
+
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- if (!IsSibCall)
+ if (!OmitCallSeq)
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
@@ -3511,7 +3517,7 @@ AArch64TargetLowering::LowerCall(CallLow
// the frame up *after* the call, however in the ABI-changing tail-call case
// we've carefully laid out the parameters so that when sp is reset they'll be
// in the correct location.
- if (IsTailCall && !IsSibCall) {
+ if (IsTailCall && !OmitCallSeq) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
InFlag = Chain.getValue(1);
@@ -3569,9 +3575,11 @@ AArch64TargetLowering::LowerCall(CallLow
uint64_t CalleePopBytes =
DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
- DAG.getIntPtrConstant(CalleePopBytes, DL, true),
- InFlag, DL);
+ if (!OmitCallSeq)
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
+ DAG.getIntPtrConstant(CalleePopBytes, DL, true),
+ InFlag, DL);
+
if (!Ins.empty())
InFlag = Chain.getValue(1);
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll Thu Jan 18 18:45:38 2018
@@ -5,9 +5,9 @@
; CHECK: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16]
; CHECK-NEXT: add x29, sp, #16
-; CHECK-NEXT: stur wzr, [x29, #-4]
; CHECK: adrp x0, l_.str at PAGE
; CHECK: add x0, x0, l_.str at PAGEOFF
+; CHECK-NEXT: stur wzr, [x29, #-4]
; CHECK-NEXT: bl _puts
; CHECK-NEXT: ldp x29, x30, [sp, #16]
; CHECK-NEXT: add sp, sp, #32
@@ -15,9 +15,9 @@
; CHECK-LINUX-LABEL: main:
; CHECK-LINUX: str x30, [sp, #-16]!
-; CHECK-LINUX-NEXT: str wzr, [sp, #12]
; CHECK-LINUX: adrp x0, .L.str
; CHECK-LINUX: add x0, x0, :lo12:.L.str
+; CHECK-LINUX-NEXT: str wzr, [sp, #12]
; CHECK-LINUX-NEXT: bl puts
; CHECK-LINUX-NEXT: ldr x30, [sp], #16
; CHECK-LINUX-NEXT: ret
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll Thu Jan 18 18:45:38 2018
@@ -22,10 +22,10 @@ target triple = "arm64-apple-ios"
; DISABLE: cmp w0, w1
; DISABLE-NEXT: b.ge [[EXIT_LABEL:LBB[0-9_]+]]
;
-; Store %a in the alloca.
-; CHECK: stur w0, {{\[}}[[SAVE_SP]], #-4]
; Set the alloca address in the second argument.
-; CHECK-NEXT: sub x1, [[SAVE_SP]], #4
+; CHECK: sub x1, [[SAVE_SP]], #4
+; Store %a in the alloca.
+; CHECK-NEXT: stur w0, {{\[}}[[SAVE_SP]], #-4]
; Set the first argument to zero.
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: bl _doSomething
Added: llvm/trunk/test/CodeGen/AArch64/big-byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/big-byval.ll?rev=322917&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/big-byval.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/big-byval.ll Thu Jan 18 18:45:38 2018
@@ -0,0 +1,13 @@
+; RUN: llc -o - %s -verify-machineinstrs | FileCheck %s
+target triple = "aarch64--"
+
+; Make sure we don't fail machine verification because the memcpy callframe
+; setup is nested inside the extfunc callframe setup.
+; CHECK-LABEL: func:
+; CHECK: bl memcpy
+; CHECK: bl extfunc
+declare void @extfunc([4096 x i64]* byval %p)
+define void @func([4096 x i64]* %z) {
+ call void @extfunc([4096 x i64]* byval %z)
+ ret void
+}
Modified: llvm/trunk/test/CodeGen/AArch64/func-calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-calls.ll?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-calls.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-calls.ll Thu Jan 18 18:45:38 2018
@@ -63,10 +63,10 @@ define void @simple_rets() {
store [2 x i64] %arr, [2 x i64]* @varsmallstruct
; CHECK: bl return_smallstruct
; CHECK: add x[[VARSMALLSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varsmallstruct
+; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
; CHECK: stp x0, x1, [x[[VARSMALLSTRUCT]]]
call void @return_large_struct(%myStruct* sret @varstruct)
-; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
; CHECK: bl return_large_struct
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/nontemporal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/nontemporal.ll?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/nontemporal.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/nontemporal.ll Thu Jan 18 18:45:38 2018
@@ -313,8 +313,8 @@ declare void @dummy(<4 x float>*)
define void @test_stnp_v4f32_offset_alloca(<4 x float> %v) #0 {
; CHECK-LABEL: test_stnp_v4f32_offset_alloca:
-; CHECK: stnp d0, d{{.*}}, [sp]
-; CHECK-NEXT: mov x0, sp
+; CHECK: mov x0, sp
+; CHECK-NEXT: stnp d0, d{{.*}}, [sp]
; CHECK-NEXT: bl _dummy
%tmp0 = alloca <4 x float>
store <4 x float> %v, <4 x float>* %tmp0, align 1, !nontemporal !0
@@ -324,8 +324,8 @@ define void @test_stnp_v4f32_offset_allo
define void @test_stnp_v4f32_offset_alloca_2(<4 x float> %v) #0 {
; CHECK-LABEL: test_stnp_v4f32_offset_alloca_2:
-; CHECK: stnp d0, d{{.*}}, [sp, #16]
-; CHECK-NEXT: mov x0, sp
+; CHECK: mov x0, sp
+; CHECK-NEXT: stnp d0, d{{.*}}, [sp, #16]
; CHECK-NEXT: bl _dummy
%tmp0 = alloca <4 x float>, i32 2
%tmp1 = getelementptr <4 x float>, <4 x float>* %tmp0, i32 1
Modified: llvm/trunk/test/CodeGen/AArch64/swifterror.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/swifterror.ll?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/swifterror.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/swifterror.ll Thu Jan 18 18:45:38 2018
@@ -223,8 +223,8 @@ bb_end:
; parameter.
define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
; CHECK-APPLE-LABEL: foo_sret:
-; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8
; CHECK-APPLE: orr w0, wzr, #0x10
+; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8
; CHECK-APPLE: malloc
; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1
; CHECK-APPLE: strb [[ID]], [x0, #8]
@@ -406,7 +406,7 @@ entry:
ret float %0
}
-; CHECK-APPLE-LABEL: swifterror_clobber
+; CHECK-APPLE-LABEL: swifterror_clobber:
; CHECK-APPLE: mov [[REG:x[0-9]+]], x21
; CHECK-APPLE: nop
; CHECK-APPLE: mov x21, [[REG]]
@@ -415,7 +415,7 @@ define swiftcc void @swifterror_clobber(
ret void
}
-; CHECK-APPLE-LABEL: swifterror_reg_clobber
+; CHECK-APPLE-LABEL: swifterror_reg_clobber:
; CHECK-APPLE: stp {{.*}}x21
; CHECK-APPLE: nop
; CHECK-APPLE: ldp {{.*}}x21
@@ -423,7 +423,7 @@ define swiftcc void @swifterror_reg_clob
call void asm sideeffect "nop", "~{x21}"()
ret void
}
-; CHECK-APPLE-LABEL: params_in_reg
+; CHECK-APPLE-LABEL: params_in_reg:
; Save callee saved registers and swifterror since it will be clobbered by the first call to params_in_reg2.
; CHECK-APPLE: stp x21, x28, [sp
; CHECK-APPLE: stp x27, x26, [sp
@@ -431,16 +431,15 @@ define swiftcc void @swifterror_reg_clob
; CHECK-APPLE: stp x23, x22, [sp
; CHECK-APPLE: stp x20, x19, [sp
; CHECK-APPLE: stp x29, x30, [sp
-; CHECK-APPLE: str x20, [sp
+; CHECK-APPLE: str x7, [sp
; Store argument registers.
-; CHECK-APPLE: mov x23, x7
-; CHECK-APPLE: mov x24, x6
-; CHECK-APPLE: mov x25, x5
-; CHECK-APPLE: mov x26, x4
-; CHECK-APPLE: mov x27, x3
-; CHECK-APPLE: mov x28, x2
-; CHECK-APPLE: mov x19, x1
-; CHECK-APPLE: mov x22, x0
+; CHECK-APPLE: mov x23, x6
+; CHECK-APPLE: mov x24, x5
+; CHECK-APPLE: mov x25, x4
+; CHECK-APPLE: mov x26, x3
+; CHECK-APPLE: mov x27, x2
+; CHECK-APPLE: mov x28, x1
+; CHECK-APPLE: mov x19, x0
; Setup call.
; CHECK-APPLE: orr w0, wzr, #0x1
; CHECK-APPLE: orr w1, wzr, #0x2
@@ -450,20 +449,20 @@ define swiftcc void @swifterror_reg_clob
; CHECK-APPLE: orr w5, wzr, #0x6
; CHECK-APPLE: orr w6, wzr, #0x7
; CHECK-APPLE: orr w7, wzr, #0x8
+; CHECK-APPLE: mov x22, x20
; CHECK-APPLE: mov x20, xzr
; CHECK-APPLE: mov x21, xzr
; CHECK-APPLE: bl _params_in_reg2
; Restore original arguments for next call.
-; CHECK-APPLE: mov x0, x22
-; CHECK-APPLE: mov x1, x19
-; CHECK-APPLE: mov x2, x28
-; CHECK-APPLE: mov x3, x27
-; CHECK-APPLE: mov x4, x26
-; CHECK-APPLE: mov x5, x25
-; CHECK-APPLE: mov x6, x24
-; CHECK-APPLE: mov x7, x23
+; CHECK-APPLE: mov x0, x19
+; CHECK-APPLE: mov x1, x28
+; CHECK-APPLE: mov x2, x27
+; CHECK-APPLE: mov x3, x26
+; CHECK-APPLE: mov x4, x25
+; CHECK-APPLE: mov x5, x24
; Restore original swiftself argument and swifterror %err.
-; CHECK-APPLE: ldp x20, x21, [sp
+; CHECK-APPLE: ldp x7, x21, [sp
+; CHECK-APPLE: mov x20, x22
; CHECK-APPLE: bl _params_in_reg2
; Restore callee save registers but don't clobber swifterror x21.
; CHECK-APPLE-NOT: x21
@@ -489,9 +488,9 @@ define swiftcc void @params_in_reg(i64,
}
declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err)
-; CHECK-APPLE-LABEL: params_and_return_in_reg
+; CHECK-APPLE-LABEL: params_and_return_in_reg:
; Store callee saved registers.
-; CHECK-APPLE: stp x20, x28, [sp, #24
+; CHECK-APPLE: stp x7, x28, [sp, #24
; CHECK-APPLE: stp x27, x26, [sp
; CHECK-APPLE: stp x25, x24, [sp
; CHECK-APPLE: stp x23, x22, [sp
@@ -499,14 +498,13 @@ declare swiftcc void @params_in_reg2(i64
; CHECK-APPLE: stp x29, x30, [sp
; Save original arguments.
; CHECK-APPLE: mov x23, x21
-; CHECK-APPLE: str x7, [sp, #16]
-; CHECK-APPLE: mov x24, x6
-; CHECK-APPLE: mov x25, x5
-; CHECK-APPLE: mov x26, x4
-; CHECK-APPLE: mov x27, x3
-; CHECK-APPLE: mov x28, x2
-; CHECK-APPLE: mov x19, x1
-; CHECK-APPLE: mov x22, x0
+; CHECK-APPLE: str x6, [sp, #16]
+; CHECK-APPLE: mov x24, x5
+; CHECK-APPLE: mov x25, x4
+; CHECK-APPLE: mov x26, x3
+; CHECK-APPLE: mov x27, x2
+; CHECK-APPLE: mov x28, x1
+; CHECK-APPLE: mov x19, x0
; Setup call arguments.
; CHECK-APPLE: orr w0, wzr, #0x1
; CHECK-APPLE: orr w1, wzr, #0x2
@@ -516,24 +514,26 @@ declare swiftcc void @params_in_reg2(i64
; CHECK-APPLE: orr w5, wzr, #0x6
; CHECK-APPLE: orr w6, wzr, #0x7
; CHECK-APPLE: orr w7, wzr, #0x8
+; CHECK-APPLE: mov x22, x20
; CHECK-APPLE: mov x20, xzr
; CHECK-APPLE: mov x21, xzr
; CHECK-APPLE: bl _params_in_reg2
; Store swifterror %error_ptr_ref.
; CHECK-APPLE: str x21, [sp, #8]
; Setup call arguments from original arguments.
-; CHECK-APPLE: mov x0, x22
-; CHECK-APPLE: mov x1, x19
-; CHECK-APPLE: mov x2, x28
-; CHECK-APPLE: mov x3, x27
-; CHECK-APPLE: mov x4, x26
-; CHECK-APPLE: mov x5, x25
-; CHECK-APPLE: mov x6, x24
-; CHECK-APPLE: ldp x7, x20, [sp, #16]
+; CHECK-APPLE: mov x0, x19
+; CHECK-APPLE: mov x1, x28
+; CHECK-APPLE: mov x2, x27
+; CHECK-APPLE: mov x3, x26
+; CHECK-APPLE: mov x4, x25
+; CHECK-APPLE: mov x5, x24
+; CHECK-APPLE: ldp x6, x7, [sp, #16]
+; CHECK-APPLE: mov x20, x22
; CHECK-APPLE: mov x21, x23
; CHECK-APPLE: bl _params_and_return_in_reg2
+; Save swifterror %err.
+; CHECK-APPLE: str x0, [sp, #24]
; Store return values.
-; CHECK-APPLE: mov x19, x0
; CHECK-APPLE: mov x22, x1
; CHECK-APPLE: mov x24, x2
; CHECK-APPLE: mov x25, x3
@@ -541,8 +541,6 @@ declare swiftcc void @params_in_reg2(i64
; CHECK-APPLE: mov x27, x5
; CHECK-APPLE: mov x28, x6
; CHECK-APPLE: mov x23, x7
-; Save swifterror %err.
-; CHECK-APPLE: str x21, [sp, #24]
; Setup call.
; CHECK-APPLE: orr w0, wzr, #0x1
; CHECK-APPLE: orr w1, wzr, #0x2
@@ -552,12 +550,12 @@ declare swiftcc void @params_in_reg2(i64
; CHECK-APPLE: orr w5, wzr, #0x6
; CHECK-APPLE: orr w6, wzr, #0x7
; CHECK-APPLE: orr w7, wzr, #0x8
+; CHECK-APPLE: mov x19, x21
; CHECK-APPLE: mov x20, xzr
; ... setup call with swifterror %error_ptr_ref.
; CHECK-APPLE: ldr x21, [sp, #8]
; CHECK-APPLE: bl _params_in_reg2
; Restore return values for return from this function.
-; CHECK-APPLE: mov x0, x19
; CHECK-APPLE: mov x1, x22
; CHECK-APPLE: mov x2, x24
; CHECK-APPLE: mov x3, x25
@@ -565,13 +563,14 @@ declare swiftcc void @params_in_reg2(i64
; CHECK-APPLE: mov x5, x27
; CHECK-APPLE: mov x6, x28
; CHECK-APPLE: mov x7, x23
+; CHECK-APPLE: mov x21, x19
; Restore swifterror %err and callee save registers.
-; CHECK-APPLE: ldp x21, x28, [sp, #24
; CHECK-APPLE: ldp x29, x30, [sp
; CHECK-APPLE: ldp x20, x19, [sp
; CHECK-APPLE: ldp x23, x22, [sp
; CHECK-APPLE: ldp x25, x24, [sp
; CHECK-APPLE: ldp x27, x26, [sp
+; CHECK-APPLE: ldp x0, x28, [sp, #24
; CHECK-APPLE: ret
define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err) {
%error_ptr_ref = alloca swifterror %swift_error*, align 8
@@ -601,14 +600,14 @@ entry:
declare swiftcc void @foo2(%swift_error** swifterror)
; Make sure we properly assign registers during fast-isel.
-; CHECK-O0-LABEL: testAssign
+; CHECK-O0-LABEL: testAssign:
; CHECK-O0: mov [[TMP:x.*]], xzr
; CHECK-O0: mov x21, [[TMP]]
; CHECK-O0: bl _foo2
; CHECK-O0: str x21, [s[[STK:.*]]]
; CHECK-O0: ldr x0, [s[[STK]]]
-; CHECK-APPLE-LABEL: testAssign
+; CHECK-APPLE-LABEL: testAssign:
; CHECK-APPLE: mov x21, xzr
; CHECK-APPLE: bl _foo2
; CHECK-APPLE: mov x0, x21
Modified: llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll?rev=322917&r1=322916&r2=322917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll Thu Jan 18 18:45:38 2018
@@ -36,9 +36,9 @@ define void @test_tailcall_explicit_sret
; CHECK-LABEL: _test_tailcall_explicit_sret_alloca_dummyusers:
; CHECK: ldr [[PTRLOAD1:q[0-9]+]], [x0]
-; CHECK: str [[PTRLOAD1]], [sp]
; CHECK: mov x8, sp
-; CHECK-NEXT: bl _test_explicit_sret
+; CHECK: str [[PTRLOAD1]], [sp]
+; CHECK: bl _test_explicit_sret
; CHECK: ret
define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
%l = alloca i1024, align 8
@@ -75,10 +75,10 @@ define i1024 @test_tailcall_explicit_sre
}
; CHECK-LABEL: _test_indirect_tailcall_explicit_sret_nosret_arg:
-; CHECK-DAG: mov x[[CALLERX8NUM:[0-9]+]], x8
-; CHECK-DAG: mov [[FPTR:x[0-9]+]], x0
+; CHECK: mov [[FPTR:x[0-9]+]], x0
; CHECK: mov x0, sp
-; CHECK-NEXT: blr [[FPTR]]
+; CHECK: mov x[[CALLERX8NUM:[0-9]+]], x8
+; CHECK: blr [[FPTR]]
; CHECK: ldr [[CALLERSRET1:q[0-9]+]], [sp]
; CHECK: str [[CALLERSRET1:q[0-9]+]], [x[[CALLERX8NUM]]]
; CHECK: ret
More information about the llvm-commits
mailing list