[llvm] 155d584 - [AArch64] Avoid jump tables in swiftasync clobber-live-reg test. NFC.

Ahmed Bougacha via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 3 13:52:05 PST 2024


Author: Ahmed Bougacha
Date: 2024-01-03T13:51:46-08:00
New Revision: 155d5849da2b2bfa2da918923d8f148a96c03e72

URL: https://github.com/llvm/llvm-project/commit/155d5849da2b2bfa2da918923d8f148a96c03e72
DIFF: https://github.com/llvm/llvm-project/commit/155d5849da2b2bfa2da918923d8f148a96c03e72.diff

LOG: [AArch64] Avoid jump tables in swiftasync clobber-live-reg test. NFC.

The upstream test relies on jump-tables, which are lowered in
dramatically different ways with later arm64e/ptrauth patches.

Concretely, it's failing for at least two reasons:
- ptrauth removes x16/x17 from tcGPR64 to prevent indirect tail-calls
  from using either register as the callee, conflicting with their usage
  as scratch for the tail-call LR auth checking sequence.  In the
  1/2_available_regs_left tests, this causes the MI scheduler to move
  the load up across some of the inlineasm register clobbers.

- ptrauth adds an x16/x17-using pseudo for jump-table dispatch, which
  looks somewhat different from the regular jump-table dispatch codegen
  by itself, but also prevents compression currently.

They seem like sensible changes.  But they mean the tests aren't really
testing what they're intended to, because there's always an implicit
x16/x17 clobber when using jump-tables.

This updates the test in a way that should work identically regardless
of ptrauth support, with one exception, #1 above, which merely reorders
the load/inlineasm w.r.t. each other.
I verified the tests still fail the live-reg assertions when
applicable.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll b/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
index a202bfb6bca42f..1f7584b57e4aaf 100644
--- a/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
+++ b/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -o - -mtriple=arm64e-apple-macosx -aarch64-min-jump-table-entries=2 %s | FileCheck %s
+; RUN: llc -o - -mtriple=arm64e-apple-macosx %s | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
+; x16 is not available, shrink-wrapping cannot happen because
+; StoreSwiftAsyncContext needs it.
 define swifttailcc void @test_async_with_jumptable_x16_clobbered(ptr %src, ptr swiftasync %as) #0 {
 ; CHECK-LABEL: test_async_with_jumptable_x16_clobbered:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-NEXT:    add x16, sp, #8
 ; CHECK-NEXT:    movk x16, #49946, lsl #48
@@ -18,83 +20,52 @@ define swifttailcc void @test_async_with_jumptable_x16_clobbered(ptr %src, ptr s
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
+; CHECK-NEXT:    mov x20, x22
+; CHECK-NEXT:    mov x22, x0
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ldr x8, [x0]
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:  Lloh0:
-; CHECK-NEXT:    adrp x9, LJTI0_0 at PAGE
-; CHECK-NEXT:  Lloh1:
-; CHECK-NEXT:    add x9, x9, LJTI0_0 at PAGEOFF
-; CHECK-NEXT:  Ltmp0:
-; CHECK-NEXT:    adr x10, Ltmp0
-; CHECK-NEXT:    ldrsw x11, [x9, x8, lsl #2]
-; CHECK-NEXT:    add x10, x10, x11
-; CHECK-NEXT:    mov x19, x20
-; CHECK-NEXT:    br x10
-; CHECK-NEXT:  LBB0_1: ; %then.2
-; CHECK-NEXT:    mov x19, #0 ; =0x0
-; CHECK-NEXT:    b LBB0_3
-; CHECK-NEXT:  LBB0_2: ; %then.3
-; CHECK-NEXT:    mov x19, x22
-; CHECK-NEXT:  LBB0_3: ; %exit
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    cbnz x8, LBB0_2
+; CHECK-NEXT:  ; %bb.1: ; %then.1
+; CHECK-NEXT:    str xzr, [x22]
+; CHECK-NEXT:    mov x0, x22
+; CHECK-NEXT:  LBB0_2: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
+; CHECK-NEXT:    mov x1, x0
+; CHECK-NEXT:    mov x0, x20
 ; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldr x19, [sp], #32 ; 8-byte Folded Reload
 ; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    br x2
-; CHECK-NEXT:    .loh AdrpAdd Lloh0, Lloh1
-; CHECK-NEXT:    .cfi_endproc
-; CHECK-NEXT:    .section __TEXT,__const
-; CHECK-NEXT:    .p2align 2, 0x0
-; CHECK-NEXT:  LJTI0_0:
-; CHECK-NEXT:    .long LBB0_3-Ltmp0
-; CHECK-NEXT:    .long LBB0_1-Ltmp0
-; CHECK-NEXT:    .long LBB0_1-Ltmp0
-; CHECK-NEXT:    .long LBB0_2-Ltmp0
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    br x1
 entry:
   %x16 = tail call i64 asm "", "={x16}"()
   %l = load i64, ptr %src, align 8
-  switch i64 %l, label %dead [
-    i64 0, label %exit
-    i64 1, label %then.1
-    i64 2, label %then.2
-    i64 3, label %then.3
-  ]
+  %c = icmp eq i64 %l, 0
+  br i1 %c, label %then.1, label %exit
 
 then.1:
+  store i64 0, ptr %src
   br label %exit
 
-then.2:
-  br label %exit
-
-then.3:
-  br label %exit
-
-dead:                                                ; preds = %entryresume.5
-  unreachable
-
 exit:
-  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  %p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
   tail call void asm sideeffect "", "{x16}"(i64 %x16)
-  %r = call i64 @foo()
+  %r = call i64 @foo(ptr %p)
   %fn = inttoptr i64 %r to ptr
-  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
   ret void
 }
 
+; x17 is not available, shrink-wrapping cannot happen because
+; StoreSwiftAsyncContext needs it.
 define swifttailcc void @test_async_with_jumptable_x17_clobbered(ptr %src, ptr swiftasync %as) #0 {
 ; CHECK-LABEL: test_async_with_jumptable_x17_clobbered:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-NEXT:    add x16, sp, #8
 ; CHECK-NEXT:    movk x16, #49946, lsl #48
@@ -105,86 +76,61 @@ define swifttailcc void @test_async_with_jumptable_x17_clobbered(ptr %src, ptr s
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
+; CHECK-NEXT:    mov x20, x22
+; CHECK-NEXT:    mov x22, x0
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ldr x8, [x0]
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:  Lloh2:
-; CHECK-NEXT:    adrp x9, LJTI1_0 at PAGE
-; CHECK-NEXT:  Lloh3:
-; CHECK-NEXT:    add x9, x9, LJTI1_0 at PAGEOFF
-; CHECK-NEXT:  Ltmp1:
-; CHECK-NEXT:    adr x10, Ltmp1
-; CHECK-NEXT:    ldrsw x11, [x9, x8, lsl #2]
-; CHECK-NEXT:    add x10, x10, x11
-; CHECK-NEXT:    mov x19, x20
-; CHECK-NEXT:    br x10
-; CHECK-NEXT:  LBB1_1: ; %then.2
-; CHECK-NEXT:    mov x19, #0 ; =0x0
-; CHECK-NEXT:    b LBB1_3
-; CHECK-NEXT:  LBB1_2: ; %then.3
-; CHECK-NEXT:    mov x19, x22
-; CHECK-NEXT:  LBB1_3: ; %exit
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    cbnz x8, LBB1_2
+; CHECK-NEXT:  ; %bb.1: ; %then.1
+; CHECK-NEXT:    str xzr, [x22]
+; CHECK-NEXT:    mov x0, x22
+; CHECK-NEXT:  LBB1_2: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
+; CHECK-NEXT:    mov x1, x0
+; CHECK-NEXT:    mov x0, x20
 ; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldr x19, [sp], #32 ; 8-byte Folded Reload
 ; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    br x2
-; CHECK-NEXT:    .loh AdrpAdd Lloh2, Lloh3
-; CHECK-NEXT:    .cfi_endproc
-; CHECK-NEXT:    .section __TEXT,__const
-; CHECK-NEXT:    .p2align 2, 0x0
-; CHECK-NEXT:  LJTI1_0:
-; CHECK-NEXT:    .long LBB1_3-Ltmp1
-; CHECK-NEXT:    .long LBB1_1-Ltmp1
-; CHECK-NEXT:    .long LBB1_1-Ltmp1
-; CHECK-NEXT:    .long LBB1_2-Ltmp1
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    br x1
 entry:
   %x17 = tail call i64 asm "", "={x17}"()
   %l = load i64, ptr %src, align 8
-  switch i64 %l, label %dead [
-    i64 0, label %exit
-    i64 1, label %then.1
-    i64 2, label %then.2
-    i64 3, label %then.3
-  ]
+  %c = icmp eq i64 %l, 0
+  br i1 %c, label %then.1, label %exit
 
 then.1:
+  store i64 0, ptr %src
   br label %exit
 
-then.2:
-  br label %exit
-
-then.3:
-  br label %exit
-
-dead:                                                ; preds = %entryresume.5
-  unreachable
-
 exit:
-  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  %p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
   tail call void asm sideeffect "", "{x17}"(i64 %x17)
-  %r = call i64 @foo()
+  %r = call i64 @foo(ptr %p)
   %fn = inttoptr i64 %r to ptr
-  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
   ret void
 }
 
 define swifttailcc void @test_async_with_jumptable_x1_clobbered(ptr %src, ptr swiftasync %as) #0 {
 ; CHECK-LABEL: test_async_with_jumptable_x1_clobbered:
 ; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    mov x20, x22
+; CHECK-NEXT:    mov x22, x0
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    cbnz x8, LBB2_2
+; CHECK-NEXT:  ; %bb.1: ; %then.1
+; CHECK-NEXT:    str xzr, [x22]
+; CHECK-NEXT:    mov x0, x22
+; CHECK-NEXT:  LBB2_2: ; %exit
 ; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-NEXT:    add x16, sp, #8
 ; CHECK-NEXT:    movk x16, #49946, lsl #48
@@ -195,85 +141,52 @@ define swifttailcc void @test_async_with_jumptable_x1_clobbered(ptr %src, ptr sw
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:  Lloh4:
-; CHECK-NEXT:    adrp x9, LJTI2_0 at PAGE
-; CHECK-NEXT:  Lloh5:
-; CHECK-NEXT:    add x9, x9, LJTI2_0 at PAGEOFF
-; CHECK-NEXT:  Ltmp2:
-; CHECK-NEXT:    adr x10, Ltmp2
-; CHECK-NEXT:    ldrsw x11, [x9, x8, lsl #2]
-; CHECK-NEXT:    add x10, x10, x11
-; CHECK-NEXT:    mov x19, x20
-; CHECK-NEXT:    br x10
-; CHECK-NEXT:  LBB2_1: ; %then.2
-; CHECK-NEXT:    mov x19, #0 ; =0x0
-; CHECK-NEXT:    b LBB2_3
-; CHECK-NEXT:  LBB2_2: ; %then.3
-; CHECK-NEXT:    mov x19, x22
-; CHECK-NEXT:  LBB2_3: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
+; CHECK-NEXT:    mov x1, x0
+; CHECK-NEXT:    mov x0, x20
 ; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldr x19, [sp], #32 ; 8-byte Folded Reload
 ; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    br x2
-; CHECK-NEXT:    .loh AdrpAdd Lloh4, Lloh5
-; CHECK-NEXT:    .cfi_endproc
-; CHECK-NEXT:    .section __TEXT,__const
-; CHECK-NEXT:    .p2align 2, 0x0
-; CHECK-NEXT:  LJTI2_0:
-; CHECK-NEXT:    .long LBB2_3-Ltmp2
-; CHECK-NEXT:    .long LBB2_1-Ltmp2
-; CHECK-NEXT:    .long LBB2_1-Ltmp2
-; CHECK-NEXT:    .long LBB2_2-Ltmp2
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    br x1
 entry:
   %x1 = tail call i64 asm "", "={x1}"()
   %l = load i64, ptr %src, align 8
-  switch i64 %l, label %dead [
-    i64 0, label %exit
-    i64 1, label %then.1
-    i64 2, label %then.2
-    i64 3, label %then.3
-  ]
+  %c = icmp eq i64 %l, 0
+  br i1 %c, label %then.1, label %exit
 
 then.1:
+  store i64 0, ptr %src
   br label %exit
 
-then.2:
-  br label %exit
-
-then.3:
-  br label %exit
-
-dead:                                                ; preds = %entryresume.5
-  unreachable
-
 exit:
-  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  %p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
   tail call void asm sideeffect "", "{x1}"(i64 %x1)
-  %r = call i64 @foo()
+  %r = call i64 @foo(ptr %p)
   %fn = inttoptr i64 %r to ptr
-  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
   ret void
 }
 
 define swifttailcc void @test_async_with_jumptable_x1_x9_clobbered(ptr %src, ptr swiftasync %as) #0 {
 ; CHECK-LABEL: test_async_with_jumptable_x1_x9_clobbered:
 ; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    mov x20, x22
+; CHECK-NEXT:    mov x22, x0
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    cbnz x8, LBB3_2
+; CHECK-NEXT:  ; %bb.1: ; %then.1
+; CHECK-NEXT:    str xzr, [x22]
+; CHECK-NEXT:    mov x0, x22
+; CHECK-NEXT:  LBB3_2: ; %exit
 ; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-NEXT:    add x16, sp, #8
 ; CHECK-NEXT:    movk x16, #49946, lsl #48
@@ -284,76 +197,35 @@ define swifttailcc void @test_async_with_jumptable_x1_x9_clobbered(ptr %src, ptr
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:  Lloh6:
-; CHECK-NEXT:    adrp x10, LJTI3_0 at PAGE
-; CHECK-NEXT:  Lloh7:
-; CHECK-NEXT:    add x10, x10, LJTI3_0 at PAGEOFF
-; CHECK-NEXT:  Ltmp3:
-; CHECK-NEXT:    adr x11, Ltmp3
-; CHECK-NEXT:    ldrsw x12, [x10, x8, lsl #2]
-; CHECK-NEXT:    add x11, x11, x12
-; CHECK-NEXT:    mov x19, x20
-; CHECK-NEXT:    br x11
-; CHECK-NEXT:  LBB3_1: ; %then.2
-; CHECK-NEXT:    mov x19, #0 ; =0x0
-; CHECK-NEXT:    b LBB3_3
-; CHECK-NEXT:  LBB3_2: ; %then.3
-; CHECK-NEXT:    mov x19, x22
-; CHECK-NEXT:  LBB3_3: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
+; CHECK-NEXT:    mov x1, x0
+; CHECK-NEXT:    mov x0, x20
 ; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldr x19, [sp], #32 ; 8-byte Folded Reload
 ; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    br x2
-; CHECK-NEXT:    .loh AdrpAdd Lloh6, Lloh7
-; CHECK-NEXT:    .cfi_endproc
-; CHECK-NEXT:    .section __TEXT,__const
-; CHECK-NEXT:    .p2align 2, 0x0
-; CHECK-NEXT:  LJTI3_0:
-; CHECK-NEXT:    .long LBB3_3-Ltmp3
-; CHECK-NEXT:    .long LBB3_1-Ltmp3
-; CHECK-NEXT:    .long LBB3_1-Ltmp3
-; CHECK-NEXT:    .long LBB3_2-Ltmp3
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    br x1
 entry:
   %x1 = tail call i64 asm "", "={x1}"()
   %x9 = tail call i64 asm "", "={x9}"()
   %l = load i64, ptr %src, align 8
-  switch i64 %l, label %dead [
-    i64 0, label %exit
-    i64 1, label %then.1
-    i64 2, label %then.2
-    i64 3, label %then.3
-  ]
+  %c = icmp eq i64 %l, 0
+  br i1 %c, label %then.1, label %exit
 
 then.1:
+  store i64 0, ptr %src
   br label %exit
 
-then.2:
-  br label %exit
-
-then.3:
-  br label %exit
-
-dead:                                                ; preds = %entryresume.5
-  unreachable
-
 exit:
-  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  %p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
   tail call void asm sideeffect "", "{x1}"(i64 %x1)
   tail call void asm sideeffect "", "{x9}"(i64 %x9)
-  %r = call i64 @foo()
+  %r = call i64 @foo(ptr %p)
   %fn = inttoptr i64 %r to ptr
-  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
   ret void
 }
 
@@ -361,23 +233,8 @@ exit:
 define swifttailcc void @test_async_with_jumptable_2_available_regs_left(ptr %src, ptr swiftasync %as) #0 {
 ; CHECK-LABEL: test_async_with_jumptable_2_available_regs_left:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
-; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; CHECK-NEXT:    add x16, sp, #8
-; CHECK-NEXT:    movk x16, #49946, lsl #48
-; CHECK-NEXT:    mov x17, x22
-; CHECK-NEXT:    pacdb x17, x16
-; CHECK-NEXT:    str x17, [sp, #8]
-; CHECK-NEXT:    add x29, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa w29, 16
-; CHECK-NEXT:    .cfi_offset w30, -8
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
-; CHECK-NEXT:    ; InlineAsm Start
-; CHECK-NEXT:    ; InlineAsm End
-; CHECK-NEXT:    ; InlineAsm Start
-; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    mov x20, x22
+; CHECK-NEXT:    mov x22, x0
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
@@ -404,27 +261,27 @@ define swifttailcc void @test_async_with_jumptable_2_available_regs_left(ptr %sr
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
-; CHECK-NEXT:    ldr x10, [x0]
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:  Lloh8:
-; CHECK-NEXT:    adrp x17, LJTI4_0 at PAGE
-; CHECK-NEXT:  Lloh9:
-; CHECK-NEXT:    add x17, x17, LJTI4_0 at PAGEOFF
-; CHECK-NEXT:  Ltmp4:
-; CHECK-NEXT:    adr x0, Ltmp4
-; CHECK-NEXT:    ldrsw x19, [x17, x10, lsl #2]
-; CHECK-NEXT:    add x0, x0, x19
-; CHECK-NEXT:    mov x19, x20
-; CHECK-NEXT:    br x0
-; CHECK-NEXT:  LBB4_1: ; %then.2
-; CHECK-NEXT:    mov x19, #0 ; =0x0
-; CHECK-NEXT:    b LBB4_3
-; CHECK-NEXT:  LBB4_2: ; %then.3
-; CHECK-NEXT:    mov x19, x22
-; CHECK-NEXT:  LBB4_3: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    ldr x10, [x22]
+; CHECK-NEXT:    cbnz x10, LBB4_2
+; CHECK-NEXT:  ; %bb.1: ; %then.1
+; CHECK-NEXT:    str xzr, [x22]
+; CHECK-NEXT:    mov x0, x22
+; CHECK-NEXT:  LBB4_2: ; %exit
+; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    add x16, sp, #8
+; CHECK-NEXT:    movk x16, #49946, lsl #48
+; CHECK-NEXT:    mov x17, x22
+; CHECK-NEXT:    pacdb x17, x16
+; CHECK-NEXT:    str x17, [sp, #8]
+; CHECK-NEXT:    add x29, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
@@ -454,22 +311,12 @@ define swifttailcc void @test_async_with_jumptable_2_available_regs_left(ptr %sr
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
+; CHECK-NEXT:    mov x1, x0
+; CHECK-NEXT:    mov x0, x20
 ; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldr x19, [sp], #32 ; 8-byte Folded Reload
 ; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    br x2
-; CHECK-NEXT:    .loh AdrpAdd Lloh8, Lloh9
-; CHECK-NEXT:    .cfi_endproc
-; CHECK-NEXT:    .section __TEXT,__const
-; CHECK-NEXT:    .p2align 2, 0x0
-; CHECK-NEXT:  LJTI4_0:
-; CHECK-NEXT:    .long LBB4_3-Ltmp4
-; CHECK-NEXT:    .long LBB4_1-Ltmp4
-; CHECK-NEXT:    .long LBB4_1-Ltmp4
-; CHECK-NEXT:    .long LBB4_2-Ltmp4
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    br x1
 entry:
   %x1 = tail call i64 asm "", "={x1}"()
   %x2 = tail call i64 asm "", "={x2}"()
@@ -485,29 +332,16 @@ entry:
   %x13 = tail call i64 asm "", "={x13}"()
   %x14 = tail call i64 asm "", "={x14}"()
   %x15 = tail call i64 asm "", "={x15}"()
-  %x16 = tail call i64 asm "", "={x16}"()
   %l = load i64, ptr %src, align 8
-  switch i64 %l, label %dead [
-    i64 0, label %exit
-    i64 1, label %then.1
-    i64 2, label %then.2
-    i64 3, label %then.3
-  ]
+  %c = icmp eq i64 %l, 0
+  br i1 %c, label %then.1, label %exit
 
 then.1:
+  store i64 0, ptr %src
   br label %exit
 
-then.2:
-  br label %exit
-
-then.3:
-  br label %exit
-
-dead:                                                ; preds = %entryresume.5
-  unreachable
-
 exit:
-  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  %p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
   tail call void asm sideeffect "", "{x1}"(i64 %x1)
   tail call void asm sideeffect "", "{x2}"(i64 %x2)
   tail call void asm sideeffect "", "{x3}"(i64 %x3)
@@ -522,37 +356,32 @@ exit:
   tail call void asm sideeffect "", "{x13}"(i64 %x13)
   tail call void asm sideeffect "", "{x14}"(i64 %x14)
   tail call void asm sideeffect "", "{x15}"(i64 %x15)
-  tail call void asm sideeffect "", "{x16}"(i64 %x16)
-  %r = call i64 @foo()
+  %r = call i64 @foo(ptr %p)
   %fn = inttoptr i64 %r to ptr
-  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
   ret void
 }
 
+
 ; There is only 1 available scratch registers left, shrink-wrapping cannot
 ; happen because StoreSwiftAsyncContext needs 2 free scratch registers.
 define swifttailcc void @test_async_with_jumptable_1_available_reg_left(ptr %src, ptr swiftasync %as) #0 {
 ; CHECK-LABEL: test_async_with_jumptable_1_available_reg_left:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    stp x21, x19, [sp, #8] ; 16-byte Folded Spill
-; CHECK-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
-; CHECK-NEXT:    add x16, sp, #24
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    add x16, sp, #8
 ; CHECK-NEXT:    movk x16, #49946, lsl #48
 ; CHECK-NEXT:    mov x17, x22
 ; CHECK-NEXT:    pacdb x17, x16
-; CHECK-NEXT:    str x17, [sp, #24]
-; CHECK-NEXT:    add x29, sp, #32
+; CHECK-NEXT:    str x17, [sp, #8]
+; CHECK-NEXT:    add x29, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
-; CHECK-NEXT:    .cfi_offset w21, -40
-; CHECK-NEXT:    ; InlineAsm Start
-; CHECK-NEXT:    ; InlineAsm End
-; CHECK-NEXT:    ; InlineAsm Start
-; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    mov x20, x22
+; CHECK-NEXT:    mov x22, x0
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
@@ -581,27 +410,15 @@ define swifttailcc void @test_async_with_jumptable_1_available_reg_left(ptr %src
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
-; CHECK-NEXT:    ldr x10, [x0]
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:  Lloh10:
-; CHECK-NEXT:    adrp x0, LJTI5_0 at PAGE
-; CHECK-NEXT:  Lloh11:
-; CHECK-NEXT:    add x0, x0, LJTI5_0 at PAGEOFF
-; CHECK-NEXT:  Ltmp5:
-; CHECK-NEXT:    adr x21, Ltmp5
-; CHECK-NEXT:    ldrsw x19, [x0, x10, lsl #2]
-; CHECK-NEXT:    add x21, x21, x19
-; CHECK-NEXT:    mov x19, x20
-; CHECK-NEXT:    br x21
-; CHECK-NEXT:  LBB5_1: ; %then.2
-; CHECK-NEXT:    mov x19, #0 ; =0x0
-; CHECK-NEXT:    b LBB5_3
-; CHECK-NEXT:  LBB5_2: ; %then.3
-; CHECK-NEXT:    mov x19, x22
-; CHECK-NEXT:  LBB5_3: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    ldr x10, [x22]
+; CHECK-NEXT:    cbnz x10, LBB5_2
+; CHECK-NEXT:  ; %bb.1: ; %then.1
+; CHECK-NEXT:    str xzr, [x22]
+; CHECK-NEXT:    mov x0, x22
+; CHECK-NEXT:  LBB5_2: ; %exit
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ; InlineAsm Start
@@ -633,23 +450,12 @@ define swifttailcc void @test_async_with_jumptable_1_available_reg_left(ptr %src
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
-; CHECK-NEXT:    ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldp x21, x19, [sp, #8] ; 16-byte Folded Reload
+; CHECK-NEXT:    mov x1, x0
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
 ; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    add sp, sp, #48
-; CHECK-NEXT:    br x2
-; CHECK-NEXT:    .loh AdrpAdd Lloh10, Lloh11
-; CHECK-NEXT:    .cfi_endproc
-; CHECK-NEXT:    .section __TEXT,__const
-; CHECK-NEXT:    .p2align 2, 0x0
-; CHECK-NEXT:  LJTI5_0:
-; CHECK-NEXT:    .long LBB5_3-Ltmp5
-; CHECK-NEXT:    .long LBB5_1-Ltmp5
-; CHECK-NEXT:    .long LBB5_1-Ltmp5
-; CHECK-NEXT:    .long LBB5_2-Ltmp5
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    br x1
 entry:
   %x1 = tail call i64 asm "", "={x1}"()
   %x2 = tail call i64 asm "", "={x2}"()
@@ -666,29 +472,16 @@ entry:
   %x14 = tail call i64 asm "", "={x14}"()
   %x15 = tail call i64 asm "", "={x15}"()
   %x16 = tail call i64 asm "", "={x16}"()
-  %x17 = tail call i64 asm "", "={x17}"()
   %l = load i64, ptr %src, align 8
-  switch i64 %l, label %dead [
-    i64 0, label %exit
-    i64 1, label %then.1
-    i64 2, label %then.2
-    i64 3, label %then.3
-  ]
+  %c = icmp eq i64 %l, 0
+  br i1 %c, label %then.1, label %exit
 
 then.1:
+  store i64 0, ptr %src
   br label %exit
 
-then.2:
-  br label %exit
-
-then.3:
-  br label %exit
-
-dead:                                                ; preds = %entryresume.5
-  unreachable
-
 exit:
-  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  %p = phi ptr [ %src, %then.1 ], [ %as, %entry ]
   tail call void asm sideeffect "", "{x1}"(i64 %x1)
   tail call void asm sideeffect "", "{x2}"(i64 %x2)
   tail call void asm sideeffect "", "{x3}"(i64 %x3)
@@ -704,13 +497,12 @@ exit:
   tail call void asm sideeffect "", "{x14}"(i64 %x14)
   tail call void asm sideeffect "", "{x15}"(i64 %x15)
   tail call void asm sideeffect "", "{x16}"(i64 %x16)
-  tail call void asm sideeffect "", "{x17}"(i64 %x17)
-  %r = call i64 @foo()
+  %r = call i64 @foo(ptr %p)
   %fn = inttoptr i64 %r to ptr
-  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %as)
   ret void
 }
 
-declare i64 @foo()
+declare i64 @foo(ptr)
 
 attributes #0 = { "frame-pointer"="non-leaf" }


        


More information about the llvm-commits mailing list