[llvm] 820b358 - [AArch64] Add artificial clobbers to swift async context test.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 24 06:17:26 PST 2023


Author: Florian Hahn
Date: 2023-11-24T14:14:49Z
New Revision: 820b3583c9681a1969814ac3b04a6fe41b87d079

URL: https://github.com/llvm/llvm-project/commit/820b3583c9681a1969814ac3b04a6fe41b87d079
DIFF: https://github.com/llvm/llvm-project/commit/820b3583c9681a1969814ac3b04a6fe41b87d079.diff

LOG: [AArch64] Add artificial clobbers to swift async context test.

Manually add clobbers for various register combinations to the test. This
highlights cases where shrink-wrapping is performed incorrectly, causing the
StoreSwiftAsyncContext expansion to clobber a live register.
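
The clobbers are created with paired empty inline-asm calls that pin a value
in a specific register from function entry until after the jump table, keeping
that register live across the prologue. A minimal sketch of the pattern, using
x16 as in the first test below:

  ; Def: force a value to live in x16 at function entry.
  %x16 = tail call i64 asm "", "={x16}"()
  ...
  ; Use: keep x16 live until after the switch, so the
  ; StoreSwiftAsyncContext expansion should not use it as scratch.
  tail call void asm sideeffect "", "{x16}"(i64 %x16)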

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll b/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
index 832e2a6eae74fcb..f81531766ca13bf 100644
--- a/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
+++ b/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
@@ -1,42 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -o - -mtriple=arm64e-apple-macosx %s | FileCheck %s
+; RUN: not --crash llc -o - -mtriple=arm64e-apple-macosx -aarch64-min-jump-table-entries=2 %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-define swifttailcc void @test_async_with_jumptable(ptr %src, ptr swiftasync %as) #0 {
-; CHECK-LABEL: test_async_with_jumptable:
-; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
-; CHECK-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
-; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; CHECK-NEXT:    add x16, sp, #8
-; CHECK-NEXT:    movk x16, #49946, lsl #48
-; CHECK-NEXT:    mov x17, x22
-; CHECK-NEXT:    pacdb x17, x16
-; CHECK-NEXT:    str x17, [sp, #8]
-; CHECK-NEXT:    add x29, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa w29, 16
-; CHECK-NEXT:    .cfi_offset w30, -8
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    .cfi_offset w19, -32
-; CHECK-NEXT:    mov x20, x22
-; CHECK-NEXT:    ldr x8, [x0]
-; CHECK-NEXT:    cmp x8, #2
-; CHECK-NEXT:    csel x9, xzr, x0, eq
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x10, x22, xzr, eq
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    csel x19, x9, x10, gt
-; CHECK-NEXT:    mov x22, x0
-; CHECK-NEXT:    bl _foo
-; CHECK-NEXT:    mov x2, x0
-; CHECK-NEXT:    mov x0, x19
-; CHECK-NEXT:    mov x1, x20
-; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; CHECK-NEXT:    ldr x19, [sp], #32 ; 8-byte Folded Reload
-; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
-; CHECK-NEXT:    br x2
+define swifttailcc void @test_async_with_jumptable_x16_clobbered(ptr %src, ptr swiftasync %as) #0 {
 entry:
+  %x16 = tail call i64 asm "", "={x16}"()
   %l = load i64, ptr %src, align 8
   switch i64 %l, label %dead [
     i64 0, label %exit
@@ -59,6 +28,230 @@ dead:                                                ; preds = %entryresume.5
 
 exit:
   %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  tail call void asm sideeffect "", "{x16}"(i64 %x16)
+  %r = call i64 @foo()
+  %fn = inttoptr i64 %r to ptr
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  ret void
+}
+
+define swifttailcc void @test_async_with_jumptable_x17_clobbered(ptr %src, ptr swiftasync %as) #0 {
+entry:
+  %x17 = tail call i64 asm "", "={x17}"()
+  %l = load i64, ptr %src, align 8
+  switch i64 %l, label %dead [
+    i64 0, label %exit
+    i64 1, label %then.1
+    i64 2, label %then.2
+    i64 3, label %then.3
+  ]
+
+then.1:
+  br label %exit
+
+then.2:
+  br label %exit
+
+then.3:
+  br label %exit
+
+dead:                                                ; preds = %entryresume.5
+  unreachable
+
+exit:
+  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  tail call void asm sideeffect "", "{x17}"(i64 %x17)
+  %r = call i64 @foo()
+  %fn = inttoptr i64 %r to ptr
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  ret void
+}
+
+define swifttailcc void @test_async_with_jumptable_x1_clobbered(ptr %src, ptr swiftasync %as) #0 {
+entry:
+  %x1 = tail call i64 asm "", "={x1}"()
+  %l = load i64, ptr %src, align 8
+  switch i64 %l, label %dead [
+    i64 0, label %exit
+    i64 1, label %then.1
+    i64 2, label %then.2
+    i64 3, label %then.3
+  ]
+
+then.1:
+  br label %exit
+
+then.2:
+  br label %exit
+
+then.3:
+  br label %exit
+
+dead:                                                ; preds = %entryresume.5
+  unreachable
+
+exit:
+  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  tail call void asm sideeffect "", "{x1}"(i64 %x1)
+  %r = call i64 @foo()
+  %fn = inttoptr i64 %r to ptr
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  ret void
+}
+
+define swifttailcc void @test_async_with_jumptable_x1_x9_clobbered(ptr %src, ptr swiftasync %as) #0 {
+entry:
+  %x1 = tail call i64 asm "", "={x1}"()
+  %x9 = tail call i64 asm "", "={x9}"()
+  %l = load i64, ptr %src, align 8
+  switch i64 %l, label %dead [
+    i64 0, label %exit
+    i64 1, label %then.1
+    i64 2, label %then.2
+    i64 3, label %then.3
+  ]
+
+then.1:
+  br label %exit
+
+then.2:
+  br label %exit
+
+then.3:
+  br label %exit
+
+dead:                                                ; preds = %entryresume.5
+  unreachable
+
+exit:
+  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  tail call void asm sideeffect "", "{x1}"(i64 %x1)
+  tail call void asm sideeffect "", "{x9}"(i64 %x9)
+  %r = call i64 @foo()
+  %fn = inttoptr i64 %r to ptr
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  ret void
+}
+
+; There are 2 available scratch registers left, so shrink-wrapping can happen.
+define swifttailcc void @test_async_with_jumptable_2_available_regs_left(ptr %src, ptr swiftasync %as) #0 {
+entry:
+  %x1 = tail call i64 asm "", "={x1}"()
+  %x2 = tail call i64 asm "", "={x2}"()
+  %x3 = tail call i64 asm "", "={x3}"()
+  %x4 = tail call i64 asm "", "={x4}"()
+  %x5 = tail call i64 asm "", "={x5}"()
+  %x6 = tail call i64 asm "", "={x6}"()
+  %x7 = tail call i64 asm "", "={x7}"()
+  %x8 = tail call i64 asm "", "={x8}"()
+  %x9 = tail call i64 asm "", "={x9}"()
+  %x11 = tail call i64 asm "", "={x11}"()
+  %x12 = tail call i64 asm "", "={x12}"()
+  %x13 = tail call i64 asm "", "={x13}"()
+  %x14 = tail call i64 asm "", "={x14}"()
+  %x15 = tail call i64 asm "", "={x15}"()
+  %x16 = tail call i64 asm "", "={x16}"()
+  %l = load i64, ptr %src, align 8
+  switch i64 %l, label %dead [
+    i64 0, label %exit
+    i64 1, label %then.1
+    i64 2, label %then.2
+    i64 3, label %then.3
+  ]
+
+then.1:
+  br label %exit
+
+then.2:
+  br label %exit
+
+then.3:
+  br label %exit
+
+dead:                                                ; preds = %entryresume.5
+  unreachable
+
+exit:
+  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  tail call void asm sideeffect "", "{x1}"(i64 %x1)
+  tail call void asm sideeffect "", "{x2}"(i64 %x2)
+  tail call void asm sideeffect "", "{x3}"(i64 %x3)
+  tail call void asm sideeffect "", "{x4}"(i64 %x4)
+  tail call void asm sideeffect "", "{x5}"(i64 %x5)
+  tail call void asm sideeffect "", "{x6}"(i64 %x6)
+  tail call void asm sideeffect "", "{x7}"(i64 %x7)
+  tail call void asm sideeffect "", "{x8}"(i64 %x8)
+  tail call void asm sideeffect "", "{x9}"(i64 %x9)
+  tail call void asm sideeffect "", "{x11}"(i64 %x11)
+  tail call void asm sideeffect "", "{x12}"(i64 %x12)
+  tail call void asm sideeffect "", "{x13}"(i64 %x13)
+  tail call void asm sideeffect "", "{x14}"(i64 %x14)
+  tail call void asm sideeffect "", "{x15}"(i64 %x15)
+  tail call void asm sideeffect "", "{x16}"(i64 %x16)
+  %r = call i64 @foo()
+  %fn = inttoptr i64 %r to ptr
+  musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+  ret void
+}
+
+; There is only 1 available scratch register left, so shrink-wrapping cannot
+; happen because StoreSwiftAsyncContext needs 2 free scratch registers.
+define swifttailcc void @test_async_with_jumptable_1_available_reg_left(ptr %src, ptr swiftasync %as) #0 {
+entry:
+  %x1 = tail call i64 asm "", "={x1}"()
+  %x2 = tail call i64 asm "", "={x2}"()
+  %x3 = tail call i64 asm "", "={x3}"()
+  %x4 = tail call i64 asm "", "={x4}"()
+  %x5 = tail call i64 asm "", "={x5}"()
+  %x6 = tail call i64 asm "", "={x6}"()
+  %x7 = tail call i64 asm "", "={x7}"()
+  %x8 = tail call i64 asm "", "={x8}"()
+  %x9 = tail call i64 asm "", "={x9}"()
+  %x11 = tail call i64 asm "", "={x11}"()
+  %x12 = tail call i64 asm "", "={x12}"()
+  %x13 = tail call i64 asm "", "={x13}"()
+  %x14 = tail call i64 asm "", "={x14}"()
+  %x15 = tail call i64 asm "", "={x15}"()
+  %x16 = tail call i64 asm "", "={x16}"()
+  %x17 = tail call i64 asm "", "={x17}"()
+  %l = load i64, ptr %src, align 8
+  switch i64 %l, label %dead [
+    i64 0, label %exit
+    i64 1, label %then.1
+    i64 2, label %then.2
+    i64 3, label %then.3
+  ]
+
+then.1:
+  br label %exit
+
+then.2:
+  br label %exit
+
+then.3:
+  br label %exit
+
+dead:                                                ; preds = %entryresume.5
+  unreachable
+
+exit:
+  %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+  tail call void asm sideeffect "", "{x1}"(i64 %x1)
+  tail call void asm sideeffect "", "{x2}"(i64 %x2)
+  tail call void asm sideeffect "", "{x3}"(i64 %x3)
+  tail call void asm sideeffect "", "{x4}"(i64 %x4)
+  tail call void asm sideeffect "", "{x5}"(i64 %x5)
+  tail call void asm sideeffect "", "{x6}"(i64 %x6)
+  tail call void asm sideeffect "", "{x7}"(i64 %x7)
+  tail call void asm sideeffect "", "{x8}"(i64 %x8)
+  tail call void asm sideeffect "", "{x9}"(i64 %x9)
+  tail call void asm sideeffect "", "{x11}"(i64 %x11)
+  tail call void asm sideeffect "", "{x12}"(i64 %x12)
+  tail call void asm sideeffect "", "{x13}"(i64 %x13)
+  tail call void asm sideeffect "", "{x14}"(i64 %x14)
+  tail call void asm sideeffect "", "{x15}"(i64 %x15)
+  tail call void asm sideeffect "", "{x16}"(i64 %x16)
+  tail call void asm sideeffect "", "{x17}"(i64 %x17)
   %r = call i64 @foo()
   %fn = inttoptr i64 %r to ptr
   musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)


        

