[llvm] [M68k] Correctly emit non-pic relocations (PR #89863)

via llvm-commits <llvm-commits@lists.llvm.org>
Tue Apr 23 20:01:33 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-backend-m68k

Author: Peter Lafreniere (n8pjl)


The m68k backend always emits external calls (including libcalls) with PC-relative
PLT relocations, even in non-pic mode or when -fno-plt is used.

This is unexpected: other function calls are emitted with absolute addressing,
and a static code model implies that there is no PLT. It also leads to a
miscompilation where the emitted call instruction expects an immediate (absolute)
address, while the relocation emitted for that instruction is PC-relative.

This miscompilation can even be seen in the default C function in godbolt:
https://godbolt.org/z/zEoazovzo
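
As a rough illustration, here is a minimal regression-test sketch in the style of the existing M68k CodeGen tests (the triple and FileCheck usage mirror those tests; `@external_fn` and `@caller` are illustrative names, not part of the patch):

```llvm
; RUN: llc < %s -mtriple=m68k-linux-gnu -relocation-model=static | FileCheck %s

declare i32 @external_fn(i32)

define i32 @caller(i32 %x) nounwind {
; Before this patch, the static relocation model still produced
; "jsr external_fn@PLT"; with the fix, the call references the plain
; absolute symbol.
; CHECK-LABEL: caller:
; CHECK: jsr external_fn
; CHECK-NOT: @PLT
  %r = call i32 @external_fn(i32 %x)
  ret i32 %r
}
```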

Fix the issue by classifying external function references according to the PIC
mode. This changes code generation under the static code model, bringing it in
line with the expected behaviour and allowing the backend to be used in more
bare-metal situations where no PLT exists.

The change avoids emitting a PLT32 relocation for an absolute call, and makes
libcalls and other external calls use absolute addressing modes when a static
code model is requested.
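
A similar sketch for the libcall case, following the conventions of the existing tests such as imul.ll (this assumes the default CPU lacks a 32-bit multiply and that llc defaults to the static relocation model here, as the existing checks suggest):

```llvm
; RUN: llc < %s -mtriple=m68k-linux-gnu | FileCheck %s

define i32 @mul32(i32 %a, i32 %b) nounwind {
; 32-bit multiplication lowers to the __mulsi3 libcall, which should now be
; reached through an absolute address rather than the PLT.
; CHECK-LABEL: mul32:
; CHECK: jsr __mulsi3
; CHECK-NOT: @PLT
  %r = mul i32 %a, %b
  ret i32 %r
}
```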

Further work should be done in instruction lowering and validation to ensure
that miscompilations of the same type don't occur.


---

Patch is 28.53 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/89863.diff


15 Files Affected:

- (modified) llvm/lib/Target/M68k/M68kSubtarget.cpp (+2-2) 
- (modified) llvm/test/CodeGen/M68k/Arith/divide-by-constant.ll (+4-4) 
- (modified) llvm/test/CodeGen/M68k/Arith/imul.ll (+4-4) 
- (modified) llvm/test/CodeGen/M68k/Arith/mul64.ll (+1-1) 
- (modified) llvm/test/CodeGen/M68k/Arith/sdiv-exact.ll (+2-2) 
- (modified) llvm/test/CodeGen/M68k/Arith/smul-with-overflow.ll (+4-4) 
- (modified) llvm/test/CodeGen/M68k/Arith/sub-with-overflow.ll (+4-4) 
- (modified) llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll (+5-5) 
- (modified) llvm/test/CodeGen/M68k/Atomics/load-store.ll (+16-16) 
- (modified) llvm/test/CodeGen/M68k/Atomics/rmw.ll (+14-14) 
- (modified) llvm/test/CodeGen/M68k/CodeModel/medium-static.ll (+9-9) 
- (modified) llvm/test/CodeGen/M68k/CodeModel/small-static.ll (+9-9) 
- (modified) llvm/test/CodeGen/M68k/TLS/tlsie.ll (+1-1) 
- (modified) llvm/test/CodeGen/M68k/TLS/tlsle.ll (+1-1) 
- (modified) llvm/test/CodeGen/M68k/gcc_except_table.ll (+2-2) 


``````````diff
diff --git a/llvm/lib/Target/M68k/M68kSubtarget.cpp b/llvm/lib/Target/M68k/M68kSubtarget.cpp
index 3af1e994c01cd9..cacdbf559faa2d 100644
--- a/llvm/lib/Target/M68k/M68kSubtarget.cpp
+++ b/llvm/lib/Target/M68k/M68kSubtarget.cpp
@@ -251,6 +251,6 @@ M68kSubtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
     return M68kII::MO_GOTPCREL;
   }
 
-  // otherwise linker will figure this out
-  return M68kII::MO_PLT;
+  // Ensure that we don't emit PLT relocations when in non-pic modes.
+  return isPositionIndependent() ? M68kII::MO_PLT : M68kII::MO_ABSOLUTE_ADDRESS;
 }
diff --git a/llvm/test/CodeGen/M68k/Arith/divide-by-constant.ll b/llvm/test/CodeGen/M68k/Arith/divide-by-constant.ll
index 834dfe1c26f08f..b9f56a2ebfee4c 100644
--- a/llvm/test/CodeGen/M68k/Arith/divide-by-constant.ll
+++ b/llvm/test/CodeGen/M68k/Arith/divide-by-constant.ll
@@ -77,7 +77,7 @@ define i32 @test5(i32 %A) nounwind {
 ; CHECK-NEXT:    suba.l #12, %sp
 ; CHECK-NEXT:    move.l #1577682821, (4,%sp)
 ; CHECK-NEXT:    move.l (16,%sp), (%sp)
-; CHECK-NEXT:    jsr __udivsi3@PLT
+; CHECK-NEXT:    jsr __udivsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
   %tmp1 = udiv i32 %A, 1577682821         ; <i32> [#uses=1]
@@ -114,7 +114,7 @@ define i32 @test7(i32 %x) nounwind {
 ; CHECK-NEXT:    suba.l #12, %sp
 ; CHECK-NEXT:    move.l #28, (4,%sp)
 ; CHECK-NEXT:    move.l (16,%sp), (%sp)
-; CHECK-NEXT:    jsr __udivsi3@PLT
+; CHECK-NEXT:    jsr __udivsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
   %div = udiv i32 %x, 28
@@ -178,7 +178,7 @@ define i32 @testsize2(i32 %x) minsize nounwind {
 ; CHECK-NEXT:    suba.l #12, %sp
 ; CHECK-NEXT:    move.l #33, (4,%sp)
 ; CHECK-NEXT:    move.l (16,%sp), (%sp)
-; CHECK-NEXT:    jsr __divsi3@PLT
+; CHECK-NEXT:    jsr __divsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
 entry:
@@ -203,7 +203,7 @@ define i32 @testsize4(i32 %x) minsize nounwind {
 ; CHECK-NEXT:    suba.l #12, %sp
 ; CHECK-NEXT:    move.l #33, (4,%sp)
 ; CHECK-NEXT:    move.l (16,%sp), (%sp)
-; CHECK-NEXT:    jsr __udivsi3@PLT
+; CHECK-NEXT:    jsr __udivsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
 entry:
diff --git a/llvm/test/CodeGen/M68k/Arith/imul.ll b/llvm/test/CodeGen/M68k/Arith/imul.ll
index f53568395c29b2..7cd476bcd99026 100644
--- a/llvm/test/CodeGen/M68k/Arith/imul.ll
+++ b/llvm/test/CodeGen/M68k/Arith/imul.ll
@@ -116,7 +116,7 @@ define i32 @mul_32(i32 %a, i32 %b) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset -16
 ; CHECK-NEXT:    move.l (20,%sp), (4,%sp)
 ; CHECK-NEXT:    move.l (16,%sp), (%sp)
-; CHECK-NEXT:    jsr __mulsi3@PLT
+; CHECK-NEXT:    jsr __mulsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
     %mul = mul i32 %a, %b
@@ -162,7 +162,7 @@ define i64 @mul_64(i64 %a, i64 %b) {
 ; CHECK-NEXT:    move.l (32,%sp), (8,%sp)
 ; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
 ; CHECK-NEXT:    move.l (24,%sp), (%sp)
-; CHECK-NEXT:    jsr __muldi3@PLT
+; CHECK-NEXT:    jsr __muldi3
 ; CHECK-NEXT:    adda.l #20, %sp
 ; CHECK-NEXT:    rts
     %mul = mul i64 %a, %b
@@ -179,7 +179,7 @@ define i64 @mul3_64(i64 %A) {
 ; CHECK-NEXT:    move.l #0, (8,%sp)
 ; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
 ; CHECK-NEXT:    move.l (24,%sp), (%sp)
-; CHECK-NEXT:    jsr __muldi3@PLT
+; CHECK-NEXT:    jsr __muldi3
 ; CHECK-NEXT:    adda.l #20, %sp
 ; CHECK-NEXT:    rts
     %mul = mul i64 %A, 3
@@ -196,7 +196,7 @@ define i64 @mul40_64(i64 %A) {
 ; CHECK-NEXT:    move.l #0, (8,%sp)
 ; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
 ; CHECK-NEXT:    move.l (24,%sp), (%sp)
-; CHECK-NEXT:    jsr __muldi3@PLT
+; CHECK-NEXT:    jsr __muldi3
 ; CHECK-NEXT:    adda.l #20, %sp
 ; CHECK-NEXT:    rts
     %mul = mul i64 %A, 40
diff --git a/llvm/test/CodeGen/M68k/Arith/mul64.ll b/llvm/test/CodeGen/M68k/Arith/mul64.ll
index f6228d4c63475b..12967025ab46d6 100644
--- a/llvm/test/CodeGen/M68k/Arith/mul64.ll
+++ b/llvm/test/CodeGen/M68k/Arith/mul64.ll
@@ -11,7 +11,7 @@ define i64 @foo(i64 %t, i64 %u) nounwind {
 ; CHECK-NEXT:    move.l (32,%sp), (8,%sp)
 ; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
 ; CHECK-NEXT:    move.l (24,%sp), (%sp)
-; CHECK-NEXT:    jsr __muldi3@PLT
+; CHECK-NEXT:    jsr __muldi3
 ; CHECK-NEXT:    adda.l #20, %sp
 ; CHECK-NEXT:    rts
   %k = mul i64 %t, %u
diff --git a/llvm/test/CodeGen/M68k/Arith/sdiv-exact.ll b/llvm/test/CodeGen/M68k/Arith/sdiv-exact.ll
index bb6b4acc034df2..96cc8b237202db 100644
--- a/llvm/test/CodeGen/M68k/Arith/sdiv-exact.ll
+++ b/llvm/test/CodeGen/M68k/Arith/sdiv-exact.ll
@@ -9,7 +9,7 @@ define i32 @test1(i32 %x) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset -16
 ; CHECK-NEXT:    move.l #-1030792151, (4,%sp)
 ; CHECK-NEXT:    move.l (16,%sp), (%sp)
-; CHECK-NEXT:    jsr __mulsi3@PLT
+; CHECK-NEXT:    jsr __mulsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
   %div = sdiv exact i32 %x, 25
@@ -26,7 +26,7 @@ define i32 @test2(i32 %x) {
 ; CHECK-NEXT:    asr.l #3, %d0
 ; CHECK-NEXT:    move.l %d0, (%sp)
 ; CHECK-NEXT:    move.l #-1431655765, (4,%sp)
-; CHECK-NEXT:    jsr __mulsi3@PLT
+; CHECK-NEXT:    jsr __mulsi3
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
   %div = sdiv exact i32 %x, 24
diff --git a/llvm/test/CodeGen/M68k/Arith/smul-with-overflow.ll b/llvm/test/CodeGen/M68k/Arith/smul-with-overflow.ll
index 5bd4d5d48bc859..a71d49a53bf034 100644
--- a/llvm/test/CodeGen/M68k/Arith/smul-with-overflow.ll
+++ b/llvm/test/CodeGen/M68k/Arith/smul-with-overflow.ll
@@ -69,7 +69,7 @@ define fastcc i1 @test1(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:  ; %bb.2: ; %overflow
 ; CHECK-NEXT:    lea (no,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #0, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
@@ -77,7 +77,7 @@ define fastcc i1 @test1(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:    move.l %d0, (4,%sp)
 ; CHECK-NEXT:    lea (ok,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #1, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
@@ -107,7 +107,7 @@ define fastcc i1 @test2(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:  ; %bb.1: ; %overflow
 ; CHECK-NEXT:    lea (no,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #0, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
@@ -115,7 +115,7 @@ define fastcc i1 @test2(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:    move.l %d0, (4,%sp)
 ; CHECK-NEXT:    lea (ok,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #1, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
diff --git a/llvm/test/CodeGen/M68k/Arith/sub-with-overflow.ll b/llvm/test/CodeGen/M68k/Arith/sub-with-overflow.ll
index 8d47c7ebf7e56f..7d558b0e54501e 100644
--- a/llvm/test/CodeGen/M68k/Arith/sub-with-overflow.ll
+++ b/llvm/test/CodeGen/M68k/Arith/sub-with-overflow.ll
@@ -18,7 +18,7 @@ define i1 @func1(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:  ; %bb.2: ; %overflow
 ; CHECK-NEXT:    lea (no,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #0, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
@@ -26,7 +26,7 @@ define i1 @func1(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:    move.l %d0, (4,%sp)
 ; CHECK-NEXT:    lea (ok,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #1, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
@@ -55,7 +55,7 @@ define i1 @func2(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:  ; %bb.2: ; %carry
 ; CHECK-NEXT:    lea (no,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #0, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
@@ -63,7 +63,7 @@ define i1 @func2(i32 %v1, i32 %v2) nounwind {
 ; CHECK-NEXT:    move.l %d0, (4,%sp)
 ; CHECK-NEXT:    lea (ok,%pc), %a0
 ; CHECK-NEXT:    move.l %a0, (%sp)
-; CHECK-NEXT:    jsr printf@PLT
+; CHECK-NEXT:    jsr printf
 ; CHECK-NEXT:    move.b #1, %d0
 ; CHECK-NEXT:    adda.l #12, %sp
 ; CHECK-NEXT:    rts
diff --git a/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll b/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll
index b018ea4af4aa76..42c0a333fa1be4 100644
--- a/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll
+++ b/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll
@@ -18,7 +18,7 @@ define i1 @cmpxchg_i8_monotonic_monotonic(i8 %cmp, i8 %new, ptr %mem) nounwind {
 ; NO-ATOMIC-NEXT:    and.l #255, %d0
 ; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (32,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_1@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_1
 ; NO-ATOMIC-NEXT:    sub.b %d2, %d0
 ; NO-ATOMIC-NEXT:    seq %d0
 ; NO-ATOMIC-NEXT:    movem.l (16,%sp), %d2 ; 8-byte Folded Reload
@@ -55,7 +55,7 @@ define i16 @cmpxchg_i16_release_monotonic(i16 %cmp, i16 %new, ptr %mem) nounwind
 ; NO-ATOMIC-NEXT:    and.l #65535, %d0
 ; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_2@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_2
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -78,7 +78,7 @@ define i32 @cmpxchg_i32_release_acquire(i32 %cmp, i32 %new, ptr %mem) nounwind {
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_4@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_4
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -107,7 +107,7 @@ define i64 @cmpxchg_i64_seqcst_seqcst(i64 %cmp, i64 %new, ptr %mem) nounwind {
 ; NO-ATOMIC-NEXT:    move.l (52,%sp), (12,%sp)
 ; NO-ATOMIC-NEXT:    move.l (48,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (56,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_compare_exchange_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_compare_exchange_8
 ; NO-ATOMIC-NEXT:    move.l (28,%sp), %d1
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), %d0
 ; NO-ATOMIC-NEXT:    adda.l #36, %sp
@@ -125,7 +125,7 @@ define i64 @cmpxchg_i64_seqcst_seqcst(i64 %cmp, i64 %new, ptr %mem) nounwind {
 ; ATOMIC-NEXT:    move.l (52,%sp), (12,%sp)
 ; ATOMIC-NEXT:    move.l (48,%sp), (8,%sp)
 ; ATOMIC-NEXT:    move.l (56,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_compare_exchange_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_compare_exchange_8
 ; ATOMIC-NEXT:    move.l (28,%sp), %d1
 ; ATOMIC-NEXT:    move.l (24,%sp), %d0
 ; ATOMIC-NEXT:    adda.l #36, %sp
diff --git a/llvm/test/CodeGen/M68k/Atomics/load-store.ll b/llvm/test/CodeGen/M68k/Atomics/load-store.ll
index b238172c2f12c0..23fdfad05cab5d 100644
--- a/llvm/test/CodeGen/M68k/Atomics/load-store.ll
+++ b/llvm/test/CodeGen/M68k/Atomics/load-store.ll
@@ -203,7 +203,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
 ; NO-ATOMIC-NEXT:    suba.l #12, %sp
 ; NO-ATOMIC-NEXT:    move.l #0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -212,7 +212,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
 ; ATOMIC-NEXT:    suba.l #12, %sp
 ; ATOMIC-NEXT:    move.l #0, (4,%sp)
 ; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_load_8
 ; ATOMIC-NEXT:    adda.l #12, %sp
 ; ATOMIC-NEXT:    rts
   %1 = load atomic i64, ptr %a unordered, align 8
@@ -225,7 +225,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
 ; NO-ATOMIC-NEXT:    suba.l #12, %sp
 ; NO-ATOMIC-NEXT:    move.l #0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -234,7 +234,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
 ; ATOMIC-NEXT:    suba.l #12, %sp
 ; ATOMIC-NEXT:    move.l #0, (4,%sp)
 ; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_load_8
 ; ATOMIC-NEXT:    adda.l #12, %sp
 ; ATOMIC-NEXT:    rts
   %1 = load atomic i64, ptr %a monotonic, align 8
@@ -247,7 +247,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
 ; NO-ATOMIC-NEXT:    suba.l #12, %sp
 ; NO-ATOMIC-NEXT:    move.l #2, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -256,7 +256,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
 ; ATOMIC-NEXT:    suba.l #12, %sp
 ; ATOMIC-NEXT:    move.l #2, (4,%sp)
 ; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_load_8
 ; ATOMIC-NEXT:    adda.l #12, %sp
 ; ATOMIC-NEXT:    rts
   %1 = load atomic i64, ptr %a acquire, align 8
@@ -269,7 +269,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
 ; NO-ATOMIC-NEXT:    suba.l #12, %sp
 ; NO-ATOMIC-NEXT:    move.l #5, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -278,7 +278,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
 ; ATOMIC-NEXT:    suba.l #12, %sp
 ; ATOMIC-NEXT:    move.l #5, (4,%sp)
 ; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_load_8
 ; ATOMIC-NEXT:    adda.l #12, %sp
 ; ATOMIC-NEXT:    rts
   %1 = load atomic i64, ptr %a seq_cst, align 8
@@ -509,7 +509,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %val) nounwind {
 ; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8
 ; NO-ATOMIC-NEXT:    adda.l #20, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -520,7 +520,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %val) nounwind {
 ; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_store_8
 ; ATOMIC-NEXT:    adda.l #20, %sp
 ; ATOMIC-NEXT:    rts
   store atomic i64 %val, ptr %a unordered, align 8
@@ -535,7 +535,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %val) nounwind {
 ; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8
 ; NO-ATOMIC-NEXT:    adda.l #20, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -546,7 +546,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %val) nounwind {
 ; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_store_8
 ; ATOMIC-NEXT:    adda.l #20, %sp
 ; ATOMIC-NEXT:    rts
   store atomic i64 %val, ptr %a monotonic, align 8
@@ -561,7 +561,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %val) nounwind {
 ; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8
 ; NO-ATOMIC-NEXT:    adda.l #20, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -572,7 +572,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %val) nounwind {
 ; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_store_8
 ; ATOMIC-NEXT:    adda.l #20, %sp
 ; ATOMIC-NEXT:    rts
   store atomic i64 %val, ptr %a release, align 8
@@ -587,7 +587,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %val) nounwind {
 ; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8
 ; NO-ATOMIC-NEXT:    adda.l #20, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -598,7 +598,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %val) nounwind {
 ; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
 ; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
 ; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_store_8
 ; ATOMIC-NEXT:    adda.l #20, %sp
 ; ATOMIC-NEXT:    rts
   store atomic i64 %val, ptr %a seq_cst, align 8
diff --git a/llvm/test/CodeGen/M68k/Atomics/rmw.ll b/llvm/test/CodeGen/M68k/Atomics/rmw.ll
index 1036a0a8ba3d25..ce456f0960eec1 100644
--- a/llvm/test/CodeGen/M68k/Atomics/rmw.ll
+++ b/llvm/test/CodeGen/M68k/Atomics/rmw.ll
@@ -15,7 +15,7 @@ define i8 @atomicrmw_add_i8(i8 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    and.l #255, %d0
 ; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_add_1@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_add_1
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -58,7 +58,7 @@ define i16 @atomicrmw_sub_i16(i16 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    and.l #65535, %d0
 ; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_sub_2@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_sub_2
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -99,7 +99,7 @@ define i32 @atomicrmw_and_i32(i32 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_and_4@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_and_4
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -142,7 +142,7 @@ define i64 @atomicrmw_xor_i64(i64 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    move.l (28,%sp), (8,%sp)
 ; NO-ATOMIC-NEXT:    move.l (24,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (32,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __atomic_fetch_xor_8@PLT
+; NO-ATOMIC-NEXT:    jsr __atomic_fetch_xor_8
 ; NO-ATOMIC-NEXT:    adda.l #20, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -155,7 +155,7 @@ define i64 @atomicrmw_xor_i64(i64 %val, ptr %ptr) {
 ; ATOMIC-NEXT:    move.l (28,%sp), (8,%sp)
 ; ATOMIC-NEXT:    move.l (24,%sp), (4,%sp)
 ; ATOMIC-NEXT:    move.l (32,%sp), (%sp)
-; ATOMIC-NEXT:    jsr __atomic_fetch_xor_8@PLT
+; ATOMIC-NEXT:    jsr __atomic_fetch_xor_8
 ; ATOMIC-NEXT:    adda.l #20, %sp
 ; ATOMIC-NEXT:    rts
   %old = atomicrmw xor ptr %ptr, i64 %val release
@@ -172,7 +172,7 @@ define i8 @atomicrmw_or_i8(i8 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    and.l #255, %d0
 ; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_or_1@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_or_1
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -217,7 +217,7 @@ define i16 @atmoicrmw_nand_i16(i16 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    and.l #65535, %d0
 ; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_nand_2@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_nand_2
 ; NO-ATOMIC-NEXT:    move.w %d2, %d0
 ; NO-ATOMIC-NEXT:    movem.l (8,%sp), %d2 ; 8-byte Folded Reload
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
@@ -261,7 +261,7 @@ define i32 @atomicrmw_min_i32(i32 %val, ptr %ptr) {
 ; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
 ; NO-ATOMIC-NEXT:    move.l (16,%sp), (4,%sp)
 ; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
-; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_min_4@PLT
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_min_4
 ; NO-ATOMIC-NEXT:    adda.l #12, %sp
 ; NO-ATOMIC-NEXT:    rts
 ;
@@ -323,7 +323,7 @@ define i64 @atomicrmw_m...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/89863

