[llvm] Enable unaligned loads on x86 using cmpxchg (PR #142645)
via llvm-commits
llvm-commits@lists.llvm.org
Tue Jun 3 10:46:52 PDT 2025
https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/142645
From 80f8e1e827693276eb25605978d4be92c5975486 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234@gmail.com>
Date: Fri, 30 May 2025 19:07:41 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)
---
llvm/test/CodeGen/X86/atomic-unordered.ll | 256 +++++++++++++++++++---
1 file changed, 228 insertions(+), 28 deletions(-)
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index e8e0ee0b7ef49..3034a2bd5be93 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -61,6 +61,74 @@ define void @store_i16(ptr %ptr, i16 %v) {
ret void
}
+define i16 @load_i16_unaligned(ptr %ptr) {
+; CHECK-O0-LABEL: load_i16_unaligned:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: pushq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: movq %rdi, %rsi
+; CHECK-O0-NEXT: movl $2, %edi
+; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O0-NEXT: xorl %ecx, %ecx
+; CHECK-O0-NEXT: callq __atomic_load@PLT
+; CHECK-O0-NEXT: movw {{[0-9]+}}(%rsp), %ax
+; CHECK-O0-NEXT: popq %rcx
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: retq
+;
+; CHECK-O3-LABEL: load_i16_unaligned:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: pushq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O3-NEXT: movq %rdi, %rsi
+; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O3-NEXT: movl $2, %edi
+; CHECK-O3-NEXT: xorl %ecx, %ecx
+; CHECK-O3-NEXT: callq __atomic_load@PLT
+; CHECK-O3-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
+; CHECK-O3-NEXT: popq %rcx
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: retq
+ %v = load atomic i16, ptr %ptr unordered, align 1
+ ret i16 %v
+}
+
+
+define void @store_i16_unaligned(ptr %ptr, i16 %v) {
+; CHECK-O0-LABEL: store_i16_unaligned:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: pushq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: movl %esi, %eax
+; CHECK-O0-NEXT: movq %rdi, %rsi
+; CHECK-O0-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
+; CHECK-O0-NEXT: movl $2, %edi
+; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O0-NEXT: xorl %ecx, %ecx
+; CHECK-O0-NEXT: callq __atomic_store@PLT
+; CHECK-O0-NEXT: popq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: retq
+;
+; CHECK-O3-LABEL: store_i16_unaligned:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: pushq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O3-NEXT: movq %rdi, %rax
+; CHECK-O3-NEXT: movw %si, {{[0-9]+}}(%rsp)
+; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O3-NEXT: movl $2, %edi
+; CHECK-O3-NEXT: movq %rax, %rsi
+; CHECK-O3-NEXT: xorl %ecx, %ecx
+; CHECK-O3-NEXT: callq __atomic_store@PLT
+; CHECK-O3-NEXT: popq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: retq
+ store atomic i16 %v, ptr %ptr unordered, align 1
+ ret void
+}
+
define i32 @load_i32(ptr %ptr) {
; CHECK-LABEL: load_i32:
; CHECK: # %bb.0:
@@ -79,6 +147,72 @@ define void @store_i32(ptr %ptr, i32 %v) {
ret void
}
+define i32 @load_i32_unaligned(ptr %ptr) {
+; CHECK-O0-LABEL: load_i32_unaligned:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: pushq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: movq %rdi, %rsi
+; CHECK-O0-NEXT: movl $4, %edi
+; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O0-NEXT: xorl %ecx, %ecx
+; CHECK-O0-NEXT: callq __atomic_load@PLT
+; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; CHECK-O0-NEXT: popq %rcx
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: retq
+;
+; CHECK-O3-LABEL: load_i32_unaligned:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: pushq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O3-NEXT: movq %rdi, %rsi
+; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O3-NEXT: movl $4, %edi
+; CHECK-O3-NEXT: xorl %ecx, %ecx
+; CHECK-O3-NEXT: callq __atomic_load@PLT
+; CHECK-O3-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; CHECK-O3-NEXT: popq %rcx
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: retq
+ %v = load atomic i32, ptr %ptr unordered, align 1
+ ret i32 %v
+}
+
+define void @store_i32_unaligned(ptr %ptr, i32 %v) {
+; CHECK-O0-LABEL: store_i32_unaligned:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: pushq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: movl %esi, %eax
+; CHECK-O0-NEXT: movq %rdi, %rsi
+; CHECK-O0-NEXT: movl %eax, {{[0-9]+}}(%rsp)
+; CHECK-O0-NEXT: movl $4, %edi
+; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O0-NEXT: xorl %ecx, %ecx
+; CHECK-O0-NEXT: callq __atomic_store@PLT
+; CHECK-O0-NEXT: popq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: retq
+;
+; CHECK-O3-LABEL: store_i32_unaligned:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: pushq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O3-NEXT: movq %rdi, %rax
+; CHECK-O3-NEXT: movl %esi, {{[0-9]+}}(%rsp)
+; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; CHECK-O3-NEXT: movl $4, %edi
+; CHECK-O3-NEXT: movq %rax, %rsi
+; CHECK-O3-NEXT: xorl %ecx, %ecx
+; CHECK-O3-NEXT: callq __atomic_store@PLT
+; CHECK-O3-NEXT: popq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: retq
+ store atomic i32 %v, ptr %ptr unordered, align 1
+ ret void
+}
+
define i64 @load_i64(ptr %ptr) {
; CHECK-LABEL: load_i64:
; CHECK: # %bb.0:
@@ -97,6 +231,72 @@ define void @store_i64(ptr %ptr, i64 %v) {
ret void
}
+define i64 @load_i64_unaligned(ptr %ptr) {
+; CHECK-O0-LABEL: load_i64_unaligned:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: pushq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: movq %rdi, %rsi
+; CHECK-O0-NEXT: movl $8, %edi
+; CHECK-O0-NEXT: movq %rsp, %rdx
+; CHECK-O0-NEXT: xorl %ecx, %ecx
+; CHECK-O0-NEXT: callq __atomic_load@PLT
+; CHECK-O0-NEXT: movq (%rsp), %rax
+; CHECK-O0-NEXT: popq %rcx
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: retq
+;
+; CHECK-O3-LABEL: load_i64_unaligned:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: pushq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O3-NEXT: movq %rdi, %rsi
+; CHECK-O3-NEXT: movq %rsp, %rdx
+; CHECK-O3-NEXT: movl $8, %edi
+; CHECK-O3-NEXT: xorl %ecx, %ecx
+; CHECK-O3-NEXT: callq __atomic_load@PLT
+; CHECK-O3-NEXT: movq (%rsp), %rax
+; CHECK-O3-NEXT: popq %rcx
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: retq
+ %v = load atomic i64, ptr %ptr unordered, align 1
+ ret i64 %v
+}
+
+define void @store_i64_unaligned(ptr %ptr, i64 %v) {
+; CHECK-O0-LABEL: store_i64_unaligned:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: pushq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: movq %rsi, %rax
+; CHECK-O0-NEXT: movq %rdi, %rsi
+; CHECK-O0-NEXT: movq %rax, (%rsp)
+; CHECK-O0-NEXT: movl $8, %edi
+; CHECK-O0-NEXT: movq %rsp, %rdx
+; CHECK-O0-NEXT: xorl %ecx, %ecx
+; CHECK-O0-NEXT: callq __atomic_store@PLT
+; CHECK-O0-NEXT: popq %rax
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: retq
+;
+; CHECK-O3-LABEL: store_i64_unaligned:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: pushq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O3-NEXT: movq %rdi, %rax
+; CHECK-O3-NEXT: movq %rsi, (%rsp)
+; CHECK-O3-NEXT: movq %rsp, %rdx
+; CHECK-O3-NEXT: movl $8, %edi
+; CHECK-O3-NEXT: movq %rax, %rsi
+; CHECK-O3-NEXT: xorl %ecx, %ecx
+; CHECK-O3-NEXT: callq __atomic_store@PLT
+; CHECK-O3-NEXT: popq %rax
+; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: retq
+ store atomic i64 %v, ptr %ptr unordered, align 1
+ ret void
+}
+
;; The tests in the rest of this file are intended to show transforms which we
;; either *can't* do for legality, or don't currently implement. The latter
;; are noted carefully where relevant.
@@ -633,12 +833,12 @@ define i64 @load_fold_sdiv2(ptr %p, i64 %v2) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB35_1
+; CHECK-O3-NEXT: je .LBB41_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: cqto
; CHECK-O3-NEXT: idivq %rsi
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB35_1:
+; CHECK-O3-NEXT: .LBB41_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -664,12 +864,12 @@ define i64 @load_fold_sdiv3(ptr %p1, ptr %p2) {
; CHECK-O3-NEXT: movq %rax, %rdx
; CHECK-O3-NEXT: orq %rcx, %rdx
; CHECK-O3-NEXT: shrq $32, %rdx
-; CHECK-O3-NEXT: je .LBB36_1
+; CHECK-O3-NEXT: je .LBB42_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: cqto
; CHECK-O3-NEXT: idivq %rcx
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB36_1:
+; CHECK-O3-NEXT: .LBB42_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %ecx
@@ -719,12 +919,12 @@ define i64 @load_fold_udiv2(ptr %p, i64 %v2) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB38_1
+; CHECK-O3-NEXT: je .LBB44_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divq %rsi
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB38_1:
+; CHECK-O3-NEXT: .LBB44_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -751,12 +951,12 @@ define i64 @load_fold_udiv3(ptr %p1, ptr %p2) {
; CHECK-O3-NEXT: movq %rax, %rdx
; CHECK-O3-NEXT: orq %rcx, %rdx
; CHECK-O3-NEXT: shrq $32, %rdx
-; CHECK-O3-NEXT: je .LBB39_1
+; CHECK-O3-NEXT: je .LBB45_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divq %rcx
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB39_1:
+; CHECK-O3-NEXT: .LBB45_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %ecx
@@ -816,13 +1016,13 @@ define i64 @load_fold_srem2(ptr %p, i64 %v2) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB41_1
+; CHECK-O3-NEXT: je .LBB47_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: cqto
; CHECK-O3-NEXT: idivq %rsi
; CHECK-O3-NEXT: movq %rdx, %rax
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB41_1:
+; CHECK-O3-NEXT: .LBB47_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -849,13 +1049,13 @@ define i64 @load_fold_srem3(ptr %p1, ptr %p2) {
; CHECK-O3-NEXT: movq %rax, %rdx
; CHECK-O3-NEXT: orq %rcx, %rdx
; CHECK-O3-NEXT: shrq $32, %rdx
-; CHECK-O3-NEXT: je .LBB42_1
+; CHECK-O3-NEXT: je .LBB48_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: cqto
; CHECK-O3-NEXT: idivq %rcx
; CHECK-O3-NEXT: movq %rdx, %rax
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB42_1:
+; CHECK-O3-NEXT: .LBB48_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %ecx
@@ -912,13 +1112,13 @@ define i64 @load_fold_urem2(ptr %p, i64 %v2) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB44_1
+; CHECK-O3-NEXT: je .LBB50_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divq %rsi
; CHECK-O3-NEXT: movq %rdx, %rax
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB44_1:
+; CHECK-O3-NEXT: .LBB50_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -946,13 +1146,13 @@ define i64 @load_fold_urem3(ptr %p1, ptr %p2) {
; CHECK-O3-NEXT: movq %rax, %rdx
; CHECK-O3-NEXT: orq %rcx, %rdx
; CHECK-O3-NEXT: shrq $32, %rdx
-; CHECK-O3-NEXT: je .LBB45_1
+; CHECK-O3-NEXT: je .LBB51_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divq %rcx
; CHECK-O3-NEXT: movq %rdx, %rax
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB45_1:
+; CHECK-O3-NEXT: .LBB51_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %ecx
@@ -1469,13 +1669,13 @@ define void @rmw_fold_sdiv2(ptr %p, i64 %v) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB74_1
+; CHECK-O3-NEXT: je .LBB80_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: cqto
; CHECK-O3-NEXT: idivq %rsi
; CHECK-O3-NEXT: movq %rax, (%rdi)
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB74_1:
+; CHECK-O3-NEXT: .LBB80_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -1521,13 +1721,13 @@ define void @rmw_fold_udiv2(ptr %p, i64 %v) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB76_1
+; CHECK-O3-NEXT: je .LBB82_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divq %rsi
; CHECK-O3-NEXT: movq %rax, (%rdi)
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB76_1:
+; CHECK-O3-NEXT: .LBB82_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -1599,13 +1799,13 @@ define void @rmw_fold_srem2(ptr %p, i64 %v) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB78_1
+; CHECK-O3-NEXT: je .LBB84_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: cqto
; CHECK-O3-NEXT: idivq %rsi
; CHECK-O3-NEXT: movq %rdx, (%rdi)
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB78_1:
+; CHECK-O3-NEXT: .LBB84_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -1667,13 +1867,13 @@ define void @rmw_fold_urem2(ptr %p, i64 %v) {
; CHECK-O3-NEXT: movq %rax, %rcx
; CHECK-O3-NEXT: orq %rsi, %rcx
; CHECK-O3-NEXT: shrq $32, %rcx
-; CHECK-O3-NEXT: je .LBB80_1
+; CHECK-O3-NEXT: je .LBB86_1
; CHECK-O3-NEXT: # %bb.2:
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divq %rsi
; CHECK-O3-NEXT: movq %rdx, (%rdi)
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB80_1:
+; CHECK-O3-NEXT: .LBB86_1:
; CHECK-O3-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-O3-NEXT: xorl %edx, %edx
; CHECK-O3-NEXT: divl %esi
@@ -2323,11 +2523,11 @@ define i1 @fold_cmp_over_fence(ptr %p, i32 %v1) {
; CHECK-O0-NEXT: movl (%rdi), %eax
; CHECK-O0-NEXT: lock orl $0, -{{[0-9]+}}(%rsp)
; CHECK-O0-NEXT: cmpl %eax, %esi
-; CHECK-O0-NEXT: jne .LBB116_2
+; CHECK-O0-NEXT: jne .LBB122_2
; CHECK-O0-NEXT: # %bb.1: # %taken
; CHECK-O0-NEXT: movb $1, %al
; CHECK-O0-NEXT: retq
-; CHECK-O0-NEXT: .LBB116_2: # %untaken
+; CHECK-O0-NEXT: .LBB122_2: # %untaken
; CHECK-O0-NEXT: xorl %eax, %eax
; CHECK-O0-NEXT: # kill: def $al killed $al killed $eax
; CHECK-O0-NEXT: retq
@@ -2337,11 +2537,11 @@ define i1 @fold_cmp_over_fence(ptr %p, i32 %v1) {
; CHECK-O3-NEXT: movl (%rdi), %eax
; CHECK-O3-NEXT: lock orl $0, -{{[0-9]+}}(%rsp)
; CHECK-O3-NEXT: cmpl %eax, %esi
-; CHECK-O3-NEXT: jne .LBB116_2
+; CHECK-O3-NEXT: jne .LBB122_2
; CHECK-O3-NEXT: # %bb.1: # %taken
; CHECK-O3-NEXT: movb $1, %al
; CHECK-O3-NEXT: retq
-; CHECK-O3-NEXT: .LBB116_2: # %untaken
+; CHECK-O3-NEXT: .LBB122_2: # %untaken
; CHECK-O3-NEXT: xorl %eax, %eax
; CHECK-O3-NEXT: retq
%v2 = load atomic i32, ptr %p unordered, align 4
From 7775d9a7e7c29bba443ae6de01ee55355ca68b6c Mon Sep 17 00:00:00 2001
From: Rose <gfunni234@gmail.com>
Date: Fri, 30 May 2025 23:36:47 -0400
Subject: [PATCH 2/2] Enable unaligned loads on x86 using cmpxchg
We can do this by using cmpxchg; it is really the only way. The big concern is that for a locked access that crosses a cache line, an x86 CPU may either handle the split lock in hardware or raise an alignment-check exception (for example, when split-lock detection is enabled), and I am unsure how to deal with that.
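To illustrate at the IR level, this is roughly what the expansion looks like (a hand-written sketch, not output of this patch; the function name is made up). A cmpxchg whose expected and new values are both zero leaves memory unchanged and returns the value it observed, and the unordered ordering is strengthened to monotonic, since cmpxchg does not accept unordered:

; Sketch only: an under-aligned atomic load rewritten as a cmpxchg.
define i32 @load_i32_unaligned_sketch(ptr %ptr) {
  ; expected == new == 0, so memory is never modified; the first element
  ; of the result pair is the value read atomically from %ptr.
  %pair = cmpxchg ptr %ptr, i32 0, i32 0 monotonic monotonic, align 1
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

This matches the O3 codegen in the updated tests: both %eax (the expected value) and the new value are zeroed, then a single lock cmpxchg performs the read.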
---
llvm/lib/CodeGen/AtomicExpandPass.cpp | 27 +++-
llvm/lib/Target/X86/X86ISelLowering.cpp | 3 +
llvm/test/CodeGen/X86/atomic-unaligned.ll | 91 ++++++++++--
llvm/test/CodeGen/X86/atomic-unordered.ll | 167 ++++------------------
4 files changed, 140 insertions(+), 148 deletions(-)
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index c376de877ac7d..6212dbffe1860 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -232,8 +232,22 @@ template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
unsigned Size = getAtomicOpSize(I);
Align Alignment = I->getAlign();
- return Alignment >= Size &&
- Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
+
+ // On X86 we can also handle unaligned atomic accesses
+ return Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8 &&
+ (Alignment >= Size || TLI->supportsUnalignedAtomics());
+}
+
+template <typename Inst>
+static bool canLowerAtomicAsUnaligned(const TargetLowering *TLI, Inst *I) {
+ if (!TLI->supportsUnalignedAtomics())
+ return false;
+ unsigned Size = getAtomicOpSize(I);
+ Align Alignment = I->getAlign();
+
+ // On X86 we can lower under-aligned atomic loads and stores via cmpxchg
+ return Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8 &&
+ (Alignment < Size);
}
bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
@@ -510,6 +524,10 @@ AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
}
bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
+
+ if (canLowerAtomicAsUnaligned(TLI, LI))
+ return expandAtomicLoadToCmpXchg(LI);
+
switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
case TargetLoweringBase::AtomicExpansionKind::None:
return false;
@@ -532,6 +550,11 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
}
bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
+ if (canLowerAtomicAsUnaligned(TLI, SI)) {
+ expandAtomicStore(SI);
+ return true;
+ }
+
switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
case TargetLoweringBase::AtomicExpansionKind::None:
return false;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2399936ffd827..9c19cd4240f72 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -137,6 +137,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// Set up the TargetLowering object.
+ // X86 supports unaligned atomic memory accesses via lock cmpxchg
+ setSupportsUnalignedAtomics(true);
+
// X86 is weird. It always uses i8 for shift amounts and setcc results.
setBooleanContents(ZeroOrOneBooleanContent);
// X86-SSE is even stranger. It uses -1 or 0 for vector masks.
diff --git a/llvm/test/CodeGen/X86/atomic-unaligned.ll b/llvm/test/CodeGen/X86/atomic-unaligned.ll
index f02041cc5fc8f..3931746af8ff0 100644
--- a/llvm/test/CodeGen/X86/atomic-unaligned.ll
+++ b/llvm/test/CodeGen/X86/atomic-unaligned.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=x86_64 < %s | FileCheck %s
; Quick test to ensure that atomics which are not naturally-aligned
@@ -5,11 +6,19 @@
; sized libcalls.
define void @test_i32(ptr %a) nounwind {
; CHECK-LABEL: test_i32:
-; CHECK: callq __atomic_load
-; CHECK: callq __atomic_store
-; CHECK: callq __atomic_exchange
-; CHECK: callq __atomic_compare_exchange
-; CHECK: callq __atomic_compare_exchange
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: movl $1, %ecx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: xchgl %eax, (%rdi)
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: xchgl %eax, (%rdi)
+; CHECK-NEXT: lock addl $2, (%rdi)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: retq
%t0 = load atomic i32, ptr %a seq_cst, align 2
store atomic i32 1, ptr %a seq_cst, align 2
%t1 = atomicrmw xchg ptr %a, i32 1 seq_cst, align 2
@@ -20,10 +29,74 @@ define void @test_i32(ptr %a) nounwind {
define void @test_i128(ptr %a) nounwind {
; CHECK-LABEL: test_i128:
-; CHECK: callq __atomic_load
-; CHECK: callq __atomic_store
-; CHECK: callq __atomic_exchange
-; CHECK: callq __atomic_compare_exchange
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $32, %rsp
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movq %rsp, %r14
+; CHECK-NEXT: movl $16, %edi
+; CHECK-NEXT: movq %rbx, %rsi
+; CHECK-NEXT: movq %r14, %rdx
+; CHECK-NEXT: movl $5, %ecx
+; CHECK-NEXT: callq __atomic_load@PLT
+; CHECK-NEXT: movq $0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq $1, (%rsp)
+; CHECK-NEXT: movq %rsp, %rdx
+; CHECK-NEXT: movl $16, %edi
+; CHECK-NEXT: movq %rbx, %rsi
+; CHECK-NEXT: movl $5, %ecx
+; CHECK-NEXT: callq __atomic_store@PLT
+; CHECK-NEXT: movq $0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq $1, (%rsp)
+; CHECK-NEXT: movq %rsp, %rdx
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r15
+; CHECK-NEXT: movl $16, %edi
+; CHECK-NEXT: movq %rbx, %rsi
+; CHECK-NEXT: movq %r15, %rcx
+; CHECK-NEXT: movl $5, %r8d
+; CHECK-NEXT: callq __atomic_exchange@PLT
+; CHECK-NEXT: movq (%rbx), %rdx
+; CHECK-NEXT: movq 8(%rbx), %rcx
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rdx, %rax
+; CHECK-NEXT: addq $2, %rax
+; CHECK-NEXT: movq %rdx, (%rsp)
+; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: adcq $0, %rcx
+; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $16, %edi
+; CHECK-NEXT: movq %rbx, %rsi
+; CHECK-NEXT: movq %r14, %rdx
+; CHECK-NEXT: movq %r15, %rcx
+; CHECK-NEXT: movl $5, %r8d
+; CHECK-NEXT: movl $5, %r9d
+; CHECK-NEXT: callq __atomic_compare_exchange@PLT
+; CHECK-NEXT: movq (%rsp), %rdx
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: je .LBB1_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: movaps %xmm0, (%rsp)
+; CHECK-NEXT: movq $0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq $1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq %rsp, %rdx
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: movl $16, %edi
+; CHECK-NEXT: movq %rbx, %rsi
+; CHECK-NEXT: movl $5, %r8d
+; CHECK-NEXT: movl $5, %r9d
+; CHECK-NEXT: callq __atomic_compare_exchange@PLT
+; CHECK-NEXT: addq $32, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: retq
%t0 = load atomic i128, ptr %a seq_cst, align 8
store atomic i128 1, ptr %a seq_cst, align 8
%t1 = atomicrmw xchg ptr %a, i128 1 seq_cst, align 8
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index 3034a2bd5be93..a3d706474b588 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -64,30 +64,18 @@ define void @store_i16(ptr %ptr, i16 %v) {
define i16 @load_i16_unaligned(ptr %ptr) {
; CHECK-O0-LABEL: load_i16_unaligned:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: pushq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O0-NEXT: movq %rdi, %rsi
-; CHECK-O0-NEXT: movl $2, %edi
-; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O0-NEXT: xorl %ecx, %ecx
-; CHECK-O0-NEXT: callq __atomic_load@PLT
-; CHECK-O0-NEXT: movw {{[0-9]+}}(%rsp), %ax
-; CHECK-O0-NEXT: popq %rcx
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: xorl %eax, %eax
+; CHECK-O0-NEXT: movw %ax, %cx
+; CHECK-O0-NEXT: movw %cx, %ax
+; CHECK-O0-NEXT: lock cmpxchgw %cx, (%rdi)
+; CHECK-O0-NEXT: sete %cl
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_i16_unaligned:
; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: pushq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O3-NEXT: movq %rdi, %rsi
-; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O3-NEXT: movl $2, %edi
; CHECK-O3-NEXT: xorl %ecx, %ecx
-; CHECK-O3-NEXT: callq __atomic_load@PLT
-; CHECK-O3-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
-; CHECK-O3-NEXT: popq %rcx
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: xorl %eax, %eax
+; CHECK-O3-NEXT: lock cmpxchgw %cx, (%rdi)
; CHECK-O3-NEXT: retq
%v = load atomic i16, ptr %ptr unordered, align 1
ret i16 %v
@@ -97,33 +85,13 @@ define i16 @load_i16_unaligned(ptr %ptr) {
define void @store_i16_unaligned(ptr %ptr, i16 %v) {
; CHECK-O0-LABEL: store_i16_unaligned:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: pushq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O0-NEXT: movl %esi, %eax
-; CHECK-O0-NEXT: movq %rdi, %rsi
-; CHECK-O0-NEXT: # kill: def $ax killed $ax killed $eax
-; CHECK-O0-NEXT: movw %ax, {{[0-9]+}}(%rsp)
-; CHECK-O0-NEXT: movl $2, %edi
-; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O0-NEXT: xorl %ecx, %ecx
-; CHECK-O0-NEXT: callq __atomic_store@PLT
-; CHECK-O0-NEXT: popq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: movw %si, %ax
+; CHECK-O0-NEXT: xchgw %ax, (%rdi)
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: store_i16_unaligned:
; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: pushq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O3-NEXT: movq %rdi, %rax
-; CHECK-O3-NEXT: movw %si, {{[0-9]+}}(%rsp)
-; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O3-NEXT: movl $2, %edi
-; CHECK-O3-NEXT: movq %rax, %rsi
-; CHECK-O3-NEXT: xorl %ecx, %ecx
-; CHECK-O3-NEXT: callq __atomic_store@PLT
-; CHECK-O3-NEXT: popq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: xchgw %si, (%rdi)
; CHECK-O3-NEXT: retq
store atomic i16 %v, ptr %ptr unordered, align 1
ret void
@@ -150,65 +118,27 @@ define void @store_i32(ptr %ptr, i32 %v) {
define i32 @load_i32_unaligned(ptr %ptr) {
; CHECK-O0-LABEL: load_i32_unaligned:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: pushq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O0-NEXT: movq %rdi, %rsi
-; CHECK-O0-NEXT: movl $4, %edi
-; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
; CHECK-O0-NEXT: xorl %ecx, %ecx
-; CHECK-O0-NEXT: callq __atomic_load@PLT
-; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %eax
-; CHECK-O0-NEXT: popq %rcx
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: movl %ecx, %eax
+; CHECK-O0-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-O0-NEXT: sete %cl
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_i32_unaligned:
; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: pushq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O3-NEXT: movq %rdi, %rsi
-; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O3-NEXT: movl $4, %edi
; CHECK-O3-NEXT: xorl %ecx, %ecx
-; CHECK-O3-NEXT: callq __atomic_load@PLT
-; CHECK-O3-NEXT: movl {{[0-9]+}}(%rsp), %eax
-; CHECK-O3-NEXT: popq %rcx
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: xorl %eax, %eax
+; CHECK-O3-NEXT: lock cmpxchgl %ecx, (%rdi)
; CHECK-O3-NEXT: retq
%v = load atomic i32, ptr %ptr unordered, align 1
ret i32 %v
}
define void @store_i32_unaligned(ptr %ptr, i32 %v) {
-; CHECK-O0-LABEL: store_i32_unaligned:
-; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: pushq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O0-NEXT: movl %esi, %eax
-; CHECK-O0-NEXT: movq %rdi, %rsi
-; CHECK-O0-NEXT: movl %eax, {{[0-9]+}}(%rsp)
-; CHECK-O0-NEXT: movl $4, %edi
-; CHECK-O0-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O0-NEXT: xorl %ecx, %ecx
-; CHECK-O0-NEXT: callq __atomic_store@PLT
-; CHECK-O0-NEXT: popq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
-; CHECK-O0-NEXT: retq
-;
-; CHECK-O3-LABEL: store_i32_unaligned:
-; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: pushq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O3-NEXT: movq %rdi, %rax
-; CHECK-O3-NEXT: movl %esi, {{[0-9]+}}(%rsp)
-; CHECK-O3-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O3-NEXT: movl $4, %edi
-; CHECK-O3-NEXT: movq %rax, %rsi
-; CHECK-O3-NEXT: xorl %ecx, %ecx
-; CHECK-O3-NEXT: callq __atomic_store@PLT
-; CHECK-O3-NEXT: popq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
-; CHECK-O3-NEXT: retq
+; CHECK-LABEL: store_i32_unaligned:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xchgl %esi, (%rdi)
+; CHECK-NEXT: retq
store atomic i32 %v, ptr %ptr unordered, align 1
ret void
}
@@ -234,65 +164,28 @@ define void @store_i64(ptr %ptr, i64 %v) {
define i64 @load_i64_unaligned(ptr %ptr) {
; CHECK-O0-LABEL: load_i64_unaligned:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: pushq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O0-NEXT: movq %rdi, %rsi
-; CHECK-O0-NEXT: movl $8, %edi
-; CHECK-O0-NEXT: movq %rsp, %rdx
-; CHECK-O0-NEXT: xorl %ecx, %ecx
-; CHECK-O0-NEXT: callq __atomic_load@PLT
-; CHECK-O0-NEXT: movq (%rsp), %rax
-; CHECK-O0-NEXT: popq %rcx
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O0-NEXT: xorl %eax, %eax
+; CHECK-O0-NEXT: movl %eax, %ecx
+; CHECK-O0-NEXT: movq %rcx, %rax
+; CHECK-O0-NEXT: lock cmpxchgq %rcx, (%rdi)
+; CHECK-O0-NEXT: sete %cl
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_i64_unaligned:
; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: pushq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O3-NEXT: movq %rdi, %rsi
-; CHECK-O3-NEXT: movq %rsp, %rdx
-; CHECK-O3-NEXT: movl $8, %edi
; CHECK-O3-NEXT: xorl %ecx, %ecx
-; CHECK-O3-NEXT: callq __atomic_load@PLT
-; CHECK-O3-NEXT: movq (%rsp), %rax
-; CHECK-O3-NEXT: popq %rcx
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
+; CHECK-O3-NEXT: xorl %eax, %eax
+; CHECK-O3-NEXT: lock cmpxchgq %rcx, (%rdi)
; CHECK-O3-NEXT: retq
%v = load atomic i64, ptr %ptr unordered, align 1
ret i64 %v
}
define void @store_i64_unaligned(ptr %ptr, i64 %v) {
-; CHECK-O0-LABEL: store_i64_unaligned:
-; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: pushq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O0-NEXT: movq %rsi, %rax
-; CHECK-O0-NEXT: movq %rdi, %rsi
-; CHECK-O0-NEXT: movq %rax, (%rsp)
-; CHECK-O0-NEXT: movl $8, %edi
-; CHECK-O0-NEXT: movq %rsp, %rdx
-; CHECK-O0-NEXT: xorl %ecx, %ecx
-; CHECK-O0-NEXT: callq __atomic_store@PLT
-; CHECK-O0-NEXT: popq %rax
-; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
-; CHECK-O0-NEXT: retq
-;
-; CHECK-O3-LABEL: store_i64_unaligned:
-; CHECK-O3: # %bb.0:
-; CHECK-O3-NEXT: pushq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 16
-; CHECK-O3-NEXT: movq %rdi, %rax
-; CHECK-O3-NEXT: movq %rsi, (%rsp)
-; CHECK-O3-NEXT: movq %rsp, %rdx
-; CHECK-O3-NEXT: movl $8, %edi
-; CHECK-O3-NEXT: movq %rax, %rsi
-; CHECK-O3-NEXT: xorl %ecx, %ecx
-; CHECK-O3-NEXT: callq __atomic_store@PLT
-; CHECK-O3-NEXT: popq %rax
-; CHECK-O3-NEXT: .cfi_def_cfa_offset 8
-; CHECK-O3-NEXT: retq
+; CHECK-LABEL: store_i64_unaligned:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xchgq %rsi, (%rdi)
+; CHECK-NEXT: retq
store atomic i64 %v, ptr %ptr unordered, align 1
ret void
}