[llvm-branch-commits] [llvm] [AtomicExpand] Add bitcasts when expanding load atomic vector (PR #148900)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Oct 31 09:05:34 PDT 2025
https://github.com/jofrn updated https://github.com/llvm/llvm-project/pull/148900
From 0210ff75c7ae8e758b6ee04dc49e6c84f225df0a Mon Sep 17 00:00:00 2001
From: jofrn <jofernau@amd.com>
Date: Tue, 15 Jul 2025 13:03:15 -0400
Subject: [PATCH] [AtomicExpand] Add bitcasts when expanding load atomic vector
AtomicExpand fails for aligned `load atomic <n x T>` because it
does not find a compatible library call. This change adds appropriate
bitcasts so that the call can be lowered. It also adds support for
128 bit lowering in tablegen to support SSE/AVX.
---
.../include/llvm/Target/TargetSelectionDAG.td | 14 +++
llvm/lib/CodeGen/AtomicExpandPass.cpp | 22 ++++-
llvm/lib/Target/X86/X86InstrCompiler.td | 5 +
llvm/test/CodeGen/ARM/atomic-load-store.ll | 51 +++++++++++
llvm/test/CodeGen/X86/atomic-load-store.ll | 91 ++++++++++++++++++-
.../X86/expand-atomic-non-integer.ll | 66 ++++++++++++++
6 files changed, 244 insertions(+), 5 deletions(-)
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 07a858fd682fc..239fee8a3022d 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -1949,6 +1949,20 @@ def atomic_load_64 :
let MemoryVT = i64;
}
+def atomic_load_128_v2i64 :
+ PatFrag<(ops node:$ptr),
+ (atomic_load node:$ptr)> {
+ let IsAtomic = true;
+ let MemoryVT = v2i64;
+}
+
+def atomic_load_128_v4i32 :
+ PatFrag<(ops node:$ptr),
+ (atomic_load node:$ptr)> {
+ let IsAtomic = true;
+ let MemoryVT = v4i32;
+}
+
def atomic_load_nonext_8 :
PatFrag<(ops node:$ptr), (atomic_load_nonext node:$ptr)> {
let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 53f1cfe24a68d..45cdc7980fdc6 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -483,7 +483,12 @@ LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
- Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
+ Value *NewVal =
+ LI->getType()->isPointerTy() ||
+ (LI->getType()->isVectorTy() &&
+ cast<VectorType>(LI->getType())->getElementType()->isPointerTy())
+ ? Builder.CreateIntToPtr(NewLI, LI->getType())
+ : Builder.CreateBitCast(NewLI, LI->getType());
LI->replaceAllUsesWith(NewVal);
LI->eraseFromParent();
return NewLI;
@@ -2093,9 +2098,18 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
I->replaceAllUsesWith(V);
} else if (HasResult) {
Value *V;
- if (UseSizedLibcall)
- V = Builder.CreateBitOrPointerCast(Result, I->getType());
- else {
+ if (UseSizedLibcall) {
+ // Add bitcasts from Result's scalar type to I's <n x ptr> vector type
+ auto *PtrTy = dyn_cast<PointerType>(I->getType()->getScalarType());
+ auto *VTy = dyn_cast<VectorType>(I->getType());
+ if (VTy && PtrTy && !Result->getType()->isVectorTy()) {
+ unsigned AS = PtrTy->getAddressSpace();
+ Value *BC = Builder.CreateBitCast(
+ Result, VTy->getWithNewType(DL.getIntPtrType(Ctx, AS)));
+ V = Builder.CreateIntToPtr(BC, I->getType());
+ } else
+ V = Builder.CreateBitOrPointerCast(Result, I->getType());
+ } else {
V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
AllocaAlignment);
Builder.CreateLifetimeEnd(AllocaResult);
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index ce429b5916280..91dbf179a01b6 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1220,6 +1220,11 @@ def : Pat<(v2i64 (scalar_to_vector (i64 (atomic_load_64 addr:$src)))),
def : Pat<(v2i64 (scalar_to_vector (i64 (atomic_load_64 addr:$src)))),
(VMOV64toPQIZrm addr:$src)>, Requires<[HasAVX512]>;
+def : Pat<(v2i64 (atomic_load_128_v2i64 addr:$src)),
+ (VMOVAPDrm addr:$src)>; // load atomic <2 x i64>
+def : Pat<(v4i32 (atomic_load_128_v4i32 addr:$src)),
+ (VMOVAPDrm addr:$src)>; // load atomic <4 x i32>
+
// Floating point loads/stores.
def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
(MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
diff --git a/llvm/test/CodeGen/ARM/atomic-load-store.ll b/llvm/test/CodeGen/ARM/atomic-load-store.ll
index 560dfde356c29..eaa2ffd9b2731 100644
--- a/llvm/test/CodeGen/ARM/atomic-load-store.ll
+++ b/llvm/test/CodeGen/ARM/atomic-load-store.ll
@@ -983,3 +983,54 @@ define void @store_atomic_f64__seq_cst(ptr %ptr, double %val1) {
store atomic double %val1, ptr %ptr seq_cst, align 8
ret void
}
+
+define <1 x ptr> @atomic_vec1_ptr(ptr %x) #0 {
+; ARM-LABEL: atomic_vec1_ptr:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldr r0, [r0]
+; ARM-NEXT: dmb ish
+; ARM-NEXT: bx lr
+;
+; ARMOPTNONE-LABEL: atomic_vec1_ptr:
+; ARMOPTNONE: @ %bb.0:
+; ARMOPTNONE-NEXT: ldr r0, [r0]
+; ARMOPTNONE-NEXT: dmb ish
+; ARMOPTNONE-NEXT: bx lr
+;
+; THUMBTWO-LABEL: atomic_vec1_ptr:
+; THUMBTWO: @ %bb.0:
+; THUMBTWO-NEXT: ldr r0, [r0]
+; THUMBTWO-NEXT: dmb ish
+; THUMBTWO-NEXT: bx lr
+;
+; THUMBONE-LABEL: atomic_vec1_ptr:
+; THUMBONE: @ %bb.0:
+; THUMBONE-NEXT: push {r7, lr}
+; THUMBONE-NEXT: movs r1, #0
+; THUMBONE-NEXT: mov r2, r1
+; THUMBONE-NEXT: bl __sync_val_compare_and_swap_4
+; THUMBONE-NEXT: pop {r7, pc}
+;
+; ARMV4-LABEL: atomic_vec1_ptr:
+; ARMV4: @ %bb.0:
+; ARMV4-NEXT: push {r11, lr}
+; ARMV4-NEXT: mov r1, #2
+; ARMV4-NEXT: bl __atomic_load_4
+; ARMV4-NEXT: pop {r11, lr}
+; ARMV4-NEXT: mov pc, lr
+;
+; ARMV6-LABEL: atomic_vec1_ptr:
+; ARMV6: @ %bb.0:
+; ARMV6-NEXT: ldr r0, [r0]
+; ARMV6-NEXT: mov r1, #0
+; ARMV6-NEXT: mcr p15, #0, r1, c7, c10, #5
+; ARMV6-NEXT: bx lr
+;
+; THUMBM-LABEL: atomic_vec1_ptr:
+; THUMBM: @ %bb.0:
+; THUMBM-NEXT: ldr r0, [r0]
+; THUMBM-NEXT: dmb sy
+; THUMBM-NEXT: bx lr
+ %ret = load atomic <1 x ptr>, ptr %x acquire, align 4
+ ret <1 x ptr> %ret
+}
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 00310f6d1f219..3d49bb82d572e 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -244,6 +244,96 @@ define <2 x ptr addrspace(270)> @atomic_vec2_ptr270(ptr %x) {
%ret = load atomic <2 x ptr addrspace(270)>, ptr %x acquire, align 8
ret <2 x ptr addrspace(270)> %ret
}
+define <2 x ptr> @atomic_vec2_ptr_align(ptr %x) nounwind {
+; CHECK-SSE2-O3-LABEL: atomic_vec2_ptr_align:
+; CHECK-SSE2-O3: # %bb.0:
+; CHECK-SSE2-O3-NEXT: pushq %rax
+; CHECK-SSE2-O3-NEXT: movl $2, %esi
+; CHECK-SSE2-O3-NEXT: callq __atomic_load_16@PLT
+; CHECK-SSE2-O3-NEXT: movq %rdx, %xmm1
+; CHECK-SSE2-O3-NEXT: movq %rax, %xmm0
+; CHECK-SSE2-O3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-O3-NEXT: popq %rax
+; CHECK-SSE2-O3-NEXT: retq
+;
+; CHECK-SSE4-O3-LABEL: atomic_vec2_ptr_align:
+; CHECK-SSE4-O3: # %bb.0:
+; CHECK-SSE4-O3-NEXT: vmovaps (%rdi), %xmm0
+; CHECK-SSE4-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec2_ptr_align:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: vmovaps (%rdi), %xmm0
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-SSE2-O0-LABEL: atomic_vec2_ptr_align:
+; CHECK-SSE2-O0: # %bb.0:
+; CHECK-SSE2-O0-NEXT: pushq %rax
+; CHECK-SSE2-O0-NEXT: movl $2, %esi
+; CHECK-SSE2-O0-NEXT: callq __atomic_load_16@PLT
+; CHECK-SSE2-O0-NEXT: movq %rdx, %xmm1
+; CHECK-SSE2-O0-NEXT: movq %rax, %xmm0
+; CHECK-SSE2-O0-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-O0-NEXT: popq %rax
+; CHECK-SSE2-O0-NEXT: retq
+;
+; CHECK-SSE4-O0-LABEL: atomic_vec2_ptr_align:
+; CHECK-SSE4-O0: # %bb.0:
+; CHECK-SSE4-O0-NEXT: vmovapd (%rdi), %xmm0
+; CHECK-SSE4-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec2_ptr_align:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: vmovapd (%rdi), %xmm0
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <2 x ptr>, ptr %x acquire, align 16
+ ret <2 x ptr> %ret
+}
+define <4 x ptr addrspace(270)> @atomic_vec4_ptr270(ptr %x) nounwind {
+; CHECK-SSE2-O3-LABEL: atomic_vec4_ptr270:
+; CHECK-SSE2-O3: # %bb.0:
+; CHECK-SSE2-O3-NEXT: pushq %rax
+; CHECK-SSE2-O3-NEXT: movl $2, %esi
+; CHECK-SSE2-O3-NEXT: callq __atomic_load_16@PLT
+; CHECK-SSE2-O3-NEXT: movq %rdx, %xmm1
+; CHECK-SSE2-O3-NEXT: movq %rax, %xmm0
+; CHECK-SSE2-O3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-O3-NEXT: popq %rax
+; CHECK-SSE2-O3-NEXT: retq
+;
+; CHECK-SSE4-O3-LABEL: atomic_vec4_ptr270:
+; CHECK-SSE4-O3: # %bb.0:
+; CHECK-SSE4-O3-NEXT: vmovaps (%rdi), %xmm0
+; CHECK-SSE4-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec4_ptr270:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: vmovaps (%rdi), %xmm0
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-SSE2-O0-LABEL: atomic_vec4_ptr270:
+; CHECK-SSE2-O0: # %bb.0:
+; CHECK-SSE2-O0-NEXT: pushq %rax
+; CHECK-SSE2-O0-NEXT: movl $2, %esi
+; CHECK-SSE2-O0-NEXT: callq __atomic_load_16@PLT
+; CHECK-SSE2-O0-NEXT: movq %rdx, %xmm1
+; CHECK-SSE2-O0-NEXT: movq %rax, %xmm0
+; CHECK-SSE2-O0-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-O0-NEXT: popq %rax
+; CHECK-SSE2-O0-NEXT: retq
+;
+; CHECK-SSE4-O0-LABEL: atomic_vec4_ptr270:
+; CHECK-SSE4-O0: # %bb.0:
+; CHECK-SSE4-O0-NEXT: vmovapd (%rdi), %xmm0
+; CHECK-SSE4-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec4_ptr270:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: vmovapd (%rdi), %xmm0
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <4 x ptr addrspace(270)>, ptr %x acquire, align 16
+ ret <4 x ptr addrspace(270)> %ret
+}
define <2 x i32> @atomic_vec2_i32_align(ptr %x) {
; CHECK-SSE-O3-LABEL: atomic_vec2_i32_align:
@@ -727,7 +817,6 @@ define <4 x float> @atomic_vec4_float(ptr %x) nounwind {
}
define <4 x float> @atomic_vec4_float_align(ptr %x) nounwind {
-;
; CHECK-SSE2-O3-LABEL: atomic_vec4_float_align:
; CHECK-SSE2-O3: # %bb.0:
; CHECK-SSE2-O3-NEXT: pushq %rax
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index 84c7df120e32f..40cf2ecec88ea 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -189,3 +189,69 @@ define void @pointer_cmpxchg_expand6(ptr addrspace(1) %ptr, ptr addrspace(2) %v)
ret void
}
+define <2 x ptr> @atomic_vec2_ptr_align(ptr %x) nounwind {
+; CHECK-LABEL: define <2 x ptr> @atomic_vec2_ptr_align(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16(ptr [[X]], i32 2)
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast i128 [[TMP1]] to <2 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = inttoptr <2 x i64> [[TMP6]] to <2 x ptr>
+; CHECK-NEXT: ret <2 x ptr> [[TMP7]]
+;
+ %ret = load atomic <2 x ptr>, ptr %x acquire, align 16
+ ret <2 x ptr> %ret
+}
+
+define <4 x ptr addrspace(270)> @atomic_vec4_ptr_align(ptr %x) nounwind {
+; CHECK-LABEL: define <4 x ptr addrspace(270)> @atomic_vec4_ptr_align(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16(ptr [[X]], i32 2)
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i128 [[TMP1]] to <4 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = inttoptr <4 x i32> [[TMP2]] to <4 x ptr addrspace(270)>
+; CHECK-NEXT: ret <4 x ptr addrspace(270)> [[TMP3]]
+;
+ %ret = load atomic <4 x ptr addrspace(270)>, ptr %x acquire, align 16
+ ret <4 x ptr addrspace(270)> %ret
+}
+
+define <2 x i16> @atomic_vec2_i16(ptr %x) nounwind {
+; CHECK-LABEL: define <2 x i16> @atomic_vec2_i16(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[RET:%.*]] = load atomic <2 x i16>, ptr [[X]] acquire, align 8
+; CHECK-NEXT: ret <2 x i16> [[RET]]
+;
+ %ret = load atomic <2 x i16>, ptr %x acquire, align 8
+ ret <2 x i16> %ret
+}
+
+define <2 x half> @atomic_vec2_half(ptr %x) nounwind {
+; CHECK-LABEL: define <2 x half> @atomic_vec2_half(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr [[X]] acquire, align 8
+; CHECK-NEXT: [[RET:%.*]] = bitcast i32 [[TMP1]] to <2 x half>
+; CHECK-NEXT: ret <2 x half> [[RET]]
+;
+ %ret = load atomic <2 x half>, ptr %x acquire, align 8
+ ret <2 x half> %ret
+}
+
+define <4 x i32> @atomic_vec4_i32(ptr %x) nounwind {
+; CHECK-LABEL: define <4 x i32> @atomic_vec4_i32(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16(ptr [[X]], i32 2)
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i128 [[TMP1]] to <4 x i32>
+; CHECK-NEXT: ret <4 x i32> [[TMP2]]
+;
+ %ret = load atomic <4 x i32>, ptr %x acquire, align 16
+ ret <4 x i32> %ret
+}
+
+define <4 x float> @atomic_vec4_float(ptr %x) nounwind {
+; CHECK-LABEL: define <4 x float> @atomic_vec4_float(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16(ptr [[X]], i32 2)
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i128 [[TMP1]] to <4 x float>
+; CHECK-NEXT: ret <4 x float> [[TMP2]]
+;
+ %ret = load atomic <4 x float>, ptr %x acquire, align 16
+ ret <4 x float> %ret
+}
More information about the llvm-branch-commits
mailing list