[llvm] [X86] Lowering of load atomic float via cast (PR #117189)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 22 13:58:07 PST 2024


https://github.com/jofrn updated https://github.com/llvm/llvm-project/pull/117189

>From b95a0215b71c0ce3bcd92945bf2fb9df336c796b Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Thu, 21 Nov 2024 11:46:32 -0500
Subject: [PATCH] [X86] Lowering of load atomic float via cast

The X86 backend does not lower load atomic float, so it can be cast to an
integer before lowering.
---
 llvm/lib/IR/Verifier.cpp                      | 13 +++++--
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  8 ++++
 llvm/lib/Target/X86/X86ISelLowering.h         |  2 +
 llvm/test/CodeGen/X86/atomicvec-float.ll      | 38 +++++++++++++++++++
 .../AtomicExpand/atomicvec-float.ll           | 38 +++++++++++++++++++
 5 files changed, 95 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/atomicvec-float.ll
 create mode 100644 llvm/test/Transforms/AtomicExpand/atomicvec-float.ll

diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index f34fe7594c8602..999dd71262e813 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -137,6 +137,10 @@ static cl::opt<bool> VerifyNoAliasScopeDomination(
     cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
              "scopes are not dominating"));
 
+static cl::opt<bool> AllowAtomicVector(
+    "allow-atomic-vector", cl::Hidden, cl::init(false),
+    cl::desc("Allow load atomic vector to be permitted in the IR"));
+
 namespace llvm {
 
 struct VerifierSupport {
@@ -4256,10 +4260,11 @@ void Verifier::visitLoadInst(LoadInst &LI) {
     Check(LI.getOrdering() != AtomicOrdering::Release &&
               LI.getOrdering() != AtomicOrdering::AcquireRelease,
           "Load cannot have Release ordering", &LI);
-    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
-          "atomic load operand must have integer, pointer, or floating point "
-          "type!",
-          ElTy, &LI);
+    if (!AllowAtomicVector)
+      Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
+            "atomic load operand must have integer, pointer, or floating point "
+            "type!",
+            ElTy, &LI);
     checkAtomicMemAccessSize(ElTy, &LI);
   } else {
     Check(LI.getSyncScopeID() == SyncScope::System,
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bcb84add65d83e..03f68843671a7b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30976,6 +30976,14 @@ bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
   return false;
 }
 
+TargetLoweringBase::AtomicExpansionKind
+X86TargetLowering::shouldCastAtomicLoadInIR(LoadInst *LI) const {
+  if (const auto VT = dyn_cast<VectorType>(LI->getType()))
+    if (VT->getElementType()->isFloatingPointTy())
+      return AtomicExpansionKind::CastToInteger;
+  return TargetLowering::shouldCastAtomicLoadInIR(LI);
+}
+
 TargetLoweringBase::AtomicExpansionKind
 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   Type *MemType = SI->getValueOperand()->getType();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 14ada1721fd40e..75f8c46362327f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1808,6 +1808,8 @@ namespace llvm {
     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
     ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
 
+    TargetLoweringBase::AtomicExpansionKind
+    shouldCastAtomicLoadInIR(LoadInst *LI) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
     TargetLoweringBase::AtomicExpansionKind
diff --git a/llvm/test/CodeGen/X86/atomicvec-float.ll b/llvm/test/CodeGen/X86/atomicvec-float.ll
new file mode 100644
index 00000000000000..ef4b9249ed23bf
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomicvec-float.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s --mtriple=x86_64 --allow-atomic-vector | FileCheck %s
+
+define float @load_atomic_float(ptr %src) {
+; CHECK-LABEL: load_atomic_float:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    retq
+  %ret = load atomic float, ptr %src acquire, align 4
+  ret float %ret
+}
+
+define <1 x float> @load_atomic_vector_float1(ptr %src) {
+; CHECK-LABEL: load_atomic_vector_float1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    retq
+  %ret = load atomic <1 x float>, ptr %src acquire, align 4
+  ret <1 x float> %ret
+}
+
+define <2 x float> @load_atomic_vector_float2(ptr %src) {
+; CHECK-LABEL: load_atomic_vector_float2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movq %rdi, %rsi
+; CHECK-NEXT:    movq %rsp, %rdx
+; CHECK-NEXT:    movl $8, %edi
+; CHECK-NEXT:    movl $2, %ecx
+; CHECK-NEXT:    callq __atomic_load at PLT
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+  %ret = load atomic <2 x float>, ptr %src acquire, align 4
+  ret <2 x float> %ret
+}
diff --git a/llvm/test/Transforms/AtomicExpand/atomicvec-float.ll b/llvm/test/Transforms/AtomicExpand/atomicvec-float.ll
new file mode 100644
index 00000000000000..f2abb94954e87f
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/atomicvec-float.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s --mtriple=x86_64 --passes=atomic-expand --allow-atomic-vector -S -o - | FileCheck %s
+
+define float @load_atomic_float(ptr %src) {
+; CHECK-LABEL: define float @load_atomic_float(
+; CHECK-SAME: ptr [[SRC:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[SRC]] acquire, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT:    ret float [[TMP2]]
+;
+  %ret = load atomic float, ptr %src acquire, align 4
+  ret float %ret
+}
+
+define <1 x float> @load_atomic_vector_float1(ptr %src) {
+; CHECK-LABEL: define <1 x float> @load_atomic_vector_float1(
+; CHECK-SAME: ptr [[SRC:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[SRC]] acquire, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to <1 x float>
+; CHECK-NEXT:    ret <1 x float> [[TMP2]]
+;
+  %ret = load atomic <1 x float>, ptr %src acquire, align 4
+  ret <1 x float> %ret
+}
+
+define <2 x float> @load_atomic_vector_float2(ptr %src) {
+; CHECK-LABEL: define <2 x float> @load_atomic_vector_float2(
+; CHECK-SAME: ptr [[SRC:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca <2 x float>, align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]])
+; CHECK-NEXT:    call void @__atomic_load(i64 8, ptr [[SRC]], ptr [[TMP1]], i32 2)
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]])
+; CHECK-NEXT:    ret <2 x float> [[TMP2]]
+;
+  %ret = load atomic <2 x float>, ptr %src acquire, align 4
+  ret <2 x float> %ret
+}



More information about the llvm-commits mailing list