[llvm] 137f785 - [AMDGPU] Set MaxAtomicSizeInBitsSupported. (#75185)

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Dec 18 13:51:10 PST 2023


Author: James Y Knight
Date: 2023-12-18T16:51:06-05:00
New Revision: 137f785fa6a1abb1651a603e3ce5b0e1f00e5be4

URL: https://github.com/llvm/llvm-project/commit/137f785fa6a1abb1651a603e3ce5b0e1f00e5be4
DIFF: https://github.com/llvm/llvm-project/commit/137f785fa6a1abb1651a603e3ce5b0e1f00e5be4.diff

LOG: [AMDGPU] Set MaxAtomicSizeInBitsSupported. (#75185)

This will result in atomic operations wider than 64 bits getting
expanded to `__atomic_*` libcalls via AtomicExpandPass, which matches
what Clang already does in the frontend.
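
For illustration, here is what such an oversized atomic looks like at
the source level (a minimal sketch; the `Pair`/`readPair` names are
hypothetical). A 128-bit access exceeds the 64-bit maximum set below,
so it is routed through the `__atomic_*` helpers rather than lowered
inline:

    #include <atomic>
    #include <cstdint>

    // 16 bytes: wider than the 64-bit maximum the target reports, so
    // accesses compile to __atomic_load_16 / __atomic_store_16
    // libcalls instead of inline atomic instructions.
    struct Pair {
      uint64_t Lo;
      uint64_t Hi;
    };

    std::atomic<Pair> G;

    Pair readPair() {
      return G.load(std::memory_order_seq_cst);
    }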

While AMDGPU currently disables the use of all libcalls, I've changed it
to instead disable all of them _except_ the atomic ones. Those are
already emitted by the Clang frontend, and enabling them in the
backend allows the same behavior there.
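
Taken together, the two hunks below reduce to the following pattern in
the TargetLowering constructor (a condensed sketch, not the verbatim
change; it relies on the `__atomic_*` entries forming the contiguous
RTLIB range [ATOMIC_LOAD, ATOMIC_FETCH_NAND_16], as the patch does):

    // Keep only the __atomic_* libcalls; clear everything else.
    for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
      if (I < RTLIB::ATOMIC_LOAD || I > RTLIB::ATOMIC_FETCH_NAND_16)
        setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

    // Report a 64-bit ceiling: AtomicExpandPass turns anything wider
    // into calls to the __atomic_* libcalls kept above.
    setMaxAtomicSizeInBitsSupported(64);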

Added: 
    llvm/test/CodeGen/AMDGPU/atomic-oversize.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 9d7443012e3da3..156a264a7c1faa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -506,9 +506,11 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SELECT, MVT::v12f32, Promote);
   AddPromotedToType(ISD::SELECT, MVT::v12f32, MVT::v12i32);
 
-  // There are no libcalls of any kind.
-  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
-    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
+  // Disable most libcalls.
+  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I) {
+    if (I < RTLIB::ATOMIC_LOAD || I > RTLIB::ATOMIC_FETCH_NAND_16)
+      setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
+  }
 
   setSchedulingPreference(Sched::RegPressure);
   setJumpIsExpensive(true);
@@ -556,6 +558,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                        ISD::FSUB,       ISD::FNEG,
                        ISD::FABS,       ISD::AssertZext,
                        ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
+
+  setMaxAtomicSizeInBitsSupported(64);
 }
 
 bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {

diff --git a/llvm/test/CodeGen/AMDGPU/atomic-oversize.ll b/llvm/test/CodeGen/AMDGPU/atomic-oversize.ll
new file mode 100644
index 00000000000000..f62a93f523365c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomic-oversize.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+define void @test(ptr %a) nounwind {
+; CHECK-LABEL: test:
+; CHECK: __atomic_load_16
+; CHECK: __atomic_store_16
+  %1 = load atomic i128, ptr %a seq_cst, align 16
+  store atomic i128 %1, ptr %a seq_cst, align 16
+  ret void
+}

diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
index bdfd90dc11dca5..6c84474edc05bb 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
@@ -1,15 +1,13 @@
-; RUN: not --crash opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck %s
-; The AtomicExpand pass cannot handle missing libcalls (yet) so reports a fatal error.
-; CHECK: LLVM ERROR: expandAtomicOpToLibcall shouldn't fail for Load
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck --check-prefix=GCN %s
 
 define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) {
 ; GCN-LABEL: @atomic_load_global_align1(
 ; GCN-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
-; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4
-; GCN-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP3]])
-; GCN-NEXT:    call void @0(i64 4, ptr [[TMP2]], ptr [[TMP3]], i32 5)
-; GCN-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4
-; GCN-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP3]])
+; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4, addrspace(5)
+; GCN-NEXT:    call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[TMP3]])
+; GCN-NEXT:    call void @__atomic_load(i64 4, ptr [[TMP2]], ptr addrspace(5) [[TMP3]], i32 5)
+; GCN-NEXT:    [[TMP5:%.*]] = load i32, ptr addrspace(5) [[TMP3]], align 4
+; GCN-NEXT:    call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[TMP3]])
 ; GCN-NEXT:    ret i32 [[TMP5]]
 ;
   %val = load atomic i32, ptr addrspace(1) %ptr  seq_cst, align 1
@@ -19,11 +17,11 @@ define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) {
 define void @atomic_store_global_align1(ptr addrspace(1) %ptr, i32 %val) {
 ; GCN-LABEL: @atomic_store_global_align1(
 ; GCN-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
-; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4
-; GCN-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP3]])
-; GCN-NEXT:    store i32 [[VAL:%.*]], ptr [[TMP3]], align 4
-; GCN-NEXT:    call void @1(i64 4, ptr [[TMP2]], ptr [[TMP3]], i32 0)
-; GCN-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP3]])
+; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4, addrspace(5)
+; GCN-NEXT:    call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[TMP3]])
+; GCN-NEXT:    store i32 [[VAL:%.*]], ptr addrspace(5) [[TMP3]], align 4
+; GCN-NEXT:    call void @__atomic_store(i64 4, ptr [[TMP2]], ptr addrspace(5) [[TMP3]], i32 0)
+; GCN-NEXT:    call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[TMP3]])
 ; GCN-NEXT:    ret void
 ;
   store atomic i32 %val, ptr addrspace(1) %ptr monotonic, align 1


        

