[llvm] ddca666 - [ARM] Fix shouldExpandAtomicLoadInIR for subtargets without ldrexd.

Eli Friedman via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 18 15:54:55 PDT 2022


Author: Eli Friedman
Date: 2022-03-18T15:54:38-07:00
New Revision: ddca66622c62721f2d05ffebefda17fc567d2cfd

URL: https://github.com/llvm/llvm-project/commit/ddca66622c62721f2d05ffebefda17fc567d2cfd
DIFF: https://github.com/llvm/llvm-project/commit/ddca66622c62721f2d05ffebefda17fc567d2cfd.diff

LOG: [ARM] Fix shouldExpandAtomicLoadInIR for subtargets without ldrexd.

Regression from 2f497ec3; we should not try to generate ldrexd on
targets that don't have it.

Also, while I'm here, fix shouldExpandAtomicStoreInIR for consistency.
That doesn't have any practical effect, though: on Thumb targets where
we need to use __sync_* libcalls, there is no libcall for stores, so
SelectionDAG emits a call to __sync_lock_test_and_set_8 anyway.
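For reference, the input that hits the fixed path is a 64-bit atomic
load, as in the test update below. A minimal illustrative sketch
(modeled on test_old_load_64bit; not part of the commit):

; On a pre-v7 Thumb target (which lacks ldrexd), this load must not get
; the LL-only expansion; per the updated THUMBONE checks below, it is
; lowered to a __sync_val_compare_and_swap_8 libcall instead.
define i64 @illustrative_load_64bit(i64* %p) {
  %val = load atomic i64, i64* %p seq_cst, align 8
  ret i64 %val
}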

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/test/CodeGen/ARM/atomic-load-store.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 11c3c3192eb3f..256ae98f548b9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -20976,8 +20976,16 @@ Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
 // anything for those.
 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+  bool has64BitAtomicStore;
+  if (Subtarget->isMClass())
+    has64BitAtomicStore = false;
+  else if (Subtarget->isThumb())
+    has64BitAtomicStore = Subtarget->hasV7Ops();
+  else
+    has64BitAtomicStore = Subtarget->hasV6Ops();
+
   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
-  return (Size == 64) && !Subtarget->isMClass();
+  return Size == 64 && has64BitAtomicStore;
 }
 
 // Loads and stores less than 64-bits are already atomic; ones above that
@@ -20989,9 +20997,17 @@ bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
 // sections A8.8.72-74 LDRD)
 TargetLowering::AtomicExpansionKind
 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+  bool has64BitAtomicLoad;
+  if (Subtarget->isMClass())
+    has64BitAtomicLoad = false;
+  else if (Subtarget->isThumb())
+    has64BitAtomicLoad = Subtarget->hasV7Ops();
+  else
+    has64BitAtomicLoad = Subtarget->hasV6Ops();
+
   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
-  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
-                                                  : AtomicExpansionKind::None;
+  return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly
+                                            : AtomicExpansionKind::None;
 }
 
 // For the real atomic operations, we have ldrex/strex up to 32 bits,

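On the store side, the log above explains why the matching fix is
behavior-neutral. An illustrative sketch (hypothetical function, not
from the committed test): a 64-bit seq_cst atomic store, which on these
__sync_*-libcall Thumb targets SelectionDAG lowers the same way whether
or not shouldExpandAtomicStoreInIR fires.

; Hypothetical example; there is no __sync libcall for stores, so this
; becomes a call to __sync_lock_test_and_set_8 either way.
define void @illustrative_store_64bit(i64 %val, i64* %p) {
  store atomic i64 %val, i64* %p seq_cst, align 8
  ret void
}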
diff --git a/llvm/test/CodeGen/ARM/atomic-load-store.ll b/llvm/test/CodeGen/ARM/atomic-load-store.ll
index 876d6124ed211..ac33e4c65ca83 100644
--- a/llvm/test/CodeGen/ARM/atomic-load-store.ll
+++ b/llvm/test/CodeGen/ARM/atomic-load-store.ll
@@ -270,8 +270,15 @@ define i64 @test_old_load_64bit(i64* %p) {
 ;
 ; THUMBONE-LABEL: test_old_load_64bit:
 ; THUMBONE:       @ %bb.0:
-; THUMBONE-NEXT:    ldaexd r0, r1, [r0]
-; THUMBONE-NEXT:    bx lr
+; THUMBONE-NEXT:    push {r7, lr}
+; THUMBONE-NEXT:    sub sp, #8
+; THUMBONE-NEXT:    movs r2, #0
+; THUMBONE-NEXT:    str r2, [sp]
+; THUMBONE-NEXT:    str r2, [sp, #4]
+; THUMBONE-NEXT:    mov r3, r2
+; THUMBONE-NEXT:    bl __sync_val_compare_and_swap_8
+; THUMBONE-NEXT:    add sp, #8
+; THUMBONE-NEXT:    pop {r7, pc}
 ;
 ; ARMV4-LABEL: test_old_load_64bit:
 ; ARMV4:       @ %bb.0:

