[llvm] [AArch64][ISel] Add support for v8.4a RCpc `ldapur`/`stlur` (PR #67879)

via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 30 02:48:48 PDT 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-aarch64

<details>
<summary>Changes</summary>

The AArch64 backend now supports the v8.4a Load-Acquire RCpc (`ldapur`) and Store-Release (`stlur`) register unscaled instructions. To achieve this, the check that rejects offsets valid as scaled immediates had to be dropped from `SelectAddrModeUnscaled`. If we want to retain that behavior, we could instead add a helper that checks whether the uses are `ISD::ATOMIC_LOAD`/`ISD::ATOMIC_STORE` (similar to `isWorthFoldingADDlow`) and bail out only when they are not; in that case, there is also a minor opportunity to refactor the reinstated check to use `isValidAsScaledImmediate`.
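
For reference, here is a minimal sketch (not part of this patch) of that alternative; the helper name `hasOnlyAtomicMemUses` is hypothetical, and the second snippet paraphrases where it would slot into `SelectAddrModeUnscaled`:

```cpp
// Hypothetical helper, in the spirit of isWorthFoldingADDlow: returns true
// iff every user of this value is an atomic load or store, i.e. a candidate
// for the unscaled LDAPUR/STLUR forms.
static bool hasOnlyAtomicMemUses(SDValue N) {
  for (SDNode *Use : N->uses())
    if (Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;
  return true;
}

// SelectAddrModeUnscaled could then keep bailing out on offsets that the
// scaled addressing mode can handle, except when only atomics use them:
if (isValidAsScaledImmediate(RHSC, 0x1000, Size) && !hasOnlyAtomicMemUses(N))
  return false;
```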

---

Patch is 49.08 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/67879.diff


6 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (+11-7) 
- (modified) llvm/lib/Target/AArch64/AArch64InstrAtomics.td (+31) 
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (+1-1) 
- (modified) llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp (+17-3) 
- (added) llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll (+817) 
- (added) llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-store-rcpc_immo.ll (+358) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 38759a2474518fc..7617dccdeee397f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -997,6 +997,15 @@ static bool isWorthFoldingADDlow(SDValue N) {
   return true;
 }
 
+/// Check if the immediate offset is valid as a scaled immediate.
+static bool isValidAsScaledImmediate(int64_t Offset, unsigned Range,
+                                     unsigned Size) {
+  if ((Offset & (Size - 1)) == 0 && Offset >= 0 &&
+      Offset < (Range << Log2_32(Size)))
+    return true;
+  return false;
+}
+
 /// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
 /// immediate" address.  The "Size" argument is the size in bytes of the memory
 /// reference, which determines the scale.
@@ -1092,7 +1101,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       int64_t RHSC = (int64_t)RHS->getZExtValue();
       unsigned Scale = Log2_32(Size);
-      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
+      if (isValidAsScaledImmediate(RHSC, 0x1000, Size)) {
         Base = N.getOperand(0);
         if (Base.getOpcode() == ISD::FrameIndex) {
           int FI = cast<FrameIndexSDNode>(Base)->getIndex();
@@ -1130,10 +1139,6 @@ bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
     return false;
   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
     int64_t RHSC = RHS->getSExtValue();
-    // If the offset is valid as a scaled immediate, don't match here.
-    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
-        RHSC < (0x1000 << Log2_32(Size)))
-      return false;
     if (RHSC >= -256 && RHSC < 256) {
       Base = N.getOperand(0);
       if (Base.getOpcode() == ISD::FrameIndex) {
@@ -1312,11 +1317,10 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
   //     LDR  X2, [BaseReg, X0]
   if (isa<ConstantSDNode>(RHS)) {
     int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
-    unsigned Scale = Log2_32(Size);
     // Skip the immediate can be selected by load/store addressing mode.
     // Also skip the immediate can be encoded by a single ADD (SUB is also
     // checked by using -ImmOff).
-    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
+    if (isValidAsScaledImmediate(ImmOff, 0x1000, Size) ||
         isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
       return false;
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index fa5a8515ed92eca..0002db52b1995c0 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -573,3 +573,34 @@ let Predicates = [HasRCPC3, HasNEON] in {
                 (i64 (bitconvert (v1f64 VecListOne64:$Vt)))),
             (STL1 (SUBREG_TO_REG (i64 0), VecListOne64:$Vt, dsub), (i64 0), GPR64sp:$Rn)>;
 }
+
+// v8.4a FEAT_LRCPC2 patterns
+let Predicates = [HasRCPC_IMMO] in {
+  // Load-Acquire RCpc Register unscaled loads
+  def : Pat<(acquiring_load<atomic_load_az_8>
+               (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
+          (LDAPURBi GPR64sp:$Rn, simm9:$offset)>;
+  def : Pat<(acquiring_load<atomic_load_az_16>
+               (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
+          (LDAPURHi GPR64sp:$Rn, simm9:$offset)>;
+  def : Pat<(acquiring_load<atomic_load_32>
+               (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
+          (LDAPURi GPR64sp:$Rn, simm9:$offset)>;
+  def : Pat<(acquiring_load<atomic_load_64>
+               (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+          (LDAPURXi GPR64sp:$Rn, simm9:$offset)>;
+
+  // Store-Release Register unscaled stores
+  def : Pat<(releasing_store<atomic_store_8>
+               (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
+          (STLURBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
+  def : Pat<(releasing_store<atomic_store_16>
+               (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
+          (STLURHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
+  def : Pat<(releasing_store<atomic_store_32>
+               (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
+          (STLURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
+  def : Pat<(releasing_store<atomic_store_64>
+               (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
+          (STLURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
+}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index cd377d659ad3ee9..6f5533945718710 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -94,7 +94,7 @@ def HasTLB_RMI          : Predicate<"Subtarget->hasTLB_RMI()">,
 def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                        AssemblerPredicateWithAll<(all_of FeatureFlagM), "flagm">;
 
-def HasRCPC_IMMO      : Predicate<"Subtarget->hasRCPCImm()">,
+def HasRCPC_IMMO      : Predicate<"Subtarget->hasRCPC_IMMO()">,
                        AssemblerPredicateWithAll<(all_of FeatureRCPC_IMMO), "rcpc-immo">;
 
 def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index e0837b689607cc2..6a9b4082f952fe8 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2895,6 +2895,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
       } else {
         static constexpr unsigned Opcodes[] = {AArch64::STLRB, AArch64::STLRH,
                                                AArch64::STLRW, AArch64::STLRX};
+        static constexpr unsigned OpcodesUnscaled[] = {
+            AArch64::STLURBi, AArch64::STLURHi, AArch64::STLURWi,
+            AArch64::STLURXi};
         Register ValReg = LdSt.getReg(0);
         if (MRI.getType(ValReg).getSizeInBits() == 64 && MemSizeInBits != 64) {
           // Emit a subreg copy of 32 bits.
@@ -2903,6 +2906,20 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
               .addReg(I.getOperand(0).getReg(), 0, AArch64::sub_32);
           I.getOperand(0).setReg(NewVal);
         }
+        if (STI.hasRCPC_IMMO()) {
+          auto AddrModeFns =
+              selectAddrModeUnscaled(I.getOperand(1), MemSizeInBytes);
+          if (AddrModeFns) {
+            auto NewStoreI = MIB.buildInstr(
+                OpcodesUnscaled[Log2_32(MemSizeInBytes)], {}, {}, I.getFlags());
+            NewStoreI.addUse(I.getOperand(0).getReg());
+            NewStoreI.cloneMemRefs(I);
+            for (const auto &Fn : *AddrModeFns)
+              Fn(NewStoreI);
+            I.eraseFromParent();
+            return constrainSelectedInstRegOperands(*NewStoreI, TII, TRI, RBI);
+          }
+        }
         I.setDesc(TII.get(Opcodes[Log2_32(MemSizeInBytes)]));
       }
       constrainSelectedInstRegOperands(I, TII, TRI, RBI);
@@ -7182,9 +7199,6 @@ AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
     return std::nullopt;
   RHSC = RHSOp1.getCImm()->getSExtValue();
 
-  // If the offset is valid as a scaled immediate, don't match here.
-  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
-    return std::nullopt;
   if (RHSC >= -256 && RHSC < 256) {
     MachineOperand &Base = RootDef->getOperand(1);
     return {{
diff --git a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll
new file mode 100644
index 000000000000000..e5faf0bbc5b9287
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-load-rcpc_immo.ll
@@ -0,0 +1,817 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "\b(sp)\b" --filter "^\s*(ld|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"
+; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.4a -mattr=+rcpc-immo -O0 | FileCheck %s --check-prefixes=CHECK,-O0
+; RUN: llc %s -o - -verify-machineinstrs -mtriple=aarch64 -mattr=+v8.4a -mattr=+rcpc-immo -O1 | FileCheck %s --check-prefixes=CHECK,-O1
+
+define dso_local i8 @load_atomic_i8_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_unordered:
+; CHECK:    ldrb w0, [x0, #4]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep unordered, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_unordered_const:
+; CHECK:    ldrb w0, [x0, #4]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep unordered, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_monotonic:
+; CHECK:    ldrb w0, [x0, #4]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep monotonic, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_monotonic_const:
+; CHECK:    ldrb w0, [x0, #4]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep monotonic, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_acquire:
+; CHECK:    ldapurb w0, [x0, #4]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep acquire, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_acquire_const:
+; CHECK:    ldapurb w0, [x0, #4]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep acquire, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_seq_cst:
+; CHECK:    add x8, x0, #4
+; CHECK:    ldarb w0, [x8]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep seq_cst, align 1
+    ret i8 %r
+}
+
+define dso_local i8 @load_atomic_i8_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i8_aligned_seq_cst_const:
+; CHECK:    add x8, x0, #4
+; CHECK:    ldarb w0, [x8]
+    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
+    %r = load atomic i8, ptr %gep seq_cst, align 1
+    ret i8 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_unordered:
+; CHECK:    ldrh w0, [x0, #8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep unordered, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_unordered_const:
+; CHECK:    ldrh w0, [x0, #8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep unordered, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_monotonic:
+; CHECK:    ldrh w0, [x0, #8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep monotonic, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_monotonic_const:
+; CHECK:    ldrh w0, [x0, #8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep monotonic, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_acquire:
+; CHECK:    ldapurh w0, [x0, #8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep acquire, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_acquire_const:
+; CHECK:    ldapurh w0, [x0, #8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep acquire, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_seq_cst:
+; CHECK:    add x8, x0, #8
+; CHECK:    ldarh w0, [x8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep seq_cst, align 2
+    ret i16 %r
+}
+
+define dso_local i16 @load_atomic_i16_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i16_aligned_seq_cst_const:
+; CHECK:    add x8, x0, #8
+; CHECK:    ldarh w0, [x8]
+    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
+    %r = load atomic i16, ptr %gep seq_cst, align 2
+    ret i16 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_unordered:
+; CHECK:    ldr w0, [x0, #16]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep unordered, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_unordered_const:
+; CHECK:    ldr w0, [x0, #16]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep unordered, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_monotonic:
+; CHECK:    ldr w0, [x0, #16]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep monotonic, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_monotonic_const:
+; CHECK:    ldr w0, [x0, #16]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep monotonic, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_acquire:
+; CHECK:    ldapur w0, [x0, #16]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep acquire, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_acquire_const:
+; CHECK:    ldapur w0, [x0, #16]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep acquire, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_seq_cst:
+; CHECK:    add x8, x0, #16
+; CHECK:    ldar w0, [x8]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep seq_cst, align 4
+    ret i32 %r
+}
+
+define dso_local i32 @load_atomic_i32_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i32_aligned_seq_cst_const:
+; CHECK:    add x8, x0, #16
+; CHECK:    ldar w0, [x8]
+    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
+    %r = load atomic i32, ptr %gep seq_cst, align 4
+    ret i32 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_unordered:
+; CHECK:    ldr x0, [x0, #32]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep unordered, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_unordered_const:
+; CHECK:    ldr x0, [x0, #32]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep unordered, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_monotonic:
+; CHECK:    ldr x0, [x0, #32]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep monotonic, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_monotonic_const:
+; CHECK:    ldr x0, [x0, #32]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep monotonic, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_acquire:
+; CHECK:    ldapur x0, [x0, #32]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep acquire, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_acquire_const:
+; CHECK:    ldapur x0, [x0, #32]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep acquire, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_seq_cst:
+; CHECK:    add x8, x0, #32
+; CHECK:    ldar x0, [x8]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep seq_cst, align 8
+    ret i64 %r
+}
+
+define dso_local i64 @load_atomic_i64_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i64_aligned_seq_cst_const:
+; CHECK:    add x8, x0, #32
+; CHECK:    ldar x0, [x8]
+    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
+    %r = load atomic i64, ptr %gep seq_cst, align 8
+    ret i64 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_unordered(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_unordered:
+; CHECK:    ldp x0, x1, [x0, #64]
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep unordered, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_unordered_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_unordered_const:
+; CHECK:    ldp x0, x1, [x0, #64]
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep unordered, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_monotonic(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_monotonic:
+; CHECK:    ldp x0, x1, [x0, #64]
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep monotonic, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_monotonic_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_monotonic_const:
+; CHECK:    ldp x0, x1, [x0, #64]
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep monotonic, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_acquire(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_acquire:
+; CHECK:    ldp x0, x1, [x0, #64]
+; CHECK:    dmb ishld
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep acquire, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_acquire_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_acquire_const:
+; CHECK:    ldp x0, x1, [x0, #64]
+; CHECK:    dmb ishld
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep acquire, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_seq_cst(ptr %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_seq_cst:
+; CHECK:    ldp x0, x1, [x0, #64]
+; CHECK:    dmb ish
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep seq_cst, align 16
+    ret i128 %r
+}
+
+define dso_local i128 @load_atomic_i128_aligned_seq_cst_const(ptr readonly %ptr) {
+; CHECK-LABEL: load_atomic_i128_aligned_seq_cst_const:
+; CHECK:    ldp x0, x1, [x0, #64]
+; CHECK:    dmb ish
+    %gep = getelementptr inbounds i128, ptr %ptr, i32 4
+    %r = load atomic i128, ptr %gep seq_cst, align 16
+    ret i128 %r
+}
+...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/67879

