[llvm] [ARM][CodeGen] Disable MEMCPY LDM/STM inlining for v7-m (PR #106378)

via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 28 05:11:19 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-arm

Author: Nashe Mncube (nasherm)

<details>
<summary>Changes</summary>

This patch disables the inlining of MEMCPY as LDM/STM sequences on v7-M targets; the copy is instead lowered to individual load and store instructions.
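For illustration, a small constant-size memcpy like the one below is the kind of call affected (a minimal sketch modelled on the new test added in this patch; the function name `@copy17` is illustrative): with this change, such copies on v7-M targets are lowered to individual word/halfword/byte loads and stores rather than being folded into LDM/STM.

```llvm
@d = external global [64 x i32]
@s = external global [64 x i32]

; A 17-byte copy: previously a candidate for LDM/STM inlining on v7-M.
define void @copy17() {
entry:
  tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 @s, ptr align 4 @d, i32 17, i1 false)
  ret void
}

declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1)
```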


---
Full diff: https://github.com/llvm/llvm-project/pull/106378.diff


4 Files Affected:

- (modified) llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp (+124) 
- (modified) llvm/lib/Target/ARM/ARMSelectionDAGInfo.h (+8) 
- (modified) llvm/lib/Target/ARM/ARMSubtarget.h (+6) 
- (added) llvm/test/CodeGen/ARM/memcpy-v7m.ll (+33) 


``````````diff
diff --git a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index c57825949c1cef..a5abaf466a1de9 100644
--- a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -138,6 +138,126 @@ SDValue ARMSelectionDAGInfo::EmitSpecializedLibcall(
   return CallResult.second;
 }
 
+SDValue
+ARMSelectionDAGInfo::EmitMemcpyAsLdSt(SelectionDAG &DAG, SDLoc dl,
+                                      const ARMSubtarget &Subtarget,
+                                      SDValue Chain,
+                                      SDValue Dst, SDValue Src,
+                                      uint64_t SizeVal, bool isVolatile,
+                                      MachinePointerInfo DstPtrInfo,
+                                      MachinePointerInfo SrcPtrInfo) const {
+  // Do repeated batches of 4-byte loads and stores.
+  unsigned BytesLeft = SizeVal & 3;
+  unsigned NumMemOps = SizeVal >> 2;
+  unsigned EmittedNumMemOps = 0;
+  EVT VT = MVT::i32;
+  unsigned VTSize = 4;
+  unsigned i = 0;
+  // Emit a maximum of 4 loads in Thumb1 since we have fewer registers
+  const unsigned MaxLoads = Subtarget.isThumb1Only() ? 4 : 6;
+  SDValue TFOps[6];
+  SDValue Loads[6];
+  uint64_t SrcOff = 0, DstOff = 0;
+
+  MachineMemOperand::Flags MOFlags = MachineMemOperand::Flags::MONone;
+  if (isVolatile)
+    MOFlags = MachineMemOperand::Flags::MOVolatile;
+  MachineMemOperand::Flags LoadMOFlags = MOFlags;
+  if (SrcPtrInfo.isDereferenceable(SizeVal, *DAG.getContext(),
+                                   DAG.getDataLayout()))
+    LoadMOFlags |= MachineMemOperand::Flags::MODereferenceable;
+  if (auto *V = SrcPtrInfo.V.dyn_cast<const Value *>())
+    if (isa<GlobalVariable>(V) && cast<GlobalVariable>(V)->isConstant())
+      LoadMOFlags |= MachineMemOperand::Flags::MOInvariant;
+  MachineMemOperand::Flags StoreMOFlags = MOFlags;
+  if (DstPtrInfo.isDereferenceable(SizeVal, *DAG.getContext(),
+                                   DAG.getDataLayout()))
+    StoreMOFlags |= MachineMemOperand::Flags::MODereferenceable;
+
+  // Emit up to MaxLoads loads, then a TokenFactor barrier, then the
+  // same number of stores.  The loads and stores may get combined into
+  // ldm/stm later on.
+  while (EmittedNumMemOps < NumMemOps) {
+    for (i = 0;
+         i < MaxLoads && EmittedNumMemOps + i < NumMemOps; ++i) {
+      Loads[i] = DAG.getLoad(VT, dl, Chain,
+                             DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
+                                         DAG.getConstant(SrcOff, dl, MVT::i32)),
+                             SrcPtrInfo.getWithOffset(SrcOff), MaybeAlign(0),
+                             LoadMOFlags);
+      TFOps[i] = Loads[i].getValue(1);
+      SrcOff += VTSize;
+    }
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        ArrayRef(TFOps, i));
+
+    for (i = 0;
+         i < MaxLoads && EmittedNumMemOps + i < NumMemOps; ++i) {
+      TFOps[i] = DAG.getStore(
+          Chain, dl, Loads[i],
+          DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
+                      DAG.getConstant(DstOff, dl, MVT::i32)),
+          DstPtrInfo.getWithOffset(DstOff), MaybeAlign(0), StoreMOFlags);
+      DstOff += VTSize;
+    }
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        ArrayRef(TFOps, i));
+
+    EmittedNumMemOps += i;
+  }
+
+  if (BytesLeft == 0)
+    return Chain;
+
+  // Issue loads / stores for the trailing (1 - 3) bytes.
+  unsigned BytesLeftSave = BytesLeft;
+  i = 0;
+  while (BytesLeft) {
+    if (BytesLeft >= 2) {
+      VT = MVT::i16;
+      VTSize = 2;
+    } else {
+      VT = MVT::i8;
+      VTSize = 1;
+    }
+
+    Loads[i] = DAG.getLoad(VT, dl, Chain,
+                           DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
+                                       DAG.getConstant(SrcOff, dl, MVT::i32)),
+                           SrcPtrInfo.getWithOffset(SrcOff), MaybeAlign(0),
+                           LoadMOFlags);
+    TFOps[i] = Loads[i].getValue(1);
+    ++i;
+    SrcOff += VTSize;
+    BytesLeft -= VTSize;
+  }
+  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                      ArrayRef(TFOps, i));
+
+  i = 0;
+  BytesLeft = BytesLeftSave;
+  while (BytesLeft) {
+    if (BytesLeft >= 2) {
+      VT = MVT::i16;
+      VTSize = 2;
+    } else {
+      VT = MVT::i8;
+      VTSize = 1;
+    }
+
+    TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
+                            DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
+                                        DAG.getConstant(DstOff, dl, MVT::i32)),
+                            DstPtrInfo.getWithOffset(DstOff), MaybeAlign(0),
+                            StoreMOFlags);
+    ++i;
+    DstOff += VTSize;
+    BytesLeft -= VTSize;
+  }
+  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                     ArrayRef(TFOps, i));
+}
+
 static bool shouldGenerateInlineTPLoop(const ARMSubtarget &Subtarget,
                                        const SelectionDAG &DAG,
                                        ConstantSDNode *ConstantSize,
@@ -192,6 +312,10 @@ SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(
     return EmitSpecializedLibcall(DAG, dl, Chain, Dst, Src, Size,
                                   Alignment.value(), RTLIB::MEMCPY);
 
+  if (Subtarget.wantsMEMCPYAsLdSt())
+    return EmitMemcpyAsLdSt(DAG, dl, Subtarget, Chain, Dst, Src, SizeVal,
+                            isVolatile, DstPtrInfo, SrcPtrInfo);
+
   unsigned BytesLeft = SizeVal & 3;
   unsigned NumMemOps = SizeVal >> 2;
   unsigned EmittedNumMemOps = 0;
diff --git a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
index 275b1c0f8dc017..64d643977914ff 100644
--- a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
+++ b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
@@ -44,6 +44,14 @@ class ARMSelectionDAGInfo : public SelectionDAGTargetInfo {
                                   MachinePointerInfo DstPtrInfo,
                                   MachinePointerInfo SrcPtrInfo) const override;
 
+  SDValue EmitMemcpyAsLdSt(SelectionDAG &DAG, SDLoc dl,
+                           const ARMSubtarget &Subtarget,
+                           SDValue Chain,
+                           SDValue Dst, SDValue Src,
+                           uint64_t SizeVal, bool isVolatile,
+                           MachinePointerInfo DstPtrInfo,
+                           MachinePointerInfo SrcPtrInfo) const;
+
   SDValue
   EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                            SDValue Dst, SDValue Src, SDValue Size,
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 00239ff94b7ba5..066cc1cf85ccfb 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -465,6 +465,12 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
   /// True if fast-isel is used.
   bool useFastISel() const;
 
+  /// True if a memcpy should be lowered to plain loads and stores rather than
+  /// being expanded as ARMISD::MEMCPY (which is turned into LDM/STM pairs).
+  bool wantsMEMCPYAsLdSt() const {
+    return HasV7Ops && ARMProcClass == MClass;
+  }
+
   /// Returns the correct return opcode for the current feature set.
   /// Use BX if available to allow mixing thumb/arm code, but fall back
   /// to plain mov pc,lr on ARMv4.
diff --git a/llvm/test/CodeGen/ARM/memcpy-v7m.ll b/llvm/test/CodeGen/ARM/memcpy-v7m.ll
new file mode 100644
index 00000000000000..1f3b64ba152f40
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/memcpy-v7m.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=thumbv7em-eabi -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8m-eabi -mcpu=teal -verify-machineinstrs %s -o - | FileCheck %s
+
+@d = external global [64 x i32]
+@s = external global [64 x i32]
+
+; Function Attrs: nounwind
+define void @t1() #0 {
+entry:
+; CHECK-LABEL: t1:
+; We use '[rl0-9]+' to allow 'r0'..'r12', 'lr'
+; CHECK: movt [[LB:[rl0-9]+]], :upper16:d
+; CHECK: movt [[SB:[rl0-9]+]], :upper16:s
+; CHECK-NOT: ldm
+; CHECK-NOT: stm
+    tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 @s, ptr align 4 @d, i32 17, i1 false)
+    ret void
+}
+
+; Function Attrs: nounwind
+define void @t2() #0 {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: movt [[LB:[rl0-9]+]], :upper16:d
+; CHECK: movt [[SB:[rl0-9]+]], :upper16:s
+; CHECK-NOT: ldm
+; CHECK-NOT: stm
+    tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 @s, ptr align 4 @d, i32 15, i1 false)
+    ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1

``````````

</details>


https://github.com/llvm/llvm-project/pull/106378

