[llvm] [PowerPC] Peephole address calculation in TOC memops (PR #76488)

Sean Fertile via llvm-commits llvm-commits at lists.llvm.org
Tue May 14 07:23:49 PDT 2024


================
@@ -7662,238 +7662,212 @@ static void foldADDIForLocalExecAccesses(SDNode *N, SelectionDAG *DAG) {
     DAG->RemoveDeadNode(InitialADDI.getNode());
 }
 
-void PPCDAGToDAGISel::PeepholePPC64() {
-  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
-
-  while (Position != CurDAG->allnodes_begin()) {
-    SDNode *N = &*--Position;
-    // Skip dead nodes and any non-machine opcodes.
-    if (N->use_empty() || !N->isMachineOpcode())
-      continue;
+static bool isValidMemOp(SDNode *N, bool &IsLoad, MaybeAlign &ExtraAlign) {
+  switch (N->getMachineOpcode()) {
+  default:
+    return false;
+  // Global must be word-aligned for LD, STD, LWA.
+  case PPC::LWA:
+  case PPC::LD:
+  case PPC::DFLOADf64:
+  case PPC::DFLOADf32:
+    ExtraAlign = Align(4);
+    [[fallthrough]];
+  case PPC::LBZ:
+  case PPC::LBZ8:
+  case PPC::LFD:
+  case PPC::LFS:
+  case PPC::LHA:
+  case PPC::LHA8:
+  case PPC::LHZ:
+  case PPC::LHZ8:
+  case PPC::LWZ:
+  case PPC::LWZ8:
+    IsLoad = true;
+    return true;
+  case PPC::STD:
+  case PPC::DFSTOREf64:
+  case PPC::DFSTOREf32:
+    ExtraAlign = Align(4);
+    [[fallthrough]];
+  case PPC::STB:
+  case PPC::STB8:
+  case PPC::STFD:
+  case PPC::STFS:
+  case PPC::STH:
+  case PPC::STH8:
+  case PPC::STW:
+  case PPC::STW8:
+    return true;
+  }
+}
 
-    if (isVSXSwap(SDValue(N, 0)))
-      reduceVSXSwap(N, CurDAG);
+static bool isMemBaseCombinable(SDValue Base) {
+  if (!Base.isMachineOpcode())
+    return false;
+  switch (Base.getMachineOpcode()) {
+  default:
+    return false;
+  case PPC::ADDI8:
+  case PPC::ADDI:
+  case PPC::ADDIdtprelL:
+  case PPC::ADDItlsldL:
+  case PPC::ADDItocL8:
+  case PPC::ADDItoc:
+  case PPC::ADDItoc8:
+    return true;
+  }
+}
 
-    // This optimization is performed for non-TOC-based local-exec accesses.
-    foldADDIForLocalExecAccesses(N, CurDAG);
+static void peepholeMemOffset(SDNode *N, SelectionDAG *DAG,
+                              const PPCSubtarget *Subtarget) {
+  // TODO: Enable for AIX 32-bit.
+  if (!Subtarget->isPPC64())
+    return;
 
-    unsigned FirstOp;
-    unsigned StorageOpcode = N->getMachineOpcode();
-    bool RequiresMod4Offset = false;
+  bool IsLoad = false;
+  MaybeAlign ExtraAlign;
+  if (!isValidMemOp(N, IsLoad, ExtraAlign))
+    return;
 
-    switch (StorageOpcode) {
-    default: continue;
+  SDValue MemBase = N->getOperand(IsLoad ? 1 : 2);
+  if (!isMemBaseCombinable(MemBase))
+    return;
 
-    case PPC::LWA:
-    case PPC::LD:
-    case PPC::DFLOADf64:
-    case PPC::DFLOADf32:
-      RequiresMod4Offset = true;
-      [[fallthrough]];
-    case PPC::LBZ:
-    case PPC::LBZ8:
-    case PPC::LFD:
-    case PPC::LFS:
-    case PPC::LHA:
-    case PPC::LHA8:
-    case PPC::LHZ:
-    case PPC::LHZ8:
-    case PPC::LWZ:
-    case PPC::LWZ8:
-      FirstOp = 0;
-      break;
+  // Only fold when the memop's offset operand is an immediate constant.
+  auto *MemOffset = dyn_cast<ConstantSDNode>(N->getOperand(IsLoad ? 0 : 1));
+  if (!MemOffset)
+    return;
 
-    case PPC::STD:
-    case PPC::DFSTOREf64:
-    case PPC::DFSTOREf32:
-      RequiresMod4Offset = true;
-      [[fallthrough]];
-    case PPC::STB:
-    case PPC::STB8:
-    case PPC::STFD:
-    case PPC::STFS:
-    case PPC::STH:
-    case PPC::STH8:
-    case PPC::STW:
-    case PPC::STW8:
-      FirstOp = 1;
-      break;
-    }
+  SDValue ImmOp, RegOp;
+  unsigned BaseOpc = MemBase.getMachineOpcode();
+  assert(MemBase.getNumOperands() == 2 && "Invalid base of memop with offset!");
 
-    // If this is a load or store with a zero offset, or within the alignment,
-    // we may be able to fold an add-immediate into the memory operation.
-    // The check against alignment is below, as it can't occur until we check
-    // the arguments to N
-    if (!isa<ConstantSDNode>(N->getOperand(FirstOp)))
-      continue;
+  // ADDItoc and ADDItoc8 ('la') put the register in the second operand.
+  if (BaseOpc == PPC::ADDItoc || BaseOpc == PPC::ADDItoc8) {
+    ImmOp = MemBase.getOperand(0);
+    RegOp = MemBase.getOperand(1);
+  } else {
+    ImmOp = MemBase.getOperand(1);
+    RegOp = MemBase.getOperand(0);
+  }
 
-    SDValue Base = N->getOperand(FirstOp + 1);
-    if (!Base.isMachineOpcode())
-      continue;
+  MaybeAlign ImmAlign;
+  if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOp))
+    ImmAlign = GA->getGlobal()->getPointerAlignment(DAG->getDataLayout());
+  else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOp))
+    ImmAlign = CP->getAlign();
 
-    unsigned Flags = 0;
-    bool ReplaceFlags = true;
+  if (ImmAlign && ExtraAlign && ImmAlign.value() < ExtraAlign.value())
+    return;
 
-    // When the feeding operation is an add-immediate of some sort,
-    // determine whether we need to add relocation information to the
-    // target flags on the immediate operand when we fold it into the
-    // load instruction.
-    //
-    // For something like ADDItocL8, the relocation information is
-    // inferred from the opcode; when we process it in the AsmPrinter,
-    // we add the necessary relocation there.  A load, though, can receive
-    // relocation from various flavors of ADDIxxx, so we need to carry
-    // the relocation information in the target flags.
-    switch (Base.getMachineOpcode()) {
-    default: continue;
-
-    case PPC::ADDI8:
-    case PPC::ADDI:
-      // In some cases (such as TLS) the relocation information
-      // is already in place on the operand, so copying the operand
-      // is sufficient.
-      ReplaceFlags = false;
-      break;
-    case PPC::ADDIdtprelL:
-      Flags = PPCII::MO_DTPREL_LO;
-      break;
-    case PPC::ADDItlsldL:
-      Flags = PPCII::MO_TLSLD_LO;
-      break;
-    case PPC::ADDItocL8:
-      Flags = PPCII::MO_TOC_LO;
-      break;
+  // On PPC64, the TOC base pointer is guaranteed by the ABI only to have
+  // 8-byte alignment, and so we can only use offsets less than 8 (otherwise,
+  // we might have needed different @ha relocation values for the offset
+  // pointers).
+  int MaxDisplacement = 7;
+  if (ImmAlign && ImmAlign.value().value() < 8)
+    MaxDisplacement = (int)ImmAlign.value().value() - 1;
+
+  // If the addis also contributes to the TOC relocation, it needs to be updated as well.
+  bool UpdateHaBase = false;
+  SDValue HaBase = MemBase.getOperand(0);
+  int64_t Offset = MemOffset->getSExtValue();
+
+  // Some flags on the addition also need to be carried over to the new memop.
+  PPCII::TOF NewOpFlags = PPCII::MO_NO_FLAG;
+  if (BaseOpc == PPC::ADDIdtprelL)
+    NewOpFlags = PPCII::MO_DTPREL_LO;
+  else if (BaseOpc == PPC::ADDItlsldL)
+    NewOpFlags = PPCII::MO_TLSLD_LO;
+  else if (BaseOpc == PPC::ADDItocL8)
+    NewOpFlags = PPCII::MO_TOC_LO;
+
+  if (NewOpFlags) {
+    if (Offset < 0 || Offset > MaxDisplacement) {
+      // This requires an ADDItocL8/ADDIStocHA8 pair; bail out if either node has multiple uses.
+      if (MemBase.getMachineOpcode() != PPC::ADDItocL8 || !MemBase.hasOneUse() ||
+          !HaBase.isMachineOpcode() || !HaBase.hasOneUse() ||
+          HaBase.getMachineOpcode() != PPC::ADDIStocHA8 ||
+          HaBase.getOperand(1) != ImmOp)
+        return;
+      UpdateHaBase = true;
     }
 
-    SDValue ImmOpnd = Base.getOperand(1);
-
-    // On PPC64, the TOC base pointer is guaranteed by the ABI only to have
-    // 8-byte alignment, and so we can only use offsets less than 8 (otherwise,
-    // we might have needed different @ha relocation values for the offset
-    // pointers).
-    int MaxDisplacement = 7;
-    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
-      const GlobalValue *GV = GA->getGlobal();
-      Align Alignment = GV->getPointerAlignment(CurDAG->getDataLayout());
-      MaxDisplacement = std::min((int)Alignment.value() - 1, MaxDisplacement);
+    if (const auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOp)) {
+      // We can't perform this optimization for data whose alignment is
+      // insufficient for the instruction encoding.
+      if (ImmAlign && ImmAlign.value() < Align(4) &&
+          (ExtraAlign || (Offset % 4) != 0))
+        return;
+      ImmOp = DAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(ImmOp),
+                                          MVT::i64, Offset, NewOpFlags);
+    } else if (const auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOp)) {
+      ImmOp = DAG->getTargetConstantPool(CP->getConstVal(), MVT::i64,
+                                         CP->getAlign(), Offset, NewOpFlags);
     }
+  } else {
+    if (ImmAlign && ExtraAlign && ImmAlign.value() < ExtraAlign.value())
+      return;
+    if (auto *C = dyn_cast<ConstantSDNode>(ImmOp)) {
+      Offset += C->getSExtValue();
 
-    bool UpdateHBase = false;
-    SDValue HBase = Base.getOperand(0);
-
-    int Offset = N->getConstantOperandVal(FirstOp);
-    if (ReplaceFlags) {
-      if (Offset < 0 || Offset > MaxDisplacement) {
-        // If we have a addi(toc at l)/addis(toc at ha) pair, and the addis has only
-        // one use, then we can do this for any offset, we just need to also
-        // update the offset (i.e. the symbol addend) on the addis also.
-        if (Base.getMachineOpcode() != PPC::ADDItocL8)
-          continue;
-
-        if (!HBase.isMachineOpcode() ||
-            HBase.getMachineOpcode() != PPC::ADDIStocHA8)
-          continue;
-
-        if (!Base.hasOneUse() || !HBase.hasOneUse())
-          continue;
-
-        SDValue HImmOpnd = HBase.getOperand(1);
-        if (HImmOpnd != ImmOpnd)
-          continue;
-
-        UpdateHBase = true;
-      }
-    } else {
-      // Global addresses can be folded, but only if they are sufficiently
-      // aligned.
-      if (RequiresMod4Offset) {
-        if (GlobalAddressSDNode *GA =
-                dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
-          const GlobalValue *GV = GA->getGlobal();
-          Align Alignment = GV->getPointerAlignment(CurDAG->getDataLayout());
-          if (Alignment < 4)
-            continue;
-        }
+      if ((Offset % ExtraAlign.valueOrOne().value()) != 0 || !isInt<16>(Offset))
----------------
mandlebug wrote:

Split the `!isInt<16>(Offset)` check out into its own early return. 
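
For illustration, one way the split might read (just a sketch, assuming both failure cases simply bail out of the peephole, as the combined check appears to):

```
if (!isInt<16>(Offset))
  return;
if ((Offset % ExtraAlign.valueOrOne().value()) != 0)
  return;
```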

https://github.com/llvm/llvm-project/pull/76488

