[llvm] r371511 - [Alignment][NFC] Use llvm::Align for TargetLowering::getPrefLoopAlignment

Guillaume Chatelet via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 10 05:00:43 PDT 2019


Author: gchatelet
Date: Tue Sep 10 05:00:43 2019
New Revision: 371511

URL: http://llvm.org/viewvc/llvm-project?rev=371511&view=rev
Log:
[Alignment][NFC] Use llvm::Align for TargetLowering::getPrefLoopAlignment

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Reviewed By: courbet

Subscribers: wuzish, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, MaskRay, jsji, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67386
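
For readers new to the type, here is a minimal, illustrative sketch (not part of
this commit, function name hypothetical) of how llvm::Align relates to the
log2-based values it replaces, assuming the llvm/Support/Alignment.h header
introduced in D64790:

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    void alignSketch() {
      // llvm::Align stores an alignment in bytes; the value must be a power of
      // two and defaults to 1, i.e. "no particular alignment".
      llvm::Align A(64);
      assert(A.value() == 64); // alignment in bytes
      assert(Log2(A) == 6);    // the log2 exponent that used to be stored directly
      // Converting a legacy log2 value, as the transitional setLogAlignment does:
      unsigned LogAlign = 5;
      assert(llvm::Align(1ULL << LogAlign) == llvm::Align(32));
    }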

Modified:
    llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
    llvm/trunk/include/llvm/CodeGen/TargetLowering.h
    llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
    llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h

Modified: llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h Tue Sep 10 05:00:43 2019
@@ -103,9 +103,9 @@ private:
   using LiveInVector = std::vector<RegisterMaskPair>;
   LiveInVector LiveIns;
 
-  /// Alignment of the basic block. Zero if the basic block does not need to be
-  /// aligned. The alignment is specified as log2(bytes).
-  unsigned LogAlignment = 0;
+  /// Alignment of the basic block. One if the basic block does not need to be
+  /// aligned.
+  llvm::Align Alignment;
 
   /// Indicate that this basic block is entered via an exception handler.
   bool IsEHPad = false;
@@ -374,11 +374,15 @@ public:
 
   /// Return alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  unsigned getLogAlignment() const { return LogAlignment; }
+  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
+  unsigned getLogAlignment() const { return Log2(Alignment); }
+  llvm::Align getAlignment() const { return Alignment; }
 
   /// Set alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  void setLogAlignment(unsigned A) { LogAlignment = A; }
+  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
+  void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
+  void setAlignment(llvm::Align A) { Alignment = A; }
 
   /// Returns true if the block is a landing pad. That is this basic block is
   /// entered via an exception handler.
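
As a quick illustration of the transitional API above (a sketch, not part of the
commit; the helper function is hypothetical), both setters store the same
llvm::Align, so call sites can migrate incrementally:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/Alignment.h"
    #include <cassert>

    void illustrateAccessors(llvm::MachineBasicBlock &MBB) {
      MBB.setLogAlignment(4);                        // legacy log2 interface
      assert(MBB.getAlignment() == llvm::Align(16)); // stored as Align(1ULL << 4)
      MBB.setAlignment(llvm::Align(16));             // new interface, same value
      assert(MBB.getLogAlignment() == 4);            // Log2(Align(16)) == 4
    }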

Modified: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetLowering.h?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h Tue Sep 10 05:00:43 2019
@@ -1593,8 +1593,8 @@ public:
   }
 
   /// Return the preferred loop alignment.
-  virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const {
-    return Log2(PrefLoopAlignment);
+  virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+    return PrefLoopAlignment;
   }
 
   /// Should loops be aligned even when the function is marked OptSize (but not

Modified: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp Tue Sep 10 05:00:43 2019
@@ -2807,8 +2807,8 @@ void MachineBlockPlacement::alignBlocks(
     if (!L)
       continue;
 
-    unsigned LogAlign = TLI->getPrefLoopLogAlignment(L);
-    if (!LogAlign)
+    const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+    if (Align == 1)
       continue; // Don't care about loop alignment.
 
     // If the block is cold relative to the function entry don't waste space
@@ -2832,7 +2832,7 @@ void MachineBlockPlacement::alignBlocks(
     // Force alignment if all the predecessors are jumps. We already checked
     // that the block isn't cold above.
     if (!LayoutPred->isSuccessor(ChainBB)) {
-      ChainBB->setLogAlignment(LogAlign);
+      ChainBB->setLogAlignment(Log2(Align));
       continue;
     }
 
@@ -2844,7 +2844,7 @@ void MachineBlockPlacement::alignBlocks(
         MBPI->getEdgeProbability(LayoutPred, ChainBB);
     BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
     if (LayoutEdgeFreq <= (Freq * ColdProb))
-      ChainBB->setLogAlignment(LogAlign);
+      ChainBB->setLogAlignment(Log2(Align));
   }
 }
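
Note that the "don't care" check above changes form but not meaning: a log2 value
of 0 and llvm::Align(1) both denote one-byte (i.e. no extra) alignment, so the
new "Align == 1" test matches the previous "!LogAlign" test.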
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Tue Sep 10 05:00:43 2019
@@ -10673,15 +10673,15 @@ void SITargetLowering::computeKnownBitsF
   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
 }
 
-unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
-  const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML);
-  const unsigned CacheLineLogAlign = 6; // log2(64)
+llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+  const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
+  const llvm::Align CacheLineAlign = llvm::Align(64);
 
   // Pre-GFX10 target did not benefit from loop alignment
   if (!ML || DisableLoopAlignment ||
       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
       getSubtarget()->hasInstFwdPrefetchBug())
-    return PrefLogAlign;
+    return PrefAlign;
 
   // On GFX10 I$ is 4 x 64 bytes cache lines.
   // By default prefetcher keeps one cache line behind and reads two ahead.
@@ -10695,8 +10695,8 @@ unsigned SITargetLowering::getPrefLoopLo
 
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
   const MachineBasicBlock *Header = ML->getHeader();
-  if (Header->getLogAlignment() != PrefLogAlign)
-    return Header->getLogAlignment(); // Already processed.
+  if (Header->getAlignment() != PrefAlign)
+    return Header->getAlignment(); // Already processed.
 
   unsigned LoopSize = 0;
   for (const MachineBasicBlock *MBB : ML->blocks()) {
@@ -10708,15 +10708,15 @@ unsigned SITargetLowering::getPrefLoopLo
     for (const MachineInstr &MI : *MBB) {
       LoopSize += TII->getInstSizeInBytes(MI);
       if (LoopSize > 192)
-        return PrefLogAlign;
+        return PrefAlign;
     }
   }
 
   if (LoopSize <= 64)
-    return PrefLogAlign;
+    return PrefAlign;
 
   if (LoopSize <= 128)
-    return CacheLineLogAlign;
+    return CacheLineAlign;
 
   // If any of parent loops is surrounded by prefetch instructions do not
   // insert new for inner loop, which would reset parent's settings.
@@ -10724,7 +10724,7 @@ unsigned SITargetLowering::getPrefLoopLo
     if (MachineBasicBlock *Exit = P->getExitBlock()) {
       auto I = Exit->getFirstNonDebugInstr();
       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
-        return CacheLineLogAlign;
+        return CacheLineAlign;
     }
   }
 
@@ -10741,7 +10741,7 @@ unsigned SITargetLowering::getPrefLoopLo
       .addImm(2); // prefetch 1 line behind PC
   }
 
-  return CacheLineLogAlign;
+  return CacheLineAlign;
 }
 
 LLVM_ATTRIBUTE_UNUSED

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h Tue Sep 10 05:00:43 2019
@@ -379,7 +379,7 @@ public:
                                     unsigned Depth = 0) const override;
   AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
 
-  unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
+  llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
 
   void allocateHSAUserSGPRs(CCState &CCInfo,
                             MachineFunction &MF,

Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Tue Sep 10 05:00:43 2019
@@ -14006,7 +14006,7 @@ void PPCTargetLowering::computeKnownBits
   }
 }
 
-unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
+llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
   switch (Subtarget.getDarwinDirective()) {
   default: break;
   case PPC::DIR_970:
@@ -14027,7 +14027,7 @@ unsigned PPCTargetLowering::getPrefLoopL
       // Actual alignment of the loop will depend on the hotness check and other
       // logic in alignBlocks.
       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
-        return 5;
+        return llvm::Align(32);
     }
 
     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
@@ -14043,13 +14043,13 @@ unsigned PPCTargetLowering::getPrefLoopL
       }
 
     if (LoopSize > 16 && LoopSize <= 32)
-      return 5;
+      return llvm::Align(32);
 
     break;
   }
   }
 
-  return TargetLowering::getPrefLoopLogAlignment(ML);
+  return TargetLowering::getPrefLoopAlignment(ML);
 }
 
 /// getConstraintType - Given a constraint, return the type of
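
For reference, the values returned above are now byte alignments rather than
log2 exponents: the previous return value of 5 corresponds to llvm::Align(32),
since 2^5 = 32 bytes, so the requested loop alignment is unchanged.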

Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h?rev=371511&r1=371510&r2=371511&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h Tue Sep 10 05:00:43 2019
@@ -735,7 +735,7 @@ namespace llvm {
                                        const SelectionDAG &DAG,
                                        unsigned Depth = 0) const override;
 
-    unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
+    llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
 
     bool shouldInsertFencesForAtomic(const Instruction *I) const override {
       return true;



