[llvm] r253587 - [AArch64] Refactoring aarch64-ldst-opt. NFC.

Jun Bum Lim via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 19 10:41:27 PST 2015


Author: junbuml
Date: Thu Nov 19 12:41:27 2015
New Revision: 253587

URL: http://llvm.org/viewvc/llvm-project?rev=253587&view=rev
Log:
 [AArch64] Refactoring aarch64-ldst-opt. NFC.

Summary:
 * Rename isSmallTypeLdMerge() to isNarrowLoad().
 * Rename NumSmallTypeMerged to NumNarrowLoadsPromoted.
 * Use the Subtarget member variable instead of re-fetching the subtarget
   from the MachineFunction (see the sketch below).
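
A minimal sketch of the third item's pattern (the types are LLVM's, but the
class body here is simplified and hypothetical, not a drop-in pass): cache
the subtarget pointer once per function and let every later query use the
member.

  class AArch64LoadStoreOpt /* : public MachineFunctionPass */ {
    const AArch64Subtarget *Subtarget = nullptr; // cached once per function

  public:
    bool runOnMachineFunction(MachineFunction &Fn) {
      Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
      return enableNarrowLdMerge(); // members now query Subtarget directly
    }

    bool enableNarrowLdMerge() const {
      return Subtarget->isCortexA57() && !Subtarget->requiresStrictAlign();
    }
  };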

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp

Modified: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp?rev=253587&r1=253586&r2=253587&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp Thu Nov 19 12:41:27 2015
@@ -41,7 +41,7 @@ STATISTIC(NumPostFolded, "Number of post
 STATISTIC(NumPreFolded, "Number of pre-index updates folded");
 STATISTIC(NumUnscaledPairCreated,
           "Number of load/store from unscaled generated");
-STATISTIC(NumSmallTypeMerged, "Number of small type loads merged");
+STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
 
 static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                    cl::init(20), cl::Hidden);
@@ -189,7 +189,7 @@ static unsigned getBitExtrOpcode(Machine
   }
 }
 
-static bool isSmallTypeLdMerge(unsigned Opc) {
+static bool isNarrowLoad(unsigned Opc) {
   switch (Opc) {
   default:
     return false;
@@ -205,8 +205,8 @@ static bool isSmallTypeLdMerge(unsigned
   }
 }
 
-static bool isSmallTypeLdMerge(MachineInstr *MI) {
-  return isSmallTypeLdMerge(MI->getOpcode());
+static bool isNarrowLoad(MachineInstr *MI) {
+  return isNarrowLoad(MI->getOpcode());
 }
 
 // Scaling factor for unscaled load or store.
@@ -582,7 +582,7 @@ AArch64LoadStoreOpt::mergePairedInsns(Ma
 
   int OffsetImm = getLdStOffsetOp(RtMI).getImm();
 
-  if (isSmallTypeLdMerge(Opc)) {
+  if (isNarrowLoad(Opc)) {
     // Change the scaled offset from small to large type.
     if (!IsUnscaled) {
       assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
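
An aside on the assert above: merging two narrow loads doubles the element
size, so a scaled (element-unit) offset must be halved, which is only valid
when it is even. A hedged arithmetic sketch (the halving itself sits just
past the diff context shown):

  // Two i16 loads at halfword offsets 6 and 7 cover bytes 12..15; the
  // merged i32 load addresses the same bytes at word offset 6 / 2 == 3.
  int OffsetImm = 6;
  assert((OffsetImm & 1) == 0 && "Unexpected offset to merge");
  OffsetImm /= 2; // now in the wider type's scale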
@@ -840,8 +840,7 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
   int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
-  if (!isSmallTypeLdMerge(Opc) &&
-      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
+  if (!isNarrowLoad(Opc) && !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
     return E;
 
   // Track which registers have been modified and used between the first insn
@@ -900,15 +899,15 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
         // If the resultant immediate offset of merging these instructions
         // is out of range for a pairwise instruction, bail and keep looking.
         bool MIIsUnscaled = isUnscaledLdSt(MI);
-        bool IsSmallTypeLd = isSmallTypeLdMerge(MI->getOpcode());
-        if (!IsSmallTypeLd &&
+        bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
+        if (!IsNarrowLoad &&
             !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
           trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
           MemInsns.push_back(MI);
           continue;
         }
 
-        if (IsSmallTypeLd) {
+        if (IsNarrowLoad) {
           // If the alignment requirements of the larger type scaled load
           // instruction can't express the scaled offset of the smaller type
           // input, bail and keep looking.
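
For context on the two bail-outs in this hunk: AArch64 paired loads/stores
encode a 7-bit signed, scaled immediate, so a merged offset outside that
range cannot be expressed. A simplified model of inBoundsForPair (the
function name is real; this body is an illustrative assumption):

  static bool inBoundsForPairSketch(bool IsUnscaled, int Offset,
                                    int OffsetStride) {
    // Unscaled instructions carry byte offsets; convert to element units
    // before checking the imm7 range of the paired form.
    if (IsUnscaled)
      Offset /= OffsetStride;
    return Offset >= -64 && Offset <= 63;
  }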
@@ -1227,8 +1226,8 @@ bool AArch64LoadStoreOpt::tryToMergeLdSt
   LdStPairFlags Flags;
   MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
   if (Paired != E) {
-    if (isSmallTypeLdMerge(MI)) {
-      ++NumSmallTypeMerged;
+    if (isNarrowLoad(MI)) {
+      ++NumNarrowLoadsPromoted;
     } else {
       ++NumPairCreated;
       if (isUnscaledLdSt(MI))
@@ -1463,14 +1462,12 @@ bool AArch64LoadStoreOpt::optimizeBlock(
 }
 
 bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
-  const AArch64Subtarget *SubTarget =
-      &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
-  bool ProfitableArch = SubTarget->isCortexA57();
+  bool ProfitableArch = Subtarget->isCortexA57();
   // FIXME: The benefit from converting narrow loads into a wider load could be
   // microarchitectural as it assumes that a single load with two bitfield
   // extracts is cheaper than two narrow loads. Currently, this conversion is
   // enabled only in cortex-a57 on which performance benefits were verified.
-  return ProfitableArch & (!SubTarget->requiresStrictAlign());
+  return ProfitableArch && !Subtarget->requiresStrictAlign();
 }
 
 bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
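
To make the FIXME's tradeoff concrete, here is a source-level analogue of
the transform (illustrative only; the pass works on MachineInstrs, and this
little-endian C++ sketch is not from the patch):

  #include <cstdint>
  #include <cstring>

  // Two adjacent narrow loads...
  uint32_t twoNarrowLoads(const uint16_t *P) {
    return P[0] + P[1];
  }

  // ...versus one wide load plus two UBFX-style bitfield extracts,
  // assuming the halfword pair is 4-byte aligned and little-endian.
  uint32_t oneWideLoad(const uint16_t *P) {
    uint32_t W;
    std::memcpy(&W, P, sizeof(W)); // single 32-bit load
    uint32_t Lo = W & 0xFFFF;      // extract low halfword
    uint32_t Hi = W >> 16;         // extract high halfword
    return Lo + Hi;
  }

Whether the single load plus extracts wins is microarchitectural, which is
why the hook gates the merge to Cortex-A57 and to targets that do not
require strict alignment.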



