[llvm] r298092 - [AArch64] Use alias analysis in the load/store optimization pass.

Chad Rosier via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 17 07:19:55 PDT 2017


Author: mcrosier
Date: Fri Mar 17 09:19:55 2017
New Revision: 298092

URL: http://llvm.org/viewvc/llvm-project?rev=298092&view=rev
Log:
[AArch64] Use alias analysis in the load/store optimization pass.

This allows the pass to rearrange loads and stores more aggressively: instead of relying solely on TII->areMemAccessesTriviallyDisjoint(), which can only compare base registers and offsets, the pass now queries alias analysis via MachineInstr::mayAlias() to prove that memory accesses are disjoint.

Differential Revision: http://reviews.llvm.org/D30903
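
As a hedged illustration (reconstructed from the MIR test added below, not part of the commit itself), source code along these lines now benefits. The __restrict qualifiers lower to 'noalias' IR arguments, which lets alias analysis prove the store to x[0] disjoint from the following load of y[1], something the old base-register-and-offset check could not do since x and y live in different registers:

  // Hypothetical source corresponding to the IR in ldst-opt-aa.mir below.
  void ldr_str_aa(int *__restrict x, const int *__restrict y) {
    x[0] = y[0]; // LDRWui + STRWui
    x[1] = y[1]; // LDRWui + STRWui; now pairable into LDPWi/STPWi
  }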

Added:
    llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp

Modified: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp?rev=298092&r1=298091&r2=298092&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp Fri Mar 17 09:19:55 2017
@@ -93,6 +93,7 @@ struct AArch64LoadStoreOpt : public Mach
     initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
   }
 
+  AliasAnalysis *AA;
   const AArch64InstrInfo *TII;
   const TargetRegisterInfo *TRI;
   const AArch64Subtarget *Subtarget;
@@ -100,6 +101,11 @@ struct AArch64LoadStoreOpt : public Mach
   // Track which registers have been modified and used.
   BitVector ModifiedRegs, UsedRegs;
 
+  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
   // Scan the instructions looking for a load/store that can be combined
   // with the current instruction into a load/store pair.
   // Return the matching instruction if one is found, else MBB->end().
@@ -936,7 +942,7 @@ static int alignTo(int Num, int PowOf2)
 }
 
 static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
-                     const AArch64InstrInfo *TII) {
+                     AliasAnalysis *AA) {
   // One of the instructions must modify memory.
   if (!MIa.mayStore() && !MIb.mayStore())
     return false;
@@ -945,14 +951,14 @@ static bool mayAlias(MachineInstr &MIa,
   if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
     return false;
 
-  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
+  return MIa.mayAlias(AA, MIb, /*UseTBAA*/false);
 }
 
 static bool mayAlias(MachineInstr &MIa,
                      SmallVectorImpl<MachineInstr *> &MemInsns,
-                     const AArch64InstrInfo *TII) {
+                     AliasAnalysis *AA) {
   for (MachineInstr *MIb : MemInsns)
-    if (mayAlias(MIa, *MIb, TII))
+    if (mayAlias(MIa, *MIb, AA))
       return true;
 
   return false;
@@ -1010,7 +1016,7 @@ bool AArch64LoadStoreOpt::findMatchingSt
       return false;
 
     // If we encounter a store aliased with the load, return early.
-    if (MI.mayStore() && mayAlias(LoadMI, MI, TII))
+    if (MI.mayStore() && mayAlias(LoadMI, MI, AA))
       return false;
   } while (MBBI != B && Count < Limit);
   return false;
@@ -1180,7 +1186,7 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
         // first.
         if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
             !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
-            !mayAlias(MI, MemInsns, TII)) {
+            !mayAlias(MI, MemInsns, AA)) {
           Flags.setMergeForward(false);
           return MBBI;
         }
@@ -1191,7 +1197,7 @@ AArch64LoadStoreOpt::findMatchingInsn(Ma
         // into the second.
         if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
             !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
-            !mayAlias(FirstMI, MemInsns, TII)) {
+            !mayAlias(FirstMI, MemInsns, AA)) {
           Flags.setMergeForward(true);
           return MBBI;
         }
@@ -1734,6 +1740,7 @@ bool AArch64LoadStoreOpt::runOnMachineFu
   Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
   TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
   TRI = Subtarget->getRegisterInfo();
+  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
 
   // Resize the modified and used register bitfield trackers.  We do this once
   // per function and then clear the bitfield each time we optimize a load or
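
The key change above is that the static mayAlias() helpers now defer to MachineInstr::mayAlias(), which consults AliasAnalysis through the instructions' memory operands, rather than the purely register/offset-based areMemAccessesTriviallyDisjoint(). The sketch below is a simplified, hypothetical rendering of that style of query, not the actual MachineInstr::mayAlias() implementation (which additionally reasons about offsets when both operands share a base value):

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  using namespace llvm;

  // Hypothetical helper: conservatively answer "may alias" unless alias
  // analysis can prove the two accesses disjoint.
  static bool mayAliasViaAA(AliasAnalysis *AA, MachineInstr &MIa,
                            MachineInstr &MIb) {
    // Without exactly one memory operand apiece there is nothing precise
    // to hand to AA; report a possible alias.
    if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
      return true;
    const MachineMemOperand *MMOa = *MIa.memoperands_begin();
    const MachineMemOperand *MMOb = *MIb.memoperands_begin();
    const Value *ValA = MMOa->getValue();
    const Value *ValB = MMOb->getValue();
    // Memory operands with no underlying IR value stay opaque to AA.
    if (!ValA || !ValB)
      return true;
    // Omitting the AAMDNodes mirrors the patch's UseTBAA=false: TBAA
    // metadata is deliberately not used to refine the answer.
    return AA->alias(MemoryLocation(ValA, MMOa->getSize()),
                     MemoryLocation(ValB, MMOb->getSize())) != NoAlias;
  }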

Added: llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir?rev=298092&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir (added)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir Fri Mar 17 09:19:55 2017
@@ -0,0 +1,30 @@
+# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-ldst-opt %s -verify-machineinstrs -o - | FileCheck %s
+--- |
+  define void @ldr_str_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) {
+  entry:
+    %0 = load i32, i32* %y, align 4
+    store i32 %0, i32* %x, align 4
+    %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1
+    %1 = load i32, i32* %arrayidx2, align 4
+    %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1
+    store i32 %1, i32* %arrayidx3, align 4
+    ret void
+  }
+
+...
+---
+# CHECK-LABEL: name: ldr_str_aa
+# CHECK: %w8, %w9 = LDPWi %x1, 0
+# CHECK: STPWi %w8, %w9, %x0, 0
+name:            ldr_str_aa
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: %x0, %x1
+
+    %w8 = LDRWui %x1, 0 :: (load 4 from %ir.y)
+    STRWui killed %w8, %x0, 0 :: (store 4 into %ir.x)
+    %w9 = LDRWui killed %x1, 1 :: (load 4 from %ir.arrayidx2)
+    STRWui killed %w9, killed %x0, 1 :: (store 4 into %ir.arrayidx3)
+    RET undef %lr
+
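
The CHECK lines verify that the two LDRWui loads merge into a single LDPWi and the two STRWui stores into a single STPWi, which is only legal because alias analysis can see, via the noalias arguments, that the store into %ir.x between the two loads cannot clobber %ir.arrayidx2. To run just this pass over the test locally, the RUN line expands to (assuming llc and FileCheck are on PATH and the file is saved as ldst-opt-aa.mir):

  llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-ldst-opt \
      ldst-opt-aa.mir -verify-machineinstrs -o - | FileCheck ldst-opt-aa.mir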
