[llvm] d1f9b21 - [AggressiveInstCombine] Add `AssumptionCache` to aggressive instcombine

Anton Afanasyev via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 7 06:46:01 PDT 2021


Author: Anton Afanasyev
Date: 2021-09-07T16:45:00+03:00
New Revision: d1f9b216776d8d9fb85fb17b52066a1246e18a10

URL: https://github.com/llvm/llvm-project/commit/d1f9b216776d8d9fb85fb17b52066a1246e18a10
DIFF: https://github.com/llvm/llvm-project/commit/d1f9b216776d8d9fb85fb17b52066a1246e18a10.diff

LOG: [AggressiveInstCombine] Add `AssumptionCache` to aggressive instcombine

Add support for @llvm.assume() to TruncInstCombine allowing
optimizations based on these intrinsics while computing known bits.

Added: 
    

Modified: 
    llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
    llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
    llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
    llvm/test/Transforms/AggressiveInstCombine/trunc_assume.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 85abbf6d86e07..1fe6e39c1d07b 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -18,6 +18,7 @@
 #include "llvm-c/Transforms/AggressiveInstCombine.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/BasicAliasAnalysis.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
@@ -394,10 +395,11 @@ static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
 
 /// This is the entry point for all transforms. Pass manager differences are
 /// handled in the callers of this function.
-static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
+static bool runImpl(Function &F, AssumptionCache &AC, TargetLibraryInfo &TLI,
+                    DominatorTree &DT) {
   bool MadeChange = false;
   const DataLayout &DL = F.getParent()->getDataLayout();
-  TruncInstCombine TIC(TLI, DL, DT);
+  TruncInstCombine TIC(AC, TLI, DL, DT);
   MadeChange |= TIC.run(F);
   MadeChange |= foldUnusualPatterns(F, DT);
   return MadeChange;
@@ -406,6 +408,7 @@ static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
 void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
     AnalysisUsage &AU) const {
   AU.setPreservesCFG();
+  AU.addRequired<AssumptionCacheTracker>();
   AU.addRequired<DominatorTreeWrapperPass>();
   AU.addRequired<TargetLibraryInfoWrapperPass>();
   AU.addPreserved<AAResultsWrapperPass>();
@@ -415,16 +418,18 @@ void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
 }
 
 bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
+  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-  return runImpl(F, TLI, DT);
+  return runImpl(F, AC, TLI, DT);
 }
 
 PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {
+  auto &AC = AM.getResult<AssumptionAnalysis>(F);
   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
-  if (!runImpl(F, TLI, DT)) {
+  if (!runImpl(F, AC, TLI, DT)) {
     // No changes, all analyses are preserved.
     return PreservedAnalyses::all();
   }
@@ -438,6 +443,7 @@ char AggressiveInstCombinerLegacyPass::ID = 0;
 INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                       "aggressive-instcombine",
                       "Combine pattern based expressions", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",

diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
index afa4f06d3e795..5d69e26d6ecc0 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
@@ -41,16 +41,18 @@ using namespace llvm;
 //===----------------------------------------------------------------------===//
 
 namespace llvm {
-  class DataLayout;
-  class DominatorTree;
-  class Function;
-  class Instruction;
-  class TargetLibraryInfo;
-  class TruncInst;
-  class Type;
-  class Value;
+class AssumptionCache;
+class DataLayout;
+class DominatorTree;
+class Function;
+class Instruction;
+class TargetLibraryInfo;
+class TruncInst;
+class Type;
+class Value;
 
 class TruncInstCombine {
+  AssumptionCache &AC;
   TargetLibraryInfo &TLI;
   const DataLayout &DL;
   const DominatorTree &DT;
@@ -77,9 +79,9 @@ class TruncInstCombine {
   MapVector<Instruction *, Info> InstInfoMap;
 
 public:
-  TruncInstCombine(TargetLibraryInfo &TLI, const DataLayout &DL,
-                   const DominatorTree &DT)
-      : TLI(TLI), DL(DL), DT(DT), CurrentTruncInst(nullptr) {}
+  TruncInstCombine(AssumptionCache &AC, TargetLibraryInfo &TLI,
+                   const DataLayout &DL, const DominatorTree &DT)
+      : AC(AC), TLI(TLI), DL(DL), DT(DT), CurrentTruncInst(nullptr) {}
 
   /// Perform TruncInst pattern optimization on given function.
   bool run(Function &F);
@@ -107,13 +109,15 @@ class TruncInstCombine {
   Type *getBestTruncatedType();
 
   KnownBits computeKnownBits(const Value *V) const {
-    return llvm::computeKnownBits(V, DL, /*Depth=*/0, /*AC=*/nullptr,
-                                  /*CtxI=*/nullptr, &DT);
+    return llvm::computeKnownBits(V, DL, /*Depth=*/0, &AC,
+                                  /*CtxI=*/cast<Instruction>(CurrentTruncInst),
+                                  &DT);
   }
 
   unsigned ComputeNumSignBits(const Value *V) const {
-    return llvm::ComputeNumSignBits(V, DL, /*Depth=*/0, /*AC=*/nullptr,
-                                    /*CtxI=*/nullptr, &DT);
+    return llvm::ComputeNumSignBits(
+        V, DL, /*Depth=*/0, &AC, /*CtxI=*/cast<Instruction>(CurrentTruncInst),
+        &DT);
   }
 
   /// Given a \p V value and a \p SclTy scalar type return the generated reduced

diff --git a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
index 25ca5885f8315..e3a785aee1570 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
@@ -29,7 +29,6 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/IRBuilder.h"

diff --git a/llvm/test/Transforms/AggressiveInstCombine/trunc_assume.ll b/llvm/test/Transforms/AggressiveInstCombine/trunc_assume.ll
index 021c14162cf98..1c2a14323dd78 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/trunc_assume.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/trunc_assume.ll
@@ -5,11 +5,8 @@ define i16 @trunc_shl(i16 %x, i16 %y) {
 ; CHECK-LABEL: @trunc_shl(
 ; CHECK-NEXT:    [[CMP0:%.*]] = icmp ult i16 [[Y:%.*]], 16
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP0]])
-; CHECK-NEXT:    [[ZEXTX:%.*]] = zext i16 [[X:%.*]] to i32
-; CHECK-NEXT:    [[ZEXTY:%.*]] = zext i16 [[Y]] to i32
-; CHECK-NEXT:    [[I0:%.*]] = shl i32 [[ZEXTX]], [[ZEXTY]]
-; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[I0]] to i16
-; CHECK-NEXT:    ret i16 [[R]]
+; CHECK-NEXT:    [[I0:%.*]] = shl i16 [[X:%.*]], [[Y]]
+; CHECK-NEXT:    ret i16 [[I0]]
 ;
   %cmp0 = icmp ult i16 %y, 16
   call void @llvm.assume(i1 %cmp0)
@@ -28,11 +25,8 @@ define i16 @trunc_lshr(i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[CMP1:%.*]] = icmp ult i16 [[Y:%.*]], 16
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP0]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP1]])
-; CHECK-NEXT:    [[ZEXTX:%.*]] = zext i16 [[X]] to i32
-; CHECK-NEXT:    [[ZEXTY:%.*]] = zext i16 [[Y]] to i32
-; CHECK-NEXT:    [[I0:%.*]] = lshr i32 [[ZEXTX]], [[ZEXTY]]
-; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[I0]] to i16
-; CHECK-NEXT:    ret i16 [[R]]
+; CHECK-NEXT:    [[I0:%.*]] = lshr i16 [[X]], [[Y]]
+; CHECK-NEXT:    ret i16 [[I0]]
 ;
   %cmp0 = icmp ult i16 %x, 65536
   %cmp1 = icmp ult i16 %y, 16
@@ -55,11 +49,8 @@ define i16 @trunc_ashr(i16 %x, i16 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP0]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP1]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP2]])
-; CHECK-NEXT:    [[ZEXTX:%.*]] = sext i16 [[X]] to i32
-; CHECK-NEXT:    [[ZEXTY:%.*]] = sext i16 [[Y]] to i32
-; CHECK-NEXT:    [[I0:%.*]] = ashr i32 [[ZEXTX]], [[ZEXTY]]
-; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[I0]] to i16
-; CHECK-NEXT:    ret i16 [[R]]
+; CHECK-NEXT:    [[I0:%.*]] = ashr i16 [[X]], [[Y]]
+; CHECK-NEXT:    ret i16 [[I0]]
 ;
   %cmp0 = icmp slt i16 %x, 32767
   %cmp1 = icmp sge i16 %x, -32768


        


More information about the llvm-commits mailing list