[llvm] b69919b - [GVN LoadPRE] Add an option to disable splitting backedge

Serguei Katkov via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 26 22:32:33 PDT 2020


Author: Serguei Katkov
Date: 2020-10-27T11:59:52+07:00
New Revision: b69919b537fffcc5b6aaa6d78e2c039725a81f5b

URL: https://github.com/llvm/llvm-project/commit/b69919b537fffcc5b6aaa6d78e2c039725a81f5b
DIFF: https://github.com/llvm/llvm-project/commit/b69919b537fffcc5b6aaa6d78e2c039725a81f5b.diff

LOG: [GVN LoadPRE] Add an option to disable splitting backedge

GVN Load PRE can split the backedge, breaking the canonical loop structure in
which the latch contains the conditional branch on, for example, the induction
variable.

Various optimizations expect this loop form, so it is better to preserve it for
now. This CL adds an option to control whether Load PRE is allowed to split the
backedge.

The default value is true, so this is technically NFC and the current behavior
is unchanged.
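
As a side note for users (not part of this patch): the legacy flag is the
cl::opt -enable-split-backedge-in-load-pre exercised by the test below, and the
new pass manager exposes the same knob as the "split-backedge-load-pre" GVN
parameter. A minimal sketch of setting it programmatically, assuming the
existing GVN(GVNOptions) constructor together with the setter added here (the
helper name is purely illustrative):

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/Scalar/GVN.h"
  using namespace llvm;

  // Add GVN to a function pipeline with backedge splitting during Load PRE
  // disabled; roughly equivalent to
  //   opt -passes='gvn<no-split-backedge-load-pre>' ...
  void addGVNWithoutBackedgeSplit(FunctionPassManager &FPM) {
    FPM.addPass(GVN(GVNOptions().setLoadPRESplitBackedge(false)));
  }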

Reviewers: fedor.sergeev, mkazantsev, nikic, reames, fhahn
Reviewed By: mkazantsev
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D89854

Added: 
    llvm/test/Transforms/GVN/PRE/load-pre-split-backedge.ll

Modified: 
    llvm/include/llvm/Transforms/Scalar/GVN.h
    llvm/lib/Passes/PassBuilder.cpp
    llvm/lib/Transforms/Scalar/GVN.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h
index be3804f95c3e..92013dea0bff 100644
--- a/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -73,6 +73,7 @@ struct GVNOptions {
   Optional<bool> AllowPRE = None;
   Optional<bool> AllowLoadPRE = None;
   Optional<bool> AllowLoadInLoopPRE = None;
+  Optional<bool> AllowLoadPRESplitBackedge = None;
   Optional<bool> AllowMemDep = None;
 
   GVNOptions() = default;
@@ -94,6 +95,12 @@ struct GVNOptions {
     return *this;
   }
 
+  /// Enables or disables splitting the backedge during PRE of loads in GVN.
+  GVNOptions &setLoadPRESplitBackedge(bool LoadPRESplitBackedge) {
+    AllowLoadPRESplitBackedge = LoadPRESplitBackedge;
+    return *this;
+  }
+
   /// Enables or disables use of MemDepAnalysis.
   GVNOptions &setMemDep(bool MemDep) {
     AllowMemDep = MemDep;
@@ -130,6 +137,7 @@ class GVN : public PassInfoMixin<GVN> {
   bool isPREEnabled() const;
   bool isLoadPREEnabled() const;
   bool isLoadInLoopPREEnabled() const;
+  bool isLoadPRESplitBackedgeEnabled() const;
   bool isMemDepEnabled() const;
 
   /// This class holds the mapping between values and value numbers.  It is used

diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 9a19ba0166b3..15a6b5ed717b 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -1926,6 +1926,8 @@ Expected<GVNOptions> parseGVNOptions(StringRef Params) {
       Result.setPRE(Enable);
     } else if (ParamName == "load-pre") {
       Result.setLoadPRE(Enable);
+    } else if (ParamName == "split-backedge-load-pre") {
+      Result.setLoadPRESplitBackedge(Enable);
     } else if (ParamName == "memdep") {
       Result.setMemDep(Enable);
     } else {

diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 4cb95425678c..3615316058e5 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -110,6 +110,9 @@ static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                             cl::init(true));
+static cl::opt<bool>
+GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
+                                cl::init(true));
 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
 
 static cl::opt<uint32_t> MaxNumDeps(
@@ -639,6 +642,11 @@ bool GVN::isLoadInLoopPREEnabled() const {
   return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
 }
 
+bool GVN::isLoadPRESplitBackedgeEnabled() const {
+  return Options.AllowLoadPRESplitBackedge.getValueOr(
+      GVNEnableSplitBackedgeInLoadPRE);
+}
+
 bool GVN::isMemDepEnabled() const {
   return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
 }
@@ -1222,6 +1230,16 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
         return false;
       }
 
+      // Do not split backedge as it will break the canonical loop form.
+      if (!isLoadPRESplitBackedgeEnabled())
+        if (DT->dominates(LoadBB, Pred)) {
+          LLVM_DEBUG(
+              dbgs()
+              << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
+              << Pred->getName() << "': " << *LI << '\n');
+          return false;
+        }
+
       CriticalEdgePred.push_back(Pred);
     } else {
       // Only add the predecessors that will not be split for now.

diff --git a/llvm/test/Transforms/GVN/PRE/load-pre-split-backedge.ll b/llvm/test/Transforms/GVN/PRE/load-pre-split-backedge.ll
new file mode 100644
index 000000000000..03730d225c81
--- /dev/null
+++ b/llvm/test/Transforms/GVN/PRE/load-pre-split-backedge.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -basic-aa -gvn -enable-split-backedge-in-load-pre=true < %s | FileCheck %s --check-prefix=ON
+; RUN: opt -S -basic-aa -gvn -enable-split-backedge-in-load-pre=false < %s | FileCheck %s --check-prefix=OFF
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+
+define i32 @test(i1 %b, i1 %c, i32* noalias %p, i32* noalias %q) {
+; ON-LABEL: @test(
+; ON-NEXT:  entry:
+; ON-NEXT:    [[Y1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; ON-NEXT:    call void @use(i32 [[Y1]])
+; ON-NEXT:    br label [[HEADER:%.*]]
+; ON:       header:
+; ON-NEXT:    [[Y:%.*]] = phi i32 [ [[Y_PRE:%.*]], [[SKIP_HEADER_CRIT_EDGE:%.*]] ], [ [[Y]], [[HEADER]] ], [ [[Y1]], [[ENTRY:%.*]] ]
+; ON-NEXT:    call void @use(i32 [[Y]])
+; ON-NEXT:    br i1 [[B:%.*]], label [[SKIP:%.*]], label [[HEADER]]
+; ON:       skip:
+; ON-NEXT:    call void @clobber(i32* [[P]], i32* [[Q:%.*]])
+; ON-NEXT:    br i1 [[C:%.*]], label [[SKIP_HEADER_CRIT_EDGE]], label [[EXIT:%.*]]
+; ON:       skip.header_crit_edge:
+; ON-NEXT:    [[Y_PRE]] = load i32, i32* [[P]], align 4
+; ON-NEXT:    br label [[HEADER]]
+; ON:       exit:
+; ON-NEXT:    ret i32 [[Y]]
+;
+; OFF-LABEL: @test(
+; OFF-NEXT:  entry:
+; OFF-NEXT:    [[Y1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; OFF-NEXT:    call void @use(i32 [[Y1]])
+; OFF-NEXT:    br label [[HEADER:%.*]]
+; OFF:       header:
+; OFF-NEXT:    [[Y:%.*]] = load i32, i32* [[P]], align 4
+; OFF-NEXT:    call void @use(i32 [[Y]])
+; OFF-NEXT:    br i1 [[B:%.*]], label [[SKIP:%.*]], label [[HEADER]]
+; OFF:       skip:
+; OFF-NEXT:    call void @clobber(i32* [[P]], i32* [[Q:%.*]])
+; OFF-NEXT:    br i1 [[C:%.*]], label [[HEADER]], label [[EXIT:%.*]]
+; OFF:       exit:
+; OFF-NEXT:    ret i32 [[Y]]
+;
+entry:
+  %y1 = load i32, i32* %p
+  call void @use(i32 %y1)
+  br label %header
+header:
+  %y = load i32, i32* %p
+  call void @use(i32 %y)
+  br i1 %b, label %skip, label %header
+skip:
+  call void @clobber(i32* %p, i32* %q)
+  br i1 %c, label %header, label %exit
+exit:
+  ret i32 %y
+}
+
+declare void @use(i32) readonly
+declare void @clobber(i32* %p, i32* %q)
