[llvm] bcbd26b - [SCEV] Move ScalarEvolutionExpander.cpp to Transforms/Utils (NFC).
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Wed May 20 02:54:24 PDT 2020
Author: Florian Hahn
Date: 2020-05-20T10:53:40+01:00
New Revision: bcbd26bfe61a35e31b1f7e98b5761a1055273b69
URL: https://github.com/llvm/llvm-project/commit/bcbd26bfe61a35e31b1f7e98b5761a1055273b69
DIFF: https://github.com/llvm/llvm-project/commit/bcbd26bfe61a35e31b1f7e98b5761a1055273b69.diff
LOG: [SCEV] Move ScalarEvolutionExpander.cpp to Transforms/Utils (NFC).
SCEVExpander modifies the underlying function so it is more suitable in
Transforms/Utils, rather than Analysis. This allows using other
transform utils in SCEVExpander.
This patch was originally committed as b8a3c34eee06, but broke the
modules build, as LoopAccessAnalysis was using the Expander.
The code-gen part of LAA was moved to lib/Transforms recently, so this
patch can be landed again.
Reviewers: sanjoy.google, efriedma, reames
Reviewed By: sanjoy.google
Differential Revision: https://reviews.llvm.org/D71537
Added:
llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
Modified:
llvm/lib/Analysis/CMakeLists.txt
llvm/lib/Analysis/LoopAccessAnalysis.cpp
llvm/lib/CodeGen/HardwareLoops.cpp
llvm/lib/Target/ARM/MVETailPredication.cpp
llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
llvm/lib/Transforms/Scalar/LoopPredication.cpp
llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
llvm/lib/Transforms/Utils/CMakeLists.txt
llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
llvm/lib/Transforms/Utils/LoopUtils.cpp
llvm/lib/Transforms/Utils/LoopVersioning.cpp
llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/unittests/Analysis/ScalarEvolutionTest.cpp
llvm/unittests/Transforms/Utils/CMakeLists.txt
llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
Removed:
llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
llvm/lib/Analysis/ScalarEvolutionExpander.cpp
################################################################################
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
similarity index 100%
rename from llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
rename to llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 6308bda6f77f..7ae053f59d1a 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -85,7 +85,6 @@ add_llvm_component_library(LLVMAnalysis
RegionPrinter.cpp
ScalarEvolution.cpp
ScalarEvolutionAliasAnalysis.cpp
- ScalarEvolutionExpander.cpp
ScalarEvolutionNormalization.cpp
StackSafetyAnalysis.cpp
SyncDependenceAnalysis.cpp
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index b0a5e0ba7282..014cabd88fed 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -30,7 +30,6 @@
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
diff --git a/llvm/lib/CodeGen/HardwareLoops.cpp b/llvm/lib/CodeGen/HardwareLoops.cpp
index c1305fce08d1..f064f4ff65b7 100644
--- a/llvm/lib/CodeGen/HardwareLoops.cpp
+++ b/llvm/lib/CodeGen/HardwareLoops.cpp
@@ -20,7 +20,6 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -42,6 +41,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#define DEBUG_TYPE "hardware-loops"
diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
index ffc090e0cdc6..e6bf433753ff 100644
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -46,18 +46,18 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
-#include "llvm/InitializePasses.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index ffaf71e23690..2c1e0cadd9ee 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -20,7 +20,6 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -57,6 +56,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <array>
#include <cassert>
diff --git a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
index b761f337533b..fe2706117453 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
@@ -53,7 +53,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
@@ -74,6 +73,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <iterator>
#include <utility>
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 6b3db66479a6..f6a0b6ea4637 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -41,7 +41,6 @@
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -83,6 +82,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include <cassert>
#include <cstdint>
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 904cfa27fef8..92466cc46058 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -57,7 +57,6 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
@@ -88,6 +87,7 @@
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index ea2c3bb90d4d..687e14d6d7d2 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -21,7 +21,6 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
@@ -32,6 +31,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index cc67c4bedce4..8bd1aa8514ca 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -56,7 +56,6 @@
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -94,6 +93,7 @@
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index 989653dc876a..4412b3079461 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -38,7 +38,6 @@
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -58,6 +57,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index 11cfa563a47b..1d73a9f24453 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -184,7 +184,6 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
@@ -199,6 +198,7 @@
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#define DEBUG_TYPE "loop-predication"
diff --git a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
index 493a8033a719..5deee9e5546b 100644
--- a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -24,7 +24,6 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -55,6 +54,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index d8a272a7270e..5d884eb82895 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -72,7 +72,6 @@
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -111,6 +110,7 @@
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
diff --git a/llvm/lib/Transforms/Utils/CMakeLists.txt b/llvm/lib/Transforms/Utils/CMakeLists.txt
index 0ca23669e97d..f327117e32b7 100644
--- a/llvm/lib/Transforms/Utils/CMakeLists.txt
+++ b/llvm/lib/Transforms/Utils/CMakeLists.txt
@@ -52,6 +52,7 @@ add_llvm_component_library(LLVMTransformUtils
NameAnonGlobals.cpp
PredicateInfo.cpp
PromoteMemoryToRegister.cpp
+ ScalarEvolutionExpander.cpp
StripGCRelocates.cpp
SSAUpdater.cpp
SSAUpdaterBulk.cpp
diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index b8f8cc35fa30..c0d97cf7eeca 100644
--- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -25,7 +25,6 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Metadata.h"
@@ -37,6 +36,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include <algorithm>
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 5f5468541345..972a7fc8536c 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -31,7 +31,6 @@
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -50,6 +49,7 @@
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;
using namespace llvm::PatternMatch;
diff --git a/llvm/lib/Transforms/Utils/LoopVersioning.cpp b/llvm/lib/Transforms/Utils/LoopVersioning.cpp
index 30a783c8ef11..16bd08c704ee 100644
--- a/llvm/lib/Transforms/Utils/LoopVersioning.cpp
+++ b/llvm/lib/Transforms/Utils/LoopVersioning.cpp
@@ -16,13 +16,13 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
similarity index 99%
rename from llvm/lib/Analysis/ScalarEvolutionExpander.cpp
rename to llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 22619d0f754f..784fbe18ed95 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index cc270018156b..05632ca40fb0 100644
--- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -17,7 +17,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
@@ -27,6 +26,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 3149556642d7..af33eff042d8 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -91,7 +91,6 @@
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -138,6 +137,7 @@
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
diff --git a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
index e147502ae543..f78fc396ebc1 100644
--- a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -9,8 +9,8 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
@@ -141,71 +141,6 @@ TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
EXPECT_EQ(S1, S2);
}
-TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
- // It is to test the fix for PR30213. It exercises the branch in scev
- // expansion when the value in ValueOffsetPair is a ptr and the offset
- // is not divisible by the elem type size of value.
- auto *I8Ty = Type::getInt8Ty(Context);
- auto *I8PtrTy = Type::getInt8PtrTy(Context);
- auto *I32Ty = Type::getInt32Ty(Context);
- auto *I32PtrTy = Type::getInt32PtrTy(Context);
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
- BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
- BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
- BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
- BranchInst::Create(LoopBB, EntryBB);
- ReturnInst::Create(Context, nullptr, ExitBB);
-
- // loop: ; preds = %loop, %entry
- // %alloca = alloca i32
- // %gep0 = getelementptr i32, i32* %alloca, i32 1
- // %bitcast1 = bitcast i32* %gep0 to i8*
- // %gep1 = getelementptr i8, i8* %bitcast1, i32 1
- // %gep2 = getelementptr i8, i8* undef, i32 1
- // %cmp = icmp ult i8* undef, %bitcast1
- // %select = select i1 %cmp, i8* %gep1, i8* %gep2
- // %bitcast2 = bitcast i8* %select to i32*
- // br i1 undef, label %loop, label %exit
-
- const DataLayout &DL = F->getParent()->getDataLayout();
- BranchInst *Br = BranchInst::Create(
- LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
- AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
- "alloca", Br);
- ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
- GetElementPtrInst *Gep0 =
- GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
- CastInst *CastA =
- CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
- GetElementPtrInst *Gep1 =
- GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
- GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
- I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
- CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
- UndefValue::get(I8PtrTy), CastA, "cmp", Br);
- SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
- CastInst *CastB =
- CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);
-
- ScalarEvolution SE = buildSE(*F);
- auto *S = SE.getSCEV(CastB);
- SCEVExpander Exp(SE, M.getDataLayout(), "expander");
- Value *V =
- Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);
-
- // Expect the expansion code contains:
- // %0 = bitcast i32* %bitcast2 to i8*
- // %uglygep = getelementptr i8, i8* %0, i64 -1
- // %1 = bitcast i8* %uglygep to i32*
- EXPECT_TRUE(isa<BitCastInst>(V));
- Instruction *Gep = cast<Instruction>(V)->getPrevNode();
- EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
- EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
- EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
- EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
-}
static Instruction *getInstructionByName(Function &F, StringRef Name) {
for (auto &I : instructions(F))
@@ -719,83 +654,6 @@ TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
SE.getZeroExtendExpr(S, I128Ty);
}
-// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
-TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
- /*
- * Create the following code:
- * func(i64 addrspace(10)* %arg)
- * top:
- * br label %L.ph
- * L.ph:
- * br label %L
- * L:
- * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
- * %add = add i64 %phi2, 1
- * br i1 undef, label %post, label %L2
- * post:
- * %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
- * #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
- * ret void
- *
- * We will create the appropriate SCEV expression for %gep and expand it,
- * then check that no inttoptr/ptrtoint instructions got inserted.
- */
-
- // Create a module with non-integral pointers in it's datalayout
- Module NIM("nonintegral", Context);
- std::string DataLayout = M.getDataLayoutStr();
- if (!DataLayout.empty())
- DataLayout += "-";
- DataLayout += "ni:10";
- NIM.setDataLayout(DataLayout);
-
- Type *T_int1 = Type::getInt1Ty(Context);
- Type *T_int64 = Type::getInt64Ty(Context);
- Type *T_pint64 = T_int64->getPointerTo(10);
-
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
-
- Argument *Arg = &*F->arg_begin();
-
- BasicBlock *Top = BasicBlock::Create(Context, "top", F);
- BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
- BasicBlock *L = BasicBlock::Create(Context, "L", F);
- BasicBlock *Post = BasicBlock::Create(Context, "post", F);
-
- IRBuilder<> Builder(Top);
- Builder.CreateBr(LPh);
-
- Builder.SetInsertPoint(LPh);
- Builder.CreateBr(L);
-
- Builder.SetInsertPoint(L);
- PHINode *Phi = Builder.CreatePHI(T_int64, 2);
- Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
- Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
- Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
- Phi->addIncoming(Add, L);
-
- Builder.SetInsertPoint(Post);
- Value *GepBase =
- Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
- Instruction *Ret = Builder.CreateRetVoid();
-
- ScalarEvolution SE = buildSE(*F);
- auto *AddRec =
- SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
- LI->getLoopFor(L), SCEV::FlagNUW);
-
- SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
- Exp.disableCanonicalMode();
- Exp.expandCodeFor(AddRec, T_pint64, Ret);
-
- // Make sure none of the instructions inserted were inttoptr/ptrtoint.
- // The verifier will check this.
- EXPECT_FALSE(verifyFunction(*F, &errs()));
-}
-
// Make sure that SCEV invalidates exit limits after invalidating the values it
// depends on when we forget a loop.
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
@@ -1122,283 +980,6 @@ TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
EXPECT_EQ(Expr, ZeroConst);
}
-// Check that we can correctly identify the points at which the SCEV of the
-// AddRec can be expanded.
-TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
- /*
- * Create the following code:
- * func(i64 addrspace(10)* %arg)
- * top:
- * br label %L.ph
- * L.ph:
- * br label %L
- * L:
- * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
- * %add = add i64 %phi2, 1
- * %cond = icmp slt i64 %add, 1000; then becomes 2000.
- * br i1 %cond, label %post, label %L2
- * post:
- * ret void
- *
- */
-
- // Create a module with non-integral pointers in it's datalayout
- Module NIM("nonintegral", Context);
- std::string DataLayout = M.getDataLayoutStr();
- if (!DataLayout.empty())
- DataLayout += "-";
- DataLayout += "ni:10";
- NIM.setDataLayout(DataLayout);
-
- Type *T_int64 = Type::getInt64Ty(Context);
- Type *T_pint64 = T_int64->getPointerTo(10);
-
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
-
- BasicBlock *Top = BasicBlock::Create(Context, "top", F);
- BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
- BasicBlock *L = BasicBlock::Create(Context, "L", F);
- BasicBlock *Post = BasicBlock::Create(Context, "post", F);
-
- IRBuilder<> Builder(Top);
- Builder.CreateBr(LPh);
-
- Builder.SetInsertPoint(LPh);
- Builder.CreateBr(L);
-
- Builder.SetInsertPoint(L);
- PHINode *Phi = Builder.CreatePHI(T_int64, 2);
- auto *Add = cast<Instruction>(
- Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
- auto *Limit = ConstantInt::get(T_int64, 1000);
- auto *Cond = cast<Instruction>(
- Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
- Builder.CreateCondBr(Cond, L, Post);
- Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
- Phi->addIncoming(Add, L);
-
- Builder.SetInsertPoint(Post);
- Builder.CreateRetVoid();
-
- ScalarEvolution SE = buildSE(*F);
- const SCEV *S = SE.getSCEV(Phi);
- EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
- const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
- EXPECT_TRUE(AR->isAffine());
- EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
- EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
- EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
- EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
-}
-
-// Check that SCEV expander does not use the nuw instruction
-// for expansion.
-TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
- /*
- * Create the following code:
- * func(i64 %a)
- * entry:
- * br false, label %exit, label %body
- * body:
- * %s1 = add i64 %a, -1
- * br label %exit
- * exit:
- * %s = add nuw i64 %a, -1
- * ret %s
- */
-
- // Create a module.
- Module M("SCEVExpanderNUW", Context);
-
- Type *T_int64 = Type::getInt64Ty(Context);
-
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
- Argument *Arg = &*F->arg_begin();
- ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
-
- BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
- BasicBlock *Body = BasicBlock::Create(Context, "body", F);
- BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
-
- IRBuilder<> Builder(Entry);
- ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
- Builder.CreateCondBr(Cond, Exit, Body);
-
- Builder.SetInsertPoint(Body);
- auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- Builder.CreateBr(Exit);
-
- Builder.SetInsertPoint(Exit);
- auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- S2->setHasNoUnsignedWrap(true);
- auto *R = cast<Instruction>(Builder.CreateRetVoid());
-
- ScalarEvolution SE = buildSE(*F);
- const SCEV *S = SE.getSCEV(S1);
- EXPECT_TRUE(isa<SCEVAddExpr>(S));
- SCEVExpander Exp(SE, M.getDataLayout(), "expander");
- auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
- EXPECT_FALSE(I->hasNoUnsignedWrap());
-}
-
-// Check that SCEV expander does not use the nsw instruction
-// for expansion.
-TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
- /*
- * Create the following code:
- * func(i64 %a)
- * entry:
- * br false, label %exit, label %body
- * body:
- * %s1 = add i64 %a, -1
- * br label %exit
- * exit:
- * %s = add nsw i64 %a, -1
- * ret %s
- */
-
- // Create a module.
- Module M("SCEVExpanderNSW", Context);
-
- Type *T_int64 = Type::getInt64Ty(Context);
-
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
- Argument *Arg = &*F->arg_begin();
- ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
-
- BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
- BasicBlock *Body = BasicBlock::Create(Context, "body", F);
- BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
-
- IRBuilder<> Builder(Entry);
- ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
- Builder.CreateCondBr(Cond, Exit, Body);
-
- Builder.SetInsertPoint(Body);
- auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- Builder.CreateBr(Exit);
-
- Builder.SetInsertPoint(Exit);
- auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- S2->setHasNoSignedWrap(true);
- auto *R = cast<Instruction>(Builder.CreateRetVoid());
-
- ScalarEvolution SE = buildSE(*F);
- const SCEV *S = SE.getSCEV(S1);
- EXPECT_TRUE(isa<SCEVAddExpr>(S));
- SCEVExpander Exp(SE, M.getDataLayout(), "expander");
- auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
- EXPECT_FALSE(I->hasNoSignedWrap());
-}
-
-// Check that SCEV does not save the SCEV -> V
-// mapping of SCEV differ from V in NUW flag.
-TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
- /*
- * Create the following code:
- * func(i64 %a)
- * entry:
- * %s1 = add i64 %a, -1
- * %s2 = add nuw i64 %a, -1
- * br label %exit
- * exit:
- * ret %s
- */
-
- // Create a module.
- Module M("SCEVCacheNUW", Context);
-
- Type *T_int64 = Type::getInt64Ty(Context);
-
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
- Argument *Arg = &*F->arg_begin();
- ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
-
- BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
- BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
-
- IRBuilder<> Builder(Entry);
- auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- S2->setHasNoUnsignedWrap(true);
- Builder.CreateBr(Exit);
-
- Builder.SetInsertPoint(Exit);
- auto *R = cast<Instruction>(Builder.CreateRetVoid());
-
- ScalarEvolution SE = buildSE(*F);
- // Get S2 first to move it to cache.
- const SCEV *SC2 = SE.getSCEV(S2);
- EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
- // Now get S1.
- const SCEV *SC1 = SE.getSCEV(S1);
- EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
- // Expand for S1, it should use S1 not S2 in spite S2
- // first in the cache.
- SCEVExpander Exp(SE, M.getDataLayout(), "expander");
- auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
- EXPECT_FALSE(I->hasNoUnsignedWrap());
-}
-
-// Check that SCEV does not save the SCEV -> V
-// mapping of SCEV differ from V in NSW flag.
-TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
- /*
- * Create the following code:
- * func(i64 %a)
- * entry:
- * %s1 = add i64 %a, -1
- * %s2 = add nsw i64 %a, -1
- * br label %exit
- * exit:
- * ret %s
- */
-
- // Create a module.
- Module M("SCEVCacheNUW", Context);
-
- Type *T_int64 = Type::getInt64Ty(Context);
-
- FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
- Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
- Argument *Arg = &*F->arg_begin();
- ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
-
- BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
- BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
-
- IRBuilder<> Builder(Entry);
- auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
- S2->setHasNoSignedWrap(true);
- Builder.CreateBr(Exit);
-
- Builder.SetInsertPoint(Exit);
- auto *R = cast<Instruction>(Builder.CreateRetVoid());
-
- ScalarEvolution SE = buildSE(*F);
- // Get S2 first to move it to cache.
- const SCEV *SC2 = SE.getSCEV(S2);
- EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
- // Now get S1.
- const SCEV *SC1 = SE.getSCEV(S1);
- EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
- // Expand for S1, it should use S1 not S2 in spite S2
- // first in the cache.
- SCEVExpander Exp(SE, M.getDataLayout(), "expander");
- auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
- EXPECT_FALSE(I->hasNoSignedWrap());
-}
-
// Check logic of SCEV expression size computation.
TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
/*
@@ -1449,240 +1030,6 @@ TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
EXPECT_EQ(S2S->getExpressionSize(), 5u);
}
-TEST_F(ScalarEvolutionsTest, SCEVExpandInsertCanonicalIV) {
- LLVMContext C;
- SMDiagnostic Err;
-
- // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
- // SCEVExpander will insert one.
- auto TestNoCanonicalIV = [&](
- std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
- std::unique_ptr<Module> M =
- parseAssemblyString("define i32 @test(i32 %limit) { "
- "entry: "
- " br label %loop "
- "loop: "
- " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
- " %i.inc = add nsw i32 %i, 1 "
- " %cont = icmp slt i32 %i.inc, %limit "
- " br i1 %cont, label %loop, label %exit "
- "exit: "
- " ret i32 %i.inc "
- "}",
- Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto &I = GetInstByName(F, "i");
- auto *Loop = LI.getLoopFor(I.getParent());
- EXPECT_FALSE(Loop->getCanonicalInductionVariable());
-
- auto *AR = GetAddRec(SE, Loop);
- unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *InsertAt = I.getNextNode();
- Exp.expandCodeFor(AR, nullptr, InsertAt);
- PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
- unsigned CanonicalIVBitWidth =
- cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
- EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
- });
- };
-
- // Expand the addrec produced by GetAddRec into a loop with a canonical IV
- // which is narrower than addrec type.
- // SCEVExpander will insert a canonical IV of a wider type to expand the
- // addrec.
- auto TestNarrowCanonicalIV = [&](
- std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
- std::unique_ptr<Module> M = parseAssemblyString(
- "define i32 @test(i32 %limit) { "
- "entry: "
- " br label %loop "
- "loop: "
- " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
- " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
- " %i.inc = add nsw i32 %i, 1 "
- " %canonical.iv.inc = add i8 %canonical.iv, 1 "
- " %cont = icmp slt i32 %i.inc, %limit "
- " br i1 %cont, label %loop, label %exit "
- "exit: "
- " ret i32 %i.inc "
- "}",
- Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto &I = GetInstByName(F, "i");
-
- auto *LoopHeaderBB = I.getParent();
- auto *Loop = LI.getLoopFor(LoopHeaderBB);
- PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
- EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
-
- auto *AR = GetAddRec(SE, Loop);
-
- unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
- unsigned CanonicalIVBitWidth =
- cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
- EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *InsertAt = I.getNextNode();
- Exp.expandCodeFor(AR, nullptr, InsertAt);
-
- // Loop over all of the PHI nodes, looking for the new canonical indvar.
- PHINode *NewCanonicalIV = nullptr;
- for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
- ++i) {
- PHINode *PN = cast<PHINode>(i);
- if (PN == &I || PN == CanonicalIV)
- continue;
- // We expect that the only PHI added is the new canonical IV
- EXPECT_FALSE(NewCanonicalIV);
- NewCanonicalIV = PN;
- }
-
- // Check that NewCanonicalIV is a canonical IV, i.e {0,+,1}
- BasicBlock *Incoming = nullptr, *Backedge = nullptr;
- EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
- auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
- EXPECT_TRUE(isa<ConstantInt>(Start));
- EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
- auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
- EXPECT_TRUE(isa<BinaryOperator>(Next));
- auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
- EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
- EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
- auto *Step = NextBinOp->getOperand(1);
- EXPECT_TRUE(isa<ConstantInt>(Step));
- EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());
-
- unsigned NewCanonicalIVBitWidth =
- cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
- EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
- });
- };
-
- // Expand the addrec produced by GetAddRec into a loop with a canonical IV
- // of addrec width.
- // To expand the addrec SCEVExpander should use the existing canonical IV.
- auto TestMatchingCanonicalIV = [&](
- std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
- unsigned ARBitWidth) {
- auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
- std::unique_ptr<Module> M = parseAssemblyString(
- "define i32 @test(i32 %limit) { "
- "entry: "
- " br label %loop "
- "loop: "
- " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
- " %canonical.iv = phi " + ARBitWidthTypeStr +
- " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
- " %i.inc = add nsw i32 %i, 1 "
- " %canonical.iv.inc = add " + ARBitWidthTypeStr +
- " %canonical.iv, 1 "
- " %cont = icmp slt i32 %i.inc, %limit "
- " br i1 %cont, label %loop, label %exit "
- "exit: "
- " ret i32 %i.inc "
- "}",
- Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto &I = GetInstByName(F, "i");
- auto &CanonicalIV = GetInstByName(F, "canonical.iv");
-
- auto *LoopHeaderBB = I.getParent();
- auto *Loop = LI.getLoopFor(LoopHeaderBB);
- EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
- unsigned CanonicalIVBitWidth =
- cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
-
- auto *AR = GetAddRec(SE, Loop);
- EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
- EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *InsertAt = I.getNextNode();
- Exp.expandCodeFor(AR, nullptr, InsertAt);
-
- // Loop over all of the PHI nodes, looking if a new canonical indvar was
- // introduced.
- PHINode *NewCanonicalIV = nullptr;
- for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
- ++i) {
- PHINode *PN = cast<PHINode>(i);
- if (PN == &I || PN == &CanonicalIV)
- continue;
- NewCanonicalIV = PN;
- }
- EXPECT_FALSE(NewCanonicalIV);
- });
- };
-
- unsigned ARBitWidth = 16;
- Type *ARType = IntegerType::get(C, ARBitWidth);
-
- // Expand {5,+,1}
- auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
- return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), L, SCEV::FlagAnyWrap);
- };
- TestNoCanonicalIV(GetAR2);
- TestNarrowCanonicalIV(GetAR2);
- TestMatchingCanonicalIV(GetAR2, ARBitWidth);
-}
-
-TEST_F(ScalarEvolutionsTest, SCEVExpanderShlNSW) {
-
- auto checkOneCase = [this](std::string &&str) {
- LLVMContext C;
- SMDiagnostic Err;
- std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- Function *F = M->getFunction("f");
- ASSERT_NE(F, nullptr) << "Could not find function 'f'";
-
- BasicBlock &Entry = F->getEntryBlock();
- LoadInst *Load = cast<LoadInst>(&Entry.front());
- BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());
-
- ScalarEvolution SE = buildSE(*F);
- const SCEV *AndSCEV = SE.getSCEV(And);
- EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
- EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
- EXPECT_EQ(I->getOpcode(), Instruction::Shl);
- EXPECT_FALSE(I->hasNoSignedWrap());
- };
-
- checkOneCase("define void @f(i16* %arrayidx) { "
- " %1 = load i16, i16* %arrayidx "
- " %2 = and i16 %1, -32768 "
- " ret void "
- "} ");
-
- checkOneCase("define void @f(i8* %arrayidx) { "
- " %1 = load i8, i8* %arrayidx "
- " %2 = and i8 %1, -128 "
- " ret void "
- "} ");
-}
-
TEST_F(ScalarEvolutionsTest, SCEVLoopDecIntrinsic) {
LLVMContext C;
SMDiagnostic Err;
@@ -1773,186 +1120,4 @@ TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
});
}
-// Test expansion of nested addrecs in CanonicalMode.
-// Expanding nested addrecs in canonical mode requiers a canonical IV of a
-// type wider than the type of the addrec itself. Currently, SCEVExpander
-// just falls back to literal mode for nested addrecs.
-TEST_F(ScalarEvolutionsTest, SCEVExpandNonAffineAddRec) {
- LLVMContext C;
- SMDiagnostic Err;
-
- // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
- auto TestNoCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
- ScalarEvolution & SE, Loop * L)> GetAddRec) {
- std::unique_ptr<Module> M =
- parseAssemblyString("define i32 @test(i32 %limit) { "
- "entry: "
- " br label %loop "
- "loop: "
- " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
- " %i.inc = add nsw i32 %i, 1 "
- " %cont = icmp slt i32 %i.inc, %limit "
- " br i1 %cont, label %loop, label %exit "
- "exit: "
- " ret i32 %i.inc "
- "}",
- Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto &I = GetInstByName(F, "i");
- auto *Loop = LI.getLoopFor(I.getParent());
- EXPECT_FALSE(Loop->getCanonicalInductionVariable());
-
- auto *AR = GetAddRec(SE, Loop);
- EXPECT_FALSE(AR->isAffine());
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *InsertAt = I.getNextNode();
- Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
- auto *ExpandedAR = SE.getSCEV(V);
- // Check that the expansion happened literally.
- EXPECT_EQ(AR, ExpandedAR);
- });
- };
-
- // Expand the addrec produced by GetAddRec into a loop with a canonical IV
- // which is narrower than addrec type.
- auto TestNarrowCanonicalIV = [&](
- std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
- GetAddRec) {
- std::unique_ptr<Module> M = parseAssemblyString(
- "define i32 @test(i32 %limit) { "
- "entry: "
- " br label %loop "
- "loop: "
- " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
- " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
- " %i.inc = add nsw i32 %i, 1 "
- " %canonical.iv.inc = add i8 %canonical.iv, 1 "
- " %cont = icmp slt i32 %i.inc, %limit "
- " br i1 %cont, label %loop, label %exit "
- "exit: "
- " ret i32 %i.inc "
- "}",
- Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto &I = GetInstByName(F, "i");
-
- auto *LoopHeaderBB = I.getParent();
- auto *Loop = LI.getLoopFor(LoopHeaderBB);
- PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
- EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
-
- auto *AR = GetAddRec(SE, Loop);
- EXPECT_FALSE(AR->isAffine());
-
- unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
- unsigned CanonicalIVBitWidth =
- cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
- EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *InsertAt = I.getNextNode();
- Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
- auto *ExpandedAR = SE.getSCEV(V);
- // Check that the expansion happened literally.
- EXPECT_EQ(AR, ExpandedAR);
- });
- };
-
- // Expand the addrec produced by GetAddRec into a loop with a canonical IV
- // of addrec width.
- auto TestMatchingCanonicalIV = [&](
- std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
- GetAddRec,
- unsigned ARBitWidth) {
- auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
- std::unique_ptr<Module> M = parseAssemblyString(
- "define i32 @test(i32 %limit) { "
- "entry: "
- " br label %loop "
- "loop: "
- " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
- " %canonical.iv = phi " + ARBitWidthTypeStr +
- " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
- " %i.inc = add nsw i32 %i, 1 "
- " %canonical.iv.inc = add " + ARBitWidthTypeStr +
- " %canonical.iv, 1 "
- " %cont = icmp slt i32 %i.inc, %limit "
- " br i1 %cont, label %loop, label %exit "
- "exit: "
- " ret i32 %i.inc "
- "}",
- Err, C);
-
- assert(M && "Could not parse module?");
- assert(!verifyModule(*M) && "Must have been well formed!");
-
- runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
- auto &I = GetInstByName(F, "i");
- auto &CanonicalIV = GetInstByName(F, "canonical.iv");
-
- auto *LoopHeaderBB = I.getParent();
- auto *Loop = LI.getLoopFor(LoopHeaderBB);
- EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
- unsigned CanonicalIVBitWidth =
- cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
-
- auto *AR = GetAddRec(SE, Loop);
- EXPECT_FALSE(AR->isAffine());
- EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
- EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
-
- SCEVExpander Exp(SE, M->getDataLayout(), "expander");
- auto *InsertAt = I.getNextNode();
- Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
- auto *ExpandedAR = SE.getSCEV(V);
- // Check that the expansion happened literally.
- EXPECT_EQ(AR, ExpandedAR);
- });
- };
-
- unsigned ARBitWidth = 16;
- Type *ARType = IntegerType::get(C, ARBitWidth);
-
- // Expand {5,+,1,+,1}
- auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
- SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), SE.getOne(ARType)};
- return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
- };
- TestNoCanonicalIV(GetAR3);
- TestNarrowCanonicalIV(GetAR3);
- TestMatchingCanonicalIV(GetAR3, ARBitWidth);
-
- // Expand {5,+,1,+,1,+,1}
- auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
- SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), SE.getOne(ARType),
- SE.getOne(ARType)};
- return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
- };
- TestNoCanonicalIV(GetAR4);
- TestNarrowCanonicalIV(GetAR4);
- TestMatchingCanonicalIV(GetAR4, ARBitWidth);
-
- // Expand {5,+,1,+,1,+,1,+,1}
- auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
- SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
- SE.getOne(ARType), SE.getOne(ARType),
- SE.getOne(ARType), SE.getOne(ARType)};
- return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
- };
- TestNoCanonicalIV(GetAR5);
- TestNarrowCanonicalIV(GetAR5);
- TestMatchingCanonicalIV(GetAR5, ARBitWidth);
-}
-
} // end namespace llvm
diff --git a/llvm/unittests/Transforms/Utils/CMakeLists.txt b/llvm/unittests/Transforms/Utils/CMakeLists.txt
index c9c0f9f84165..64723a05f13e 100644
--- a/llvm/unittests/Transforms/Utils/CMakeLists.txt
+++ b/llvm/unittests/Transforms/Utils/CMakeLists.txt
@@ -18,6 +18,7 @@ add_llvm_unittest(UtilsTests
LocalTest.cpp
LoopRotationUtilsTest.cpp
LoopUtilsTest.cpp
+ ScalarEvolutionExpanderTest.cpp
SizeOptsTest.cpp
SSAUpdaterBulkTest.cpp
UnrollLoopTest.cpp
diff --git a/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp b/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
index cf764bf76f06..bfa269e8f6ee 100644
--- a/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CodeMoverUtilsTest.cpp
@@ -11,11 +11,11 @@
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "gtest/gtest.h"
using namespace llvm;
diff --git a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
new file mode 100644
index 000000000000..c146d155ffbe
--- /dev/null
+++ b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
@@ -0,0 +1,915 @@
+//=== ScalarEvolutionExpanderTest.cpp - ScalarEvolutionExpander unit tests ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+
+// We use this fixture to ensure that we clean up ScalarEvolution before
+// deleting the PassManager.
+class ScalarEvolutionExpanderTest : public testing::Test {
+protected:
+ LLVMContext Context;
+ Module M;
+ TargetLibraryInfoImpl TLII;
+ TargetLibraryInfo TLI;
+
+ std::unique_ptr<AssumptionCache> AC;
+ std::unique_ptr<DominatorTree> DT;
+ std::unique_ptr<LoopInfo> LI;
+
+ ScalarEvolutionExpanderTest() : M("", Context), TLII(), TLI(TLII) {}
+
+ ScalarEvolution buildSE(Function &F) {
+ AC.reset(new AssumptionCache(F));
+ DT.reset(new DominatorTree(F));
+ LI.reset(new LoopInfo(*DT));
+ return ScalarEvolution(F, TLI, *AC, *DT, *LI);
+ }
+
+ void runWithSE(
+ Module &M, StringRef FuncName,
+ function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
+ auto *F = M.getFunction(FuncName);
+ ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
+ ScalarEvolution SE = buildSE(*F);
+ Test(*F, *LI, SE);
+ }
+};
+
+static Instruction &GetInstByName(Function &F, StringRef Name) {
+ for (auto &I : instructions(F))
+ if (I.getName() == Name)
+ return I;
+ llvm_unreachable("Could not find instructions!");
+}
+
+TEST_F(ScalarEvolutionExpanderTest, ExpandPtrTypeSCEV) {
+ // It is to test the fix for PR30213. It exercises the branch in scev
+ // expansion when the value in ValueOffsetPair is a ptr and the offset
+ // is not divisible by the elem type size of value.
+ auto *I8Ty = Type::getInt8Ty(Context);
+ auto *I8PtrTy = Type::getInt8PtrTy(Context);
+ auto *I32Ty = Type::getInt32Ty(Context);
+ auto *I32PtrTy = Type::getInt32PtrTy(Context);
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
+ Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
+ BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+ BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
+ BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
+ BranchInst::Create(LoopBB, EntryBB);
+ ReturnInst::Create(Context, nullptr, ExitBB);
+
+ // loop: ; preds = %loop, %entry
+ // %alloca = alloca i32
+ // %gep0 = getelementptr i32, i32* %alloca, i32 1
+ // %bitcast1 = bitcast i32* %gep0 to i8*
+ // %gep1 = getelementptr i8, i8* %bitcast1, i32 1
+ // %gep2 = getelementptr i8, i8* undef, i32 1
+ // %cmp = icmp ult i8* undef, %bitcast1
+ // %select = select i1 %cmp, i8* %gep1, i8* %gep2
+ // %bitcast2 = bitcast i8* %select to i32*
+ // br i1 undef, label %loop, label %exit
+
+ const DataLayout &DL = F->getParent()->getDataLayout();
+ BranchInst *Br = BranchInst::Create(
+ LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
+ AllocaInst *Alloca =
+ new AllocaInst(I32Ty, DL.getAllocaAddrSpace(), "alloca", Br);
+ ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
+ GetElementPtrInst *Gep0 =
+ GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
+ CastInst *CastA =
+ CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
+ GetElementPtrInst *Gep1 =
+ GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
+ GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
+ I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
+ CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
+ UndefValue::get(I8PtrTy), CastA, "cmp", Br);
+ SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
+ CastInst *CastB =
+ CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);
+
+ ScalarEvolution SE = buildSE(*F);
+ auto *S = SE.getSCEV(CastB);
+ SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+ Value *V =
+ Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);
+
+ // Expect the expansion code contains:
+ // %0 = bitcast i32* %bitcast2 to i8*
+ // %uglygep = getelementptr i8, i8* %0, i64 -1
+ // %1 = bitcast i8* %uglygep to i32*
+ EXPECT_TRUE(isa<BitCastInst>(V));
+ Instruction *Gep = cast<Instruction>(V)->getPrevNode();
+ EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
+ EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
+ EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
+ EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
+}
+
+// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
+TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
+ /*
+ * Create the following code:
+ * func(i64 addrspace(10)* %arg)
+ * top:
+ * br label %L.ph
+ * L.ph:
+ * br label %L
+ * L:
+ * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
+ * %add = add i64 %phi2, 1
+ * br i1 undef, label %post, label %L2
+ * post:
+ * %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
+ * #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
+ * ret void
+ *
+ * We will create the appropriate SCEV expression for %gep and expand it,
+ * then check that no inttoptr/ptrtoint instructions got inserted.
+ */
+
+ // Create a module with non-integral pointers in it's datalayout
+ Module NIM("nonintegral", Context);
+ std::string DataLayout = M.getDataLayoutStr();
+ if (!DataLayout.empty())
+ DataLayout += "-";
+ DataLayout += "ni:10";
+ NIM.setDataLayout(DataLayout);
+
+ Type *T_int1 = Type::getInt1Ty(Context);
+ Type *T_int64 = Type::getInt64Ty(Context);
+ Type *T_pint64 = T_int64->getPointerTo(10);
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
+ Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
+
+ Argument *Arg = &*F->arg_begin();
+
+ BasicBlock *Top = BasicBlock::Create(Context, "top", F);
+ BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
+ BasicBlock *L = BasicBlock::Create(Context, "L", F);
+ BasicBlock *Post = BasicBlock::Create(Context, "post", F);
+
+ IRBuilder<> Builder(Top);
+ Builder.CreateBr(LPh);
+
+ Builder.SetInsertPoint(LPh);
+ Builder.CreateBr(L);
+
+ Builder.SetInsertPoint(L);
+ PHINode *Phi = Builder.CreatePHI(T_int64, 2);
+ Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
+ Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
+ Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
+ Phi->addIncoming(Add, L);
+
+ Builder.SetInsertPoint(Post);
+ Value *GepBase =
+ Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
+ Instruction *Ret = Builder.CreateRetVoid();
+
+ ScalarEvolution SE = buildSE(*F);
+ auto *AddRec =
+ SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
+ LI->getLoopFor(L), SCEV::FlagNUW);
+
+ SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
+ Exp.disableCanonicalMode();
+ Exp.expandCodeFor(AddRec, T_pint64, Ret);
+
+ // Make sure none of the instructions inserted were inttoptr/ptrtoint.
+ // The verifier will check this.
+ EXPECT_FALSE(verifyFunction(*F, &errs()));
+}
+
+// Check that we can correctly identify the points at which the SCEV of the
+// AddRec can be expanded.
+TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderIsSafeToExpandAt) {
+ /*
+ * Create the following code:
+ * func(i64 addrspace(10)* %arg)
+ * top:
+ * br label %L.ph
+ * L.ph:
+ * br label %L
+ * L:
+ * %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
+ * %add = add i64 %phi2, 1
+ * %cond = icmp slt i64 %add, 1000; then becomes 2000.
+ * br i1 %cond, label %post, label %L2
+ * post:
+ * ret void
+ *
+ */
+
+ // Create a module with non-integral pointers in it's datalayout
+ Module NIM("nonintegral", Context);
+ std::string DataLayout = M.getDataLayoutStr();
+ if (!DataLayout.empty())
+ DataLayout += "-";
+ DataLayout += "ni:10";
+ NIM.setDataLayout(DataLayout);
+
+ Type *T_int64 = Type::getInt64Ty(Context);
+ Type *T_pint64 = T_int64->getPointerTo(10);
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
+ Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
+
+ BasicBlock *Top = BasicBlock::Create(Context, "top", F);
+ BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
+ BasicBlock *L = BasicBlock::Create(Context, "L", F);
+ BasicBlock *Post = BasicBlock::Create(Context, "post", F);
+
+ IRBuilder<> Builder(Top);
+ Builder.CreateBr(LPh);
+
+ Builder.SetInsertPoint(LPh);
+ Builder.CreateBr(L);
+
+ Builder.SetInsertPoint(L);
+ PHINode *Phi = Builder.CreatePHI(T_int64, 2);
+ auto *Add = cast<Instruction>(
+ Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
+ auto *Limit = ConstantInt::get(T_int64, 1000);
+ auto *Cond = cast<Instruction>(
+ Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
+ Builder.CreateCondBr(Cond, L, Post);
+ Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
+ Phi->addIncoming(Add, L);
+
+ Builder.SetInsertPoint(Post);
+ Builder.CreateRetVoid();
+
+ ScalarEvolution SE = buildSE(*F);
+ const SCEV *S = SE.getSCEV(Phi);
+ EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+ EXPECT_TRUE(AR->isAffine());
+ EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
+ EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
+ EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
+ EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
+}
+
+// Check that SCEV expander does not use the nuw instruction
+// for expansion.
+TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderNUW) {
+ /*
+ * Create the following code:
+ * func(i64 %a)
+ * entry:
+ * br false, label %exit, label %body
+ * body:
+ * %s1 = add i64 %a, -1
+ * br label %exit
+ * exit:
+ * %s = add nuw i64 %a, -1
+ * ret %s
+ */
+
+ // Create a module.
+ Module M("SCEVExpanderNUW", Context);
+
+ Type *T_int64 = Type::getInt64Ty(Context);
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
+ Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
+ Argument *Arg = &*F->arg_begin();
+ ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+ BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+ BasicBlock *Body = BasicBlock::Create(Context, "body", F);
+ BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+ IRBuilder<> Builder(Entry);
+ ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
+ Builder.CreateCondBr(Cond, Exit, Body);
+
+ Builder.SetInsertPoint(Body);
+ auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+ Builder.CreateBr(Exit);
+
+ Builder.SetInsertPoint(Exit);
+ auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+ S2->setHasNoUnsignedWrap(true);
+ auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+ ScalarEvolution SE = buildSE(*F);
+ const SCEV *S = SE.getSCEV(S1);
+ EXPECT_TRUE(isa<SCEVAddExpr>(S));
+ SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+ auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
+ EXPECT_FALSE(I->hasNoUnsignedWrap());
+}
+
+// Check that SCEV expander does not use the nsw instruction
+// for expansion.
+TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderNSW) {
+ /*
+ * Create the following code:
+ * func(i64 %a)
+ * entry:
+ * br false, label %exit, label %body
+ * body:
+ * %s1 = add i64 %a, -1
+ * br label %exit
+ * exit:
+ * %s = add nsw i64 %a, -1
+ * ret %s
+ */
+
+ // Create a module.
+ Module M("SCEVExpanderNSW", Context);
+
+ Type *T_int64 = Type::getInt64Ty(Context);
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
+ Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
+ Argument *Arg = &*F->arg_begin();
+ ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+ BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+ BasicBlock *Body = BasicBlock::Create(Context, "body", F);
+ BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+ IRBuilder<> Builder(Entry);
+ ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
+ Builder.CreateCondBr(Cond, Exit, Body);
+
+ Builder.SetInsertPoint(Body);
+ auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+ Builder.CreateBr(Exit);
+
+ Builder.SetInsertPoint(Exit);
+ auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+ S2->setHasNoSignedWrap(true);
+ auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+ ScalarEvolution SE = buildSE(*F);
+ const SCEV *S = SE.getSCEV(S1);
+ EXPECT_TRUE(isa<SCEVAddExpr>(S));
+ SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+ auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
+ EXPECT_FALSE(I->hasNoSignedWrap());
+}
+
+// Check that SCEV does not save the SCEV -> V
+// mapping of SCEV differ from V in NUW flag.
+TEST_F(ScalarEvolutionExpanderTest, SCEVCacheNUW) {
+ /*
+ * Create the following code:
+ * func(i64 %a)
+ * entry:
+ * %s1 = add i64 %a, -1
+ * %s2 = add nuw i64 %a, -1
+ * br label %exit
+ * exit:
+ * ret %s
+ */
+
+ // Create a module.
+ Module M("SCEVCacheNUW", Context);
+
+ Type *T_int64 = Type::getInt64Ty(Context);
+
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
+ Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
+ Argument *Arg = &*F->arg_begin();
+ ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+ BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+ BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+ IRBuilder<> Builder(Entry);
+ auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+ auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+ S2->setHasNoUnsignedWrap(true);
+ Builder.CreateBr(Exit);
+
+ Builder.SetInsertPoint(Exit);
+ auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+ ScalarEvolution SE = buildSE(*F);
+ // Get S2 first to move it to cache.
+ const SCEV *SC2 = SE.getSCEV(S2);
+ EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
+ // Now get S1.
+ const SCEV *SC1 = SE.getSCEV(S1);
+ EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
+ // Expand for S1, it should use S1 not S2 in spite S2
+ // first in the cache.
+ SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+ auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
+ EXPECT_FALSE(I->hasNoUnsignedWrap());
+}
+
+// Check that SCEV does not save the SCEV -> V
+// mapping of SCEV differ from V in NSW flag.
+TEST_F(ScalarEvolutionExpanderTest, SCEVCacheNSW) {
+  /*
+   * Create the following code:
+   * func(i64 %a)
+   * entry:
+   *   %s1 = add i64 %a, -1
+   *   %s2 = add nsw i64 %a, -1
+   *   br label %exit
+   * exit:
+   *   ret void
+   */
+
+  // Create a module.
+  Module M("SCEVCacheNSW", Context);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
+  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
+  Argument *Arg = &*F->arg_begin();
+  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+  // %s1 and %s2 compute the same value; only %s2 carries the NSW flag.
+  IRBuilder<> Builder(Entry);
+  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  S2->setHasNoSignedWrap(true);
+  Builder.CreateBr(Exit);
+
+  Builder.SetInsertPoint(Exit);
+  auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+  ScalarEvolution SE = buildSE(*F);
+  // Request S2's SCEV first so it lands in the cache before S1.
+  const SCEV *SC2 = SE.getSCEV(S2);
+  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
+  // Now get S1, which maps to the same SCEV expression.
+  const SCEV *SC1 = SE.getSCEV(S1);
+  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
+  // Expanding SC1 must not reuse S2 (which has NSW set), even though S2 was
+  // cached first: the expanded instruction must not carry the NSW flag.
+  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
+  EXPECT_FALSE(I->hasNoSignedWrap());
+}
+
+// Checks how SCEVExpander materializes a canonical induction variable when
+// expanding an addrec: it inserts a new one if the loop has none, inserts a
+// wider one if the existing canonical IV is too narrow, and reuses the
+// existing one when its width matches the addrec's type.
+TEST_F(ScalarEvolutionExpanderTest, SCEVExpandInsertCanonicalIV) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
+  // SCEVExpander will insert one.
+  auto TestNoCanonicalIV =
+      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)>
+              GetAddRec) {
+        std::unique_ptr<Module> M = parseAssemblyString(
+            "define i32 @test(i32 %limit) { "
+            "entry: "
+            " br label %loop "
+            "loop: "
+            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
+            " %i.inc = add nsw i32 %i, 1 "
+            " %cont = icmp slt i32 %i.inc, %limit "
+            " br i1 %cont, label %loop, label %exit "
+            "exit: "
+            " ret i32 %i.inc "
+            "}",
+            Err, C);
+
+        assert(M && "Could not parse module?");
+        assert(!verifyModule(*M) && "Must have been well formed!");
+
+        runWithSE(
+            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+              auto &I = GetInstByName(F, "i");
+              auto *Loop = LI.getLoopFor(I.getParent());
+              // Precondition: the loop has no canonical IV yet.
+              EXPECT_FALSE(Loop->getCanonicalInductionVariable());
+
+              auto *AR = GetAddRec(SE, Loop);
+              unsigned ExpectedCanonicalIVWidth =
+                  SE.getTypeSizeInBits(AR->getType());
+
+              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+              auto *InsertAt = I.getNextNode();
+              Exp.expandCodeFor(AR, nullptr, InsertAt);
+              // The expander must have inserted a canonical IV whose width
+              // matches the addrec's type.
+              PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
+              unsigned CanonicalIVBitWidth =
+                  cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
+              EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
+            });
+      };
+
+  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
+  // which is narrower than addrec type.
+  // SCEVExpander will insert a canonical IV of a wider type to expand the
+  // addrec.
+  auto TestNarrowCanonicalIV = [&](std::function<const SCEV *(
+                                       ScalarEvolution & SE, Loop * L)>
+                                       GetAddRec) {
+    std::unique_ptr<Module> M = parseAssemblyString(
+        "define i32 @test(i32 %limit) { "
+        "entry: "
+        " br label %loop "
+        "loop: "
+        " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
+        " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
+        " %i.inc = add nsw i32 %i, 1 "
+        " %canonical.iv.inc = add i8 %canonical.iv, 1 "
+        " %cont = icmp slt i32 %i.inc, %limit "
+        " br i1 %cont, label %loop, label %exit "
+        "exit: "
+        " ret i32 %i.inc "
+        "}",
+        Err, C);
+
+    assert(M && "Could not parse module?");
+    assert(!verifyModule(*M) && "Must have been well formed!");
+
+    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+      auto &I = GetInstByName(F, "i");
+
+      auto *LoopHeaderBB = I.getParent();
+      auto *Loop = LI.getLoopFor(LoopHeaderBB);
+      // Precondition: the existing canonical IV is the i8 phi.
+      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
+      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
+
+      auto *AR = GetAddRec(SE, Loop);
+
+      // The existing canonical IV is narrower than the addrec's type.
+      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
+      unsigned CanonicalIVBitWidth =
+          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
+      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
+
+      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+      auto *InsertAt = I.getNextNode();
+      Exp.expandCodeFor(AR, nullptr, InsertAt);
+
+      // Loop over all of the PHI nodes, looking for the new canonical indvar.
+      PHINode *NewCanonicalIV = nullptr;
+      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
+           ++i) {
+        PHINode *PN = cast<PHINode>(i);
+        if (PN == &I || PN == CanonicalIV)
+          continue;
+        // We expect that the only PHI added is the new canonical IV
+        EXPECT_FALSE(NewCanonicalIV);
+        NewCanonicalIV = PN;
+      }
+
+      // Check that NewCanonicalIV is a canonical IV, i.e {0,+,1}:
+      // starts at 0 on the preheader edge, incremented by 1 on the backedge.
+      BasicBlock *Incoming = nullptr, *Backedge = nullptr;
+      EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
+      auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
+      EXPECT_TRUE(isa<ConstantInt>(Start));
+      EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
+      auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
+      EXPECT_TRUE(isa<BinaryOperator>(Next));
+      auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
+      EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
+      EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
+      auto *Step = NextBinOp->getOperand(1);
+      EXPECT_TRUE(isa<ConstantInt>(Step));
+      EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());
+
+      // The new canonical IV must be as wide as the addrec's type.
+      unsigned NewCanonicalIVBitWidth =
+          cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
+      EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
+    });
+  };
+
+  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
+  // of addrec width.
+  // To expand the addrec SCEVExpander should use the existing canonical IV.
+  auto TestMatchingCanonicalIV =
+      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
+          unsigned ARBitWidth) {
+        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
+        std::unique_ptr<Module> M = parseAssemblyString(
+            "define i32 @test(i32 %limit) { "
+            "entry: "
+            " br label %loop "
+            "loop: "
+            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
+            " %canonical.iv = phi " +
+                ARBitWidthTypeStr +
+                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
+                " %i.inc = add nsw i32 %i, 1 "
+                " %canonical.iv.inc = add " +
+                ARBitWidthTypeStr +
+                " %canonical.iv, 1 "
+                " %cont = icmp slt i32 %i.inc, %limit "
+                " br i1 %cont, label %loop, label %exit "
+                "exit: "
+                " ret i32 %i.inc "
+                "}",
+            Err, C);
+
+        assert(M && "Could not parse module?");
+        assert(!verifyModule(*M) && "Must have been well formed!");
+
+        runWithSE(
+            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+              auto &I = GetInstByName(F, "i");
+              auto &CanonicalIV = GetInstByName(F, "canonical.iv");
+
+              // Precondition: the existing canonical IV matches the addrec
+              // width exactly.
+              auto *LoopHeaderBB = I.getParent();
+              auto *Loop = LI.getLoopFor(LoopHeaderBB);
+              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
+              unsigned CanonicalIVBitWidth =
+                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
+
+              auto *AR = GetAddRec(SE, Loop);
+              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
+              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
+
+              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+              auto *InsertAt = I.getNextNode();
+              Exp.expandCodeFor(AR, nullptr, InsertAt);
+
+              // Loop over all of the PHI nodes, looking if a new canonical
+              // indvar was introduced.
+              PHINode *NewCanonicalIV = nullptr;
+              for (BasicBlock::iterator i = LoopHeaderBB->begin();
+                   isa<PHINode>(i); ++i) {
+                PHINode *PN = cast<PHINode>(i);
+                if (PN == &I || PN == &CanonicalIV)
+                  continue;
+                NewCanonicalIV = PN;
+              }
+              // No new canonical IV may have been added: the existing one
+              // should have been reused.
+              EXPECT_FALSE(NewCanonicalIV);
+            });
+      };
+
+  unsigned ARBitWidth = 16;
+  Type *ARType = IntegerType::get(C, ARBitWidth);
+
+  // Expand {5,+,1}
+  auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
+    return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
+                            SE.getOne(ARType), L, SCEV::FlagAnyWrap);
+  };
+  TestNoCanonicalIV(GetAR2);
+  TestNarrowCanonicalIV(GetAR2);
+  TestMatchingCanonicalIV(GetAR2, ARBitWidth);
+}
+
+TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderShlNSW) {
+  // In each snippet SCEV models `and %x, <sign-bit mask>` as a multiply
+  // carrying NSW; the expander emits it back as a `shl`, and that shift must
+  // not inherit the NSW flag.
+  auto runCase = [this](std::string &&AsmStr) {
+    LLVMContext Ctx;
+    SMDiagnostic Diag;
+    std::unique_ptr<Module> M = parseAssemblyString(AsmStr, Diag, Ctx);
+
+    assert(M && "Could not parse module?");
+    assert(!verifyModule(*M) && "Must have been well formed!");
+
+    Function *F = M->getFunction("f");
+    ASSERT_NE(F, nullptr) << "Could not find function 'f'";
+
+    // The entry block holds exactly the pattern: a load feeding an `and`.
+    BasicBlock &EntryBB = F->getEntryBlock();
+    auto *Ld = cast<LoadInst>(&EntryBB.front());
+    auto *AndInst = cast<BinaryOperator>(*Ld->user_begin());
+
+    ScalarEvolution SE = buildSE(*F);
+    const SCEV *AndSCEV = SE.getSCEV(AndInst);
+    EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
+    EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());
+
+    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+    auto *Expanded =
+        cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, AndInst));
+    EXPECT_EQ(Expanded->getOpcode(), Instruction::Shl);
+    EXPECT_FALSE(Expanded->hasNoSignedWrap());
+  };
+
+  // i16: mask is the sign bit 0x8000.
+  runCase("define void @f(i16* %arrayidx) { "
+          " %1 = load i16, i16* %arrayidx "
+          " %2 = and i16 %1, -32768 "
+          " ret void "
+          "} ");
+
+  // i8: mask is the sign bit 0x80.
+  runCase("define void @f(i8* %arrayidx) { "
+          " %1 = load i8, i8* %arrayidx "
+          " %2 = and i8 %1, -128 "
+          " ret void "
+          "} ");
+}
+
+// Test expansion of nested addrecs in CanonicalMode.
+// Expanding nested addrecs in canonical mode requires a canonical IV of a
+// type wider than the type of the addrec itself. Currently, SCEVExpander
+// just falls back to literal mode for nested addrecs.
+TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
+  auto TestNoCanonicalIV =
+      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
+              GetAddRec) {
+        std::unique_ptr<Module> M = parseAssemblyString(
+            "define i32 @test(i32 %limit) { "
+            "entry: "
+            " br label %loop "
+            "loop: "
+            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
+            " %i.inc = add nsw i32 %i, 1 "
+            " %cont = icmp slt i32 %i.inc, %limit "
+            " br i1 %cont, label %loop, label %exit "
+            "exit: "
+            " ret i32 %i.inc "
+            "}",
+            Err, C);
+
+        assert(M && "Could not parse module?");
+        assert(!verifyModule(*M) && "Must have been well formed!");
+
+        runWithSE(*M, "test",
+                  [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+                    auto &I = GetInstByName(F, "i");
+                    auto *Loop = LI.getLoopFor(I.getParent());
+                    // Precondition: the loop has no canonical IV.
+                    EXPECT_FALSE(Loop->getCanonicalInductionVariable());
+
+                    // The addrec under test is nested, i.e. non-affine.
+                    auto *AR = GetAddRec(SE, Loop);
+                    EXPECT_FALSE(AR->isAffine());
+
+                    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+                    auto *InsertAt = I.getNextNode();
+                    Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
+                    auto *ExpandedAR = SE.getSCEV(V);
+                    // Check that the expansion happened literally.
+                    EXPECT_EQ(AR, ExpandedAR);
+                  });
+      };
+
+  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
+  // which is narrower than addrec type.
+  auto TestNarrowCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
+                                       ScalarEvolution & SE, Loop * L)>
+                                       GetAddRec) {
+    std::unique_ptr<Module> M = parseAssemblyString(
+        "define i32 @test(i32 %limit) { "
+        "entry: "
+        " br label %loop "
+        "loop: "
+        " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
+        " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
+        " %i.inc = add nsw i32 %i, 1 "
+        " %canonical.iv.inc = add i8 %canonical.iv, 1 "
+        " %cont = icmp slt i32 %i.inc, %limit "
+        " br i1 %cont, label %loop, label %exit "
+        "exit: "
+        " ret i32 %i.inc "
+        "}",
+        Err, C);
+
+    assert(M && "Could not parse module?");
+    assert(!verifyModule(*M) && "Must have been well formed!");
+
+    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+      auto &I = GetInstByName(F, "i");
+
+      // Precondition: the existing canonical IV is the narrower i8 phi.
+      auto *LoopHeaderBB = I.getParent();
+      auto *Loop = LI.getLoopFor(LoopHeaderBB);
+      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
+      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
+
+      auto *AR = GetAddRec(SE, Loop);
+      EXPECT_FALSE(AR->isAffine());
+
+      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
+      unsigned CanonicalIVBitWidth =
+          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
+      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
+
+      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+      auto *InsertAt = I.getNextNode();
+      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
+      auto *ExpandedAR = SE.getSCEV(V);
+      // Check that the expansion happened literally.
+      EXPECT_EQ(AR, ExpandedAR);
+    });
+  };
+
+  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
+  // of addrec width.
+  auto TestMatchingCanonicalIV =
+      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
+              GetAddRec,
+          unsigned ARBitWidth) {
+        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
+        std::unique_ptr<Module> M = parseAssemblyString(
+            "define i32 @test(i32 %limit) { "
+            "entry: "
+            " br label %loop "
+            "loop: "
+            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
+            " %canonical.iv = phi " +
+                ARBitWidthTypeStr +
+                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
+                " %i.inc = add nsw i32 %i, 1 "
+                " %canonical.iv.inc = add " +
+                ARBitWidthTypeStr +
+                " %canonical.iv, 1 "
+                " %cont = icmp slt i32 %i.inc, %limit "
+                " br i1 %cont, label %loop, label %exit "
+                "exit: "
+                " ret i32 %i.inc "
+                "}",
+            Err, C);
+
+        assert(M && "Could not parse module?");
+        assert(!verifyModule(*M) && "Must have been well formed!");
+
+        runWithSE(
+            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+              auto &I = GetInstByName(F, "i");
+              auto &CanonicalIV = GetInstByName(F, "canonical.iv");
+
+              // Precondition: the canonical IV's width equals the addrec's.
+              auto *LoopHeaderBB = I.getParent();
+              auto *Loop = LI.getLoopFor(LoopHeaderBB);
+              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
+              unsigned CanonicalIVBitWidth =
+                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
+
+              auto *AR = GetAddRec(SE, Loop);
+              EXPECT_FALSE(AR->isAffine());
+              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
+              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
+
+              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
+              auto *InsertAt = I.getNextNode();
+              Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
+              auto *ExpandedAR = SE.getSCEV(V);
+              // Check that the expansion happened literally.
+              EXPECT_EQ(AR, ExpandedAR);
+            });
+      };
+
+  unsigned ARBitWidth = 16;
+  Type *ARType = IntegerType::get(C, ARBitWidth);
+
+  // Expand {5,+,1,+,1}
+  auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
+    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+                                        SE.getOne(ARType), SE.getOne(ARType)};
+    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
+  };
+  TestNoCanonicalIV(GetAR3);
+  TestNarrowCanonicalIV(GetAR3);
+  TestMatchingCanonicalIV(GetAR3, ARBitWidth);
+
+  // Expand {5,+,1,+,1,+,1}
+  auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
+    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+                                        SE.getOne(ARType), SE.getOne(ARType),
+                                        SE.getOne(ARType)};
+    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
+  };
+  TestNoCanonicalIV(GetAR4);
+  TestNarrowCanonicalIV(GetAR4);
+  TestMatchingCanonicalIV(GetAR4, ARBitWidth);
+
+  // Expand {5,+,1,+,1,+,1,+,1}
+  auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
+    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
+                                        SE.getOne(ARType), SE.getOne(ARType),
+                                        SE.getOne(ARType), SE.getOne(ARType)};
+    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
+  };
+  TestNoCanonicalIV(GetAR5);
+  TestNarrowCanonicalIV(GetAR5);
+  TestMatchingCanonicalIV(GetAR5, ARBitWidth);
+}
+
+} // end namespace llvm
More information about the llvm-commits
mailing list