[llvm-branch-commits] [llvm] 2d1b024 - [DSE][NFC] Need to be careful mixing signed and unsigned types
Evgeniy Brevnov via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 8 01:58:45 PST 2020
Author: Evgeniy Brevnov
Date: 2020-12-08T16:53:37+07:00
New Revision: 2d1b024d06b2a81f1c35193a998a864be09e2ae8
URL: https://github.com/llvm/llvm-project/commit/2d1b024d06b2a81f1c35193a998a864be09e2ae8
DIFF: https://github.com/llvm/llvm-project/commit/2d1b024d06b2a81f1c35193a998a864be09e2ae8.diff
LOG: [DSE][NFC] Need to be careful mixing signed and unsigned types
Currently, some places use a signed type to represent the size of an access and insert explicit casts from unsigned to signed.
For example: int64_t EarlierSize = int64_t(Loc.Size.getValue());
Even though the cast doesn't lose bits (immediately), the value may overflow and we end up with a negative size. That can cause later code to work incorrectly; a simple example is a check that the size is not negative.
I think it is safer and clearer to use an unsigned type for the size and handle it appropriately.
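A minimal standalone sketch (hypothetical values, not taken from the patch) of how the old pattern can misfire; converting an out-of-range value to a signed type is implementation-defined before C++20 and wraps to a negative number on typical two's-complement targets:

#include <cstdint>
#include <cstdio>

int main() {
  // A memory-access size as the analysis reports it: unsigned.
  uint64_t Size = uint64_t(1) << 63; // larger than INT64_MAX

  // The old pattern: cast to a signed type for convenience.
  int64_t SignedSize = int64_t(Size); // wraps to a negative value on
                                      // typical two's-complement targets

  // A later sanity check now misfires: the size looks negative.
  if (SignedSize < 0)
    printf("size looks negative: %lld\n", (long long)SignedSize);
  return 0;
}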
Reviewed By: fhahn
Differential Revision: https://reviews.llvm.org/D92648
Added:
Modified:
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 9499e892871f..e4b8980b01e8 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1071,8 +1071,8 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
}
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
- int64_t &EarlierSize, int64_t LaterOffset,
- int64_t LaterSize, bool IsOverwriteEnd) {
+ uint64_t &EarlierSize, int64_t LaterOffset,
+ uint64_t LaterSize, bool IsOverwriteEnd) {
// TODO: base this on the target vector size so that if the earlier
// store was too small to get vector writes anyway then its likely
// a good idea to shorten it
@@ -1127,16 +1127,23 @@ static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
static bool tryToShortenEnd(Instruction *EarlierWrite,
OverlapIntervalsTy &IntervalMap,
- int64_t &EarlierStart, int64_t &EarlierSize) {
+ int64_t &EarlierStart, uint64_t &EarlierSize) {
if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
return false;
OverlapIntervalsTy::iterator OII = --IntervalMap.end();
int64_t LaterStart = OII->second;
- int64_t LaterSize = OII->first - LaterStart;
+ uint64_t LaterSize = OII->first - LaterStart;
- if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
- LaterStart + LaterSize >= EarlierStart + EarlierSize) {
+ assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
+
+ if (LaterStart > EarlierStart &&
+ // Note: "LaterStart - EarlierStart" is known to be positive due to
+ // preceding check.
+ (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
+ // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
+ // be non negative due to preceding checks.
+ LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
LaterSize, true)) {
IntervalMap.erase(OII);
@@ -1148,16 +1155,23 @@ static bool tryToShortenEnd(Instruction *EarlierWrite,
static bool tryToShortenBegin(Instruction *EarlierWrite,
OverlapIntervalsTy &IntervalMap,
- int64_t &EarlierStart, int64_t &EarlierSize) {
+ int64_t &EarlierStart, uint64_t &EarlierSize) {
if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
return false;
OverlapIntervalsTy::iterator OII = IntervalMap.begin();
int64_t LaterStart = OII->second;
- int64_t LaterSize = OII->first - LaterStart;
+ uint64_t LaterSize = OII->first - LaterStart;
+
+ assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
- if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
- assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
+ if (LaterStart <= EarlierStart &&
+ // Note: "EarlierStart - LaterStart" is known to be non negative due to
+ // preceding check.
+ LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
+ // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
+ // positive due to preceding checks.
+ assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
"Should have been handled as OW_Complete");
if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
LaterSize, false)) {
@@ -1179,7 +1193,7 @@ static bool removePartiallyOverlappedStores(const DataLayout &DL,
const Value *Ptr = Loc.Ptr->stripPointerCasts();
int64_t EarlierStart = 0;
- int64_t EarlierSize = int64_t(Loc.Size.getValue());
+ uint64_t EarlierSize = Loc.Size.getValue();
GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
OverlapIntervalsTy &IntervalMap = OI.second;
Changed |=
@@ -1428,8 +1442,8 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
"when partial-overwrite "
"tracking is enabled");
// The overwrite result is known, so these must be known, too.
- int64_t EarlierSize = DepLoc.Size.getValue();
- int64_t LaterSize = Loc.Size.getValue();
+ uint64_t EarlierSize = DepLoc.Size.getValue();
+ uint64_t LaterSize = Loc.Size.getValue();
bool IsOverwriteEnd = (OR == OW_End);
MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
InstWriteOffset, LaterSize, IsOverwriteEnd);
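For reference, the rewritten guards avoid ever forming EarlierStart + EarlierSize, a sum that can overflow a signed 64-bit offset. Below is a standalone sketch of the end-shortening condition as the patch restructures it; the names mirror the patch, but this is an illustrative helper, not the LLVM source:

#include <cstdint>

// Overflow-safe form of "LaterStart + LaterSize >= EarlierStart + EarlierSize"
// when the starts are signed offsets and the sizes are unsigned.
static bool laterCoversEarlierEnd(int64_t EarlierStart, uint64_t EarlierSize,
                                  int64_t LaterStart, uint64_t LaterSize) {
  if (LaterStart <= EarlierStart)
    return false;
  // LaterStart - EarlierStart is positive here, so the cast is safe.
  uint64_t Skew = (uint64_t)(LaterStart - EarlierStart);
  if (Skew >= EarlierSize)
    return false; // the later write starts at or past the earlier one's end
  // EarlierSize - Skew cannot wrap: Skew < EarlierSize was checked above.
  return LaterSize >= EarlierSize - Skew;
}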