[llvm] r362853 - [GVN] non-functional code movement
Keno Fischer via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 7 16:08:38 PDT 2019
Author: kfischer
Date: Fri Jun 7 16:08:38 2019
New Revision: 362853
URL: http://llvm.org/viewvc/llvm-project?rev=362853&view=rev
Log:
[GVN] non-functional code movement
Summary: Move some code around in preparation for later fixes
to the non-integral addrspace handling (D59661).
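Concretely, the movement is twofold: the type-equality fast path moves
into canCoerceMustAliasedValueToLoad itself, so GVN's call sites no
longer need to compare types before calling it, and DepInst is hoisted
so the clobber and def branches of AnalyzeLoadAvailability share one
variable. A condensed sketch of the new entry of the coercion check
(the remaining checks are elided here; the authoritative change is in
the VNCoercion.cpp hunk below):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
                                         const DataLayout &DL) {
      // Identical types always coerce trivially, so the check now lives
      // in one place instead of being repeated at every call site.
      if (StoredVal->getType() == LoadTy)
        return true;
      // ... first-class-aggregate and byte-alignment checks follow,
      // unchanged from before ...
      return false; // sketch only; see the real function below
    }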
Patch by Jameson Nash <jameson at juliacomputing.com>
Reviewed By: reames, loladiro
Differential Revision: https://reviews.llvm.org/D59729
Modified:
llvm/trunk/lib/Transforms/Scalar/GVN.cpp
llvm/trunk/lib/Transforms/Utils/VNCoercion.cpp
Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=362853&r1=362852&r2=362853&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Fri Jun 7 16:08:38 2019
@@ -859,11 +859,12 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
const DataLayout &DL = LI->getModule()->getDataLayout();
+ Instruction *DepInst = DepInfo.getInst();
if (DepInfo.isClobber()) {
// If the dependence is to a store that writes to a superset of the bits
// read by the load, we can extract the bits we need for the load from the
// stored value.
- if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
+ if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
// Can't forward from non-atomic to atomic without violating memory model.
if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
int Offset =
@@ -879,7 +880,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
// load i32* P
// load i8* (P+1)
// if we have this, replace the later with an extraction from the former.
- if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
+ if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
// Can't forward from non-atomic to atomic without violating memory model.
@@ -896,7 +897,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
- if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
+ if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
if (Address && !LI->isAtomic()) {
int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
DepMI, DL);
@@ -910,8 +911,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
LLVM_DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
- Instruction *I = DepInfo.getInst();
- dbgs() << " is clobbered by " << *I << '\n';);
+ dbgs() << " is clobbered by " << *DepInst << '\n';);
if (ORE->allowExtraAnalysis(DEBUG_TYPE))
reportMayClobberedLoad(LI, DepInfo, DT, ORE);
@@ -919,8 +919,6 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
}
assert(DepInfo.isDef() && "follows from above");
- Instruction *DepInst = DepInfo.getInst();
-
// Loading the allocation -> undef.
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
// Loading immediately after lifetime begin -> undef.
@@ -939,9 +937,8 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
// Reject loads and stores that are to the same address but are of
// different types if we have to. If the stored value is larger or equal to
// the loaded value, we can reuse it.
- if (S->getValueOperand()->getType() != LI->getType() &&
- !canCoerceMustAliasedValueToLoad(S->getValueOperand(),
- LI->getType(), DL))
+ if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
+ DL))
return false;
// Can't forward from non-atomic to atomic without violating memory model.
@@ -956,8 +953,7 @@ bool GVN::AnalyzeLoadAvailability(LoadIn
// If the types mismatch and we can't handle it, reject reuse of the load.
// If the stored value is larger or equal to the loaded value, we can reuse
// it.
- if (LD->getType() != LI->getType() &&
- !canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
+ if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
return false;
// Can't forward from non-atomic to atomic without violating memory model.
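One subtlety in the hunks above is worth spelling out: the guard
LI->isAtomic() <= DepSI->isAtomic() compares the two flags as 0/1, so
forwarding is rejected only in the non-atomic-store-to-atomic-load
case. A standalone illustration of just that comparison (the helper
name is hypothetical, not part of the patch):

    // Of the four atomicity combinations, only forwarding a non-atomic
    // store's value to an atomic load is forbidden: the load would gain
    // ordering guarantees the store never provided.
    static bool mayForwardAtomicity(bool LoadIsAtomic, bool StoreIsAtomic) {
      return LoadIsAtomic <= StoreIsAtomic; // true <= false is the only failure
    }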
Modified: llvm/trunk/lib/Transforms/Utils/VNCoercion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/VNCoercion.cpp?rev=362853&r1=362852&r2=362853&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/VNCoercion.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/VNCoercion.cpp Fri Jun 7 16:08:38 2019
@@ -14,13 +14,17 @@ namespace VNCoercion {
/// Return true if coerceAvailableValueToLoadType will succeed.
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
const DataLayout &DL) {
+ Type *StoredTy = StoredVal->getType();
+ if (StoredTy == LoadTy)
+ return true;
+
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
- if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
- StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy())
+ if (LoadTy->isStructTy() || LoadTy->isArrayTy() || StoredTy->isStructTy() ||
+ StoredTy->isArrayTy())
return false;
- uint64_t StoreSize = DL.getTypeSizeInBits(StoredVal->getType());
+ uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy);
// The store size must be byte-aligned to support future type casts.
if (llvm::alignTo(StoreSize, 8) != StoreSize)
@@ -306,7 +310,7 @@ int analyzeLoadFromClobberingMemInst(Typ
return -1;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
- if (!GV || !GV->isConstant())
+ if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return -1;
// See if the access is within the bounds of the transfer.
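The added hasDefinitiveInitializer() guard goes slightly beyond pure
code movement: a GlobalVariable can satisfy isConstant() while the
initializer visible in this module is not authoritative (it may have no
initializer at all, or an interposable definition the linker can
replace), in which case folding a load from it would bake in bytes the
final program might never contain. A minimal sketch of the guarded
pattern (the helper name is hypothetical):

    #include "llvm/IR/GlobalVariable.h"
    using namespace llvm;

    // Fold a load from a constant global only when this module's
    // initializer is guaranteed to be the one the program observes.
    static bool canFoldLoadFromGlobal(const GlobalVariable *GV) {
      return GV && GV->isConstant() && GV->hasDefinitiveInitializer();
    }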