[llvm] 8399fa6 - [MemCpyOptimizer] Use auto* for cast<> results (style). NFC.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 6 07:15:15 PST 2022
Author: Simon Pilgrim
Date: 2022-01-06T15:15:03Z
New Revision: 8399fa673b2fc2af49252be1488702f170a5d72b
URL: https://github.com/llvm/llvm-project/commit/8399fa673b2fc2af49252be1488702f170a5d72b
DIFF: https://github.com/llvm/llvm-project/commit/8399fa673b2fc2af49252be1488702f170a5d72b.diff
LOG: [MemCpyOptimizer] Use auto* for cast<> results (style). NFC.
Added:
Modified:
llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Removed:
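
For context, the commit applies LLVM's coding-standard guidance that a variable may be declared with auto (keeping the explicit *) when its pointee type is already spelled out by the cast<>/dyn_cast<> on the right-hand side, so the type name is not written twice. A minimal before/after sketch of the pattern, assuming the usual LLVM headers; the helper function below is hypothetical and not part of the patch:

    // A minimal sketch of the style change, not code from the patch.
    #include "llvm/IR/Instructions.h" // StoreInst, Instruction
    #include "llvm/Support/Casting.h" // dyn_cast
    using namespace llvm;

    // Hypothetical helper, used only to illustrate the pattern.
    static bool isSimpleStore(Instruction *Inst) {
      // Before the patch the pointee type was spelled out twice:
      //   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      // After the patch, auto* deduces StoreInst* from the dyn_cast<> result,
      // while the explicit '*' keeps it visible that SI is a pointer.
      if (auto *SI = dyn_cast<StoreInst>(Inst))
        return SI->isSimple();
      return false;
    }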
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 13847406c25a0..1c1818da0f79c 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -172,7 +172,7 @@ class MemsetRanges {
bool empty() const { return Ranges.empty(); }
void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ if (auto *SI = dyn_cast<StoreInst>(Inst))
addStore(OffsetFromFirst, SI);
else
addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
@@ -365,7 +365,7 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
const DataLayout &DL = StartInst->getModule()->getDataLayout();
// We can't track scalable types
- if (StoreInst *SI = dyn_cast<StoreInst>(StartInst))
+ if (auto *SI = dyn_cast<StoreInst>(StartInst))
if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable())
return nullptr;
@@ -411,7 +411,7 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
continue;
}
- if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
+ if (auto *NextStore = dyn_cast<StoreInst>(BI)) {
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
@@ -441,7 +441,7 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
Ranges.addStore(*Offset, NextStore);
} else {
- MemSetInst *MSI = cast<MemSetInst>(BI);
+ auto *MSI = cast<MemSetInst>(BI);
if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
!isa<ConstantInt>(MSI->getLength()))
@@ -662,7 +662,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
return false;
// Load to store forwarding can be interpreted as memcpy.
- if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
+ if (auto *LI = dyn_cast<LoadInst>(StoredVal)) {
if (LI->isSimple() && LI->hasOneUse() &&
LI->getParent() == SI->getParent()) {
@@ -872,7 +872,7 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
return false;
// Require that src be an alloca. This simplifies the reasoning considerably.
- AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
+ auto *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
if (!srcAlloca)
return false;
@@ -931,14 +931,14 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
append_range(srcUseList, U->users());
continue;
}
- if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
+ if (const auto *G = dyn_cast<GetElementPtrInst>(U)) {
if (!G->hasAllZeroIndices())
return false;
append_range(srcUseList, U->users());
continue;
}
- if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
+ if (const auto *IT = dyn_cast<IntrinsicInst>(U))
if (IT->isLifetimeStartOrEnd())
continue;
@@ -1089,8 +1089,8 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
// Second, the length of the memcpy's must be the same, or the preceding one
// must be larger than the following one.
if (MDep->getLength() != M->getLength()) {
- ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
- ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
+ auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
+ auto *MLen = dyn_cast<ConstantInt>(M->getLength());
if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
return false;
}
@@ -1209,7 +1209,7 @@ bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
const unsigned DestAlign =
std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
if (DestAlign > 1)
- if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
+ if (auto *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
IRBuilder<> Builder(MemCpy);
@@ -1257,12 +1257,11 @@ static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
if (MSSA->isLiveOnEntryDef(Def))
return isa<AllocaInst>(getUnderlyingObject(V));
- if (IntrinsicInst *II =
- dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
+ if (auto *II = dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
- ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
+ auto *LTSize = cast<ConstantInt>(II->getArgOperand(0));
- if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
+ if (auto *CSize = dyn_cast<ConstantInt>(Size)) {
if (AA->isMustAlias(V, II->getArgOperand(1)) &&
LTSize->getZExtValue() >= CSize->getZExtValue())
return true;
@@ -1272,7 +1271,7 @@ static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
// does) and we're querying a pointer based on that alloca, then we know
// the memory is definitely undef, regardless of how exactly we alias.
// The size also doesn't matter, as an out-of-bounds access would be UB.
- AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
+ auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
const DataLayout &DL = Alloca->getModule()->getDataLayout();
if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
@@ -1312,12 +1311,12 @@ bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
// Don't worry about sizes larger than i64.
// A known memset size is required.
- ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
+ auto *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
if (!CMemSetSize)
return false;
// A known memcpy size is also required.
- ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
+ auto *CCopySize = dyn_cast<ConstantInt>(CopySize);
if (!CCopySize)
return false;
if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
@@ -1369,7 +1368,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
}
// If copying from a constant, try to turn the memcpy into a memset.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
+ if (auto *GV = dyn_cast<GlobalVariable>(M->getSource()))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
M->getModule()->getDataLayout())) {
@@ -1416,7 +1415,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
// d) memcpy from a just-memset'd source can be turned into memset.
if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
if (Instruction *MI = MD->getMemoryInst()) {
- if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
+ if (auto *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
if (auto *C = dyn_cast<CallInst>(MI)) {
// The memcpy must post-dom the call. Limit to the same block for
// now. Additionally, we need to ensure that there are no accesses
@@ -1515,7 +1514,7 @@ bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
return false;
// The length of the memcpy must be larger or equal to the size of the byval.
- ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
+ auto *C1 = dyn_cast<ConstantInt>(MDep->getLength());
if (!C1 || !TypeSize::isKnownGE(
TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize))
return false;
@@ -1586,13 +1585,13 @@ bool MemCpyOptPass::iterateOnFunction(Function &F) {
bool RepeatInstruction = false;
- if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ if (auto *SI = dyn_cast<StoreInst>(I))
MadeChange |= processStore(SI, BI);
- else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
+ else if (auto *M = dyn_cast<MemSetInst>(I))
RepeatInstruction = processMemSet(M, BI);
- else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
+ else if (auto *M = dyn_cast<MemCpyInst>(I))
RepeatInstruction = processMemCpy(M, BI);
- else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
+ else if (auto *M = dyn_cast<MemMoveInst>(I))
RepeatInstruction = processMemMove(M);
else if (auto *CB = dyn_cast<CallBase>(I)) {
for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)