[llvm] r334687 - [NFC] fix trivial typos in comments
Hiroshi Inoue via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 13 22:41:49 PDT 2018
Author: inouehrs
Date: Wed Jun 13 22:41:49 2018
New Revision: 334687
URL: http://llvm.org/viewvc/llvm-project?rev=334687&view=rev
Log:
[NFC] fix trivial typos in comments
Modified:
llvm/trunk/lib/Transforms/Scalar/DeadStoreElimination.cpp
llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
llvm/trunk/lib/Transforms/Scalar/GVN.cpp
llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp
llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp
llvm/trunk/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
llvm/trunk/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
llvm/trunk/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp
llvm/trunk/lib/Transforms/Scalar/StructurizeCFG.cpp
Modified: llvm/trunk/lib/Transforms/Scalar/DeadStoreElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/DeadStoreElimination.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/DeadStoreElimination.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/DeadStoreElimination.cpp Wed Jun 13 22:41:49 2018
@@ -304,7 +304,7 @@ static Value *getStoredPointerOperand(In
//TODO: factor this to reuse getLocForWrite
MemoryLocation Loc = getLocForWrite(I);
assert(Loc.Ptr &&
- "unable to find pointer writen for analyzable instruction?");
+ "unable to find pointer written for analyzable instruction?");
// TODO: most APIs don't expect const Value *
return const_cast<Value*>(Loc.Ptr);
}
Modified: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp Wed Jun 13 22:41:49 2018
@@ -349,8 +349,8 @@ public:
/// that dominated values can succeed in their lookup.
ScopedHTType AvailableValues;
- /// A scoped hash table of the current values of previously encounted memory
- /// locations.
+ /// A scoped hash table of the current values of previously encountered
+ /// memory locations.
///
/// This allows us to get efficient access to dominating loads or stores when
/// we have a fully redundant load. In addition to the most recent load, we
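
For readers unfamiliar with the data structure: the "scoped" part is what lets
EarlyCSE push a scope when it descends into a dominator-tree child and pop it
on the way back up, discarding facts that only held under that child. A minimal
standalone sketch of the idea (illustrative only, not LLVM's actual
ScopedHashTable):

#include <string>
#include <unordered_map>
#include <vector>

// Each scope records its own insertions so they can be undone when the
// dominator-tree walk leaves that scope.
struct ScopedMap {
  std::unordered_map<std::string, std::vector<int>> Table; // key -> value stack
  std::vector<std::vector<std::string>> Scopes;            // insertions per scope

  void enterScope() { Scopes.emplace_back(); }

  void insert(const std::string &K, int V) {
    Table[K].push_back(V);
    Scopes.back().push_back(K);
  }

  // The innermost binding wins, which models "the closest dominating
  // definition" during the dominator-tree walk.
  int *lookup(const std::string &K) {
    auto It = Table.find(K);
    if (It == Table.end() || It->second.empty())
      return nullptr;
    return &It->second.back();
  }

  void exitScope() {
    for (const std::string &K : Scopes.back())
      Table[K].pop_back();
    Scopes.pop_back();
  }
};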
Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Wed Jun 13 22:41:49 2018
@@ -1064,7 +1064,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, A
// It is illegal to move the array access to any point above the guard,
// because if the index is out of bounds we should deoptimize rather than
// access the array.
- // Check that there is no guard in this block above our intruction.
+ // Check that there is no guard in this block above our instruction.
if (!IsSafeToSpeculativelyExecute) {
auto It = FirstImplicitControlFlowInsts.find(TmpBB);
if (It != FirstImplicitControlFlowInsts.end()) {
Modified: llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp Wed Jun 13 22:41:49 2018
@@ -167,7 +167,7 @@ JumpThreadingPass::JumpThreadingPass(int
}
// Update branch probability information according to conditional
-// branch probablity. This is usually made possible for cloned branches
+// branch probability. This is usually made possible for cloned branches
// in inline instances by the context specific profile in the caller.
// For instance,
//
@@ -1353,7 +1353,7 @@ bool JumpThreadingPass::SimplifyPartiall
DefMaxInstsToScan, AA, &IsLoadCSE, &NumScanedInst);
// If PredBB has a single predecessor, continue scanning through the
- // single precessor.
+ // single predecessor.
BasicBlock *SinglePredBB = PredBB;
while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
NumScanedInst < DefMaxInstsToScan) {
@@ -2524,7 +2524,7 @@ bool JumpThreadingPass::TryToUnfoldSelec
break;
}
} else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
- // Look for a Select in BB that uses PN as condtion.
+ // Look for a Select in BB that uses PN as condition.
if (isUnfoldCandidate(SelectI, U.get())) {
SI = SelectI;
break;
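
Context for TryToUnfoldSelect: when a select's condition comes from a PHI
whose value is known in some predecessor, unfolding the select into explicit
control flow exposes a conditional branch that jump threading can then thread.
In source-level terms (a simplified sketch, not the pass's actual output):

// A select on a condition that flows through a PHI...
int folded(bool c, int a, int b) { return c ? a : b; }

// ...behaves like this after unfolding; in predecessors where `c` is a
// known constant, the new branch folds away and the block can be threaded.
int unfolded(bool c, int a, int b) {
  int x;
  if (c)
    x = a;
  else
    x = b;
  return x;
}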
Modified: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp Wed Jun 13 22:41:49 2018
@@ -1040,7 +1040,7 @@ bool LoopIdiomRecognize::processLoopStor
CallInst *NewCall = nullptr;
// Check whether to generate an unordered atomic memcpy:
- // If the load or store are atomic, then they must neccessarily be unordered
+ // If the load or store are atomic, then they must necessarily be unordered
// by previous checks.
if (!SI->isAtomic() && !LI->isAtomic())
NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
@@ -1470,7 +1470,7 @@ bool LoopIdiomRecognize::recognizePopcou
if (!EntryBI || EntryBI->isConditional())
return false;
- // It should have a precondition block where the generated popcount instrinsic
+ // It should have a precondition block where the generated popcount intrinsic
// function can be inserted.
auto *PreCondBB = PH->getSinglePredecessor();
if (!PreCondBB)
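
For context, the loop shape recognizePopcount matches is the classic
clear-the-lowest-set-bit counting loop; roughly (a source-level sketch,
simplified from the real matcher):

// Each iteration clears the least significant set bit of v, so the trip
// count equals the number of set bits. Once matched, the whole loop can be
// replaced by a single llvm.ctpop intrinsic call, inserted in the
// precondition block discussed above.
int popcountIdiom(unsigned v) {
  int Count = 0;
  while (v) {
    v &= v - 1; // clear the least significant set bit
    ++Count;
  }
  return Count;
}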
Modified: llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp Wed Jun 13 22:41:49 2018
@@ -123,7 +123,7 @@ using namespace llvm;
#define DEBUG_TYPE "loop-reduce"
-/// MaxIVUsers is an arbitrary threshold that provides an early opportunitiy for
+/// MaxIVUsers is an arbitrary threshold that provides an early opportunity for
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
@@ -331,7 +331,7 @@ struct Formula {
/// #2 enforces that 1 * reg is reg.
/// #3 ensures invariant regs with respect to current loop can be combined
/// together in LSR codegen.
- /// This invariant can be temporarly broken while building a formula.
+ /// This invariant can be temporarily broken while building a formula.
/// However, every formula inserted into the LSRInstance must be in canonical
/// form.
SmallVector<const SCEV *, 4> BaseRegs;
@@ -2700,7 +2700,7 @@ findIVOperand(User::op_iterator OI, User
return OI;
}
-/// IVChain logic must consistenctly peek base TruncInst operands, so wrap it in
+/// IVChain logic must consistently peek base TruncInst operands, so wrap it in
/// a convenient helper.
static Value *getWideOperand(Value *Oper) {
if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
@@ -4454,7 +4454,7 @@ void LSRInstance::NarrowSearchSpaceByRef
/// The benefit is that it is more likely to find out a better solution
/// from a formulae set with more Scale and ScaledReg variations than
/// a formulae set with the same Scale and ScaledReg. The picking winner
-/// reg heurstic will often keep the formulae with the same Scale and
+/// reg heuristic will often keep the formulae with the same Scale and
/// ScaledReg and filter others, and we want to avoid that if possible.
void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
if (EstimateSearchSpaceComplexity() < ComplexityLimit)
Modified: llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp Wed Jun 13 22:41:49 2018
@@ -765,7 +765,7 @@ static bool computeUnrollCount(
// compute the former when the latter is zero.
unsigned ExactTripCount = TripCount;
assert((ExactTripCount == 0 || MaxTripCount == 0) &&
- "ExtractTripCound and MaxTripCount cannot both be non zero.");
+ "ExtractTripCount and MaxTripCount cannot both be non zero.");
unsigned FullUnrollTripCount = ExactTripCount ? ExactTripCount : MaxTripCount;
UP.Count = FullUnrollTripCount;
if (FullUnrollTripCount && FullUnrollTripCount <= UP.FullUnrollMaxCount) {
@@ -804,7 +804,7 @@ static bool computeUnrollCount(
}
// 5th priority is partial unrolling.
- // Try partial unroll only when TripCount could be staticaly calculated.
+ // Try partial unroll only when TripCount could be statically calculated.
if (TripCount) {
UP.Partial |= ExplicitUnroll;
if (!UP.Partial) {
@@ -1041,7 +1041,7 @@ static LoopUnrollResult tryToUnrollLoop(
// loop tests remains the same compared to the non-unrolled version, whereas
// the generic upper bound unrolling keeps all but the last loop test so the
// number of loop tests goes up which may end up being worse on targets with
- // constriained branch predictor resources so is controlled by an option.)
+ // constrained branch predictor resources so is controlled by an option.)
// In addition we only unroll small upper bounds.
if (!(UP.UpperBound || MaxOrZero) || MaxTripCount > UnrollMaxUpperBound) {
MaxTripCount = 0;
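
As a reminder of what the 5th-priority transformation above does: partial
unrolling replicates the body by some factor while keeping the loop, trading
code size for fewer back-edge branches. In source-level terms (a conceptual
sketch; the pass works on IR and emits a remainder loop when the static trip
count is not divisible by the factor):

// Before: trip count 100 is statically known.
void before(int *A, const int *B) {
  for (int I = 0; I < 100; ++I)
    A[I] = B[I] + 1;
}

// After partial unrolling by 4: same work, a quarter of the branches.
void after(int *A, const int *B) {
  for (int I = 0; I < 100; I += 4) {
    A[I]     = B[I]     + 1;
    A[I + 1] = B[I + 1] + 1;
    A[I + 2] = B[I + 2] + 1;
    A[I + 3] = B[I + 3] + 1;
  }
}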
Modified: llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp Wed Jun 13 22:41:49 2018
@@ -531,7 +531,7 @@ static bool moveUp(AliasAnalysis &AA, St
return false;
// Keep track of the arguments of all instruction we plan to lift
- // so we can make sure to lift them as well if apropriate.
+ // so we can make sure to lift them as well if appropriate.
DenseSet<Instruction*> Args;
if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
if (Ptr->getParent() == SI->getParent())
Modified: llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/NewGVN.cpp Wed Jun 13 22:41:49 2018
@@ -1259,7 +1259,7 @@ bool NewGVN::someEquivalentDominates(con
// This must be an instruction because we are only called from phi nodes
// in the case that the value it needs to check against is an instruction.
- // The most likely candiates for dominance are the leader and the next leader.
+ // The most likely candidates for dominance are the leader and the next leader.
// The leader or nextleader will dominate in all cases where there is an
// equivalent that is higher up in the dom tree.
// We can't *only* check them, however, because the
@@ -1624,7 +1624,7 @@ NewGVN::performSymbolicPredicateInfoEval
const Expression *NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
auto *CI = cast<CallInst>(I);
if (auto *II = dyn_cast<IntrinsicInst>(I)) {
- // Instrinsics with the returned attribute are copies of arguments.
+ // Intrinsics with the returned attribute are copies of arguments.
if (auto *ReturnedValue = II->getReturnedArgOperand()) {
if (II->getIntrinsicID() == Intrinsic::ssa_copy)
if (const auto *Result = performSymbolicPredicateInfoEvaluation(I))
@@ -2217,7 +2217,7 @@ void NewGVN::moveMemoryToNewCongruenceCl
MemoryAccess *InstMA,
CongruenceClass *OldClass,
CongruenceClass *NewClass) {
- // If the leader is I, and we had a represenative MemoryAccess, it should
+ // If the leader is I, and we had a representative MemoryAccess, it should
// be the MemoryAccess of OldClass.
assert((!InstMA || !OldClass->getMemoryLeader() ||
OldClass->getLeader() != I ||
@@ -4124,7 +4124,7 @@ bool NewGVN::eliminateInstructions(Funct
// It's about to be alive again.
if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
ProbablyDead.erase(cast<Instruction>(DominatingLeader));
- // Copy instructions, however, are still dead beacuse we use their
+ // Copy instructions, however, are still dead because we use their
// operand as the leader.
if (LeaderUseCount == 0 && isSSACopy)
ProbablyDead.insert(II);
Modified: llvm/trunk/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp Wed Jun 13 22:41:49 2018
@@ -1376,7 +1376,7 @@ public:
assert(OldI != NewI && "Disallowed at construction?!");
assert((!IsDeoptimize || !New) &&
- "Deoptimize instrinsics are not replaced!");
+ "Deoptimize intrinsics are not replaced!");
Old = nullptr;
New = nullptr;
@@ -1386,7 +1386,7 @@ public:
if (IsDeoptimize) {
// Note: we've inserted instructions, so the call to llvm.deoptimize may
- // not necessarilly be followed by the matching return.
+ // not necessarily be followed by the matching return.
auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
new UnreachableInst(RI->getContext(), RI);
RI->eraseFromParent();
@@ -1984,7 +1984,7 @@ chainToBasePointerCost(SmallVectorImpl<I
Cost += 2;
} else {
- llvm_unreachable("unsupported instruciton type during rematerialization");
+ llvm_unreachable("unsupported instruction type during rematerialization");
}
}
Modified: llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp Wed Jun 13 22:41:49 2018
@@ -708,7 +708,7 @@ Value *ConstantOffsetExtractor::removeCo
BinaryOperator::BinaryOps NewOp = BO->getOpcode();
if (BO->getOpcode() == Instruction::Or) {
// Rebuild "or" as "add", because "or" may be invalid for the new
- // epxression.
+ // expression.
//
// For instance, given
// a | (b + 5) where a and b + 5 have no common bits,
@@ -1068,7 +1068,7 @@ bool SeparateConstOffsetFromGEP::splitGE
DL->getTypeAllocSize(GEP->getResultElementType()));
Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
- // Very likely. As long as %gep is natually aligned, the byte offset we
+ // Very likely. As long as %gep is naturally aligned, the byte offset we
// extracted should be a multiple of sizeof(*%gep).
int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
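
The "no common bits" example in the first hunk is worth spelling out: when a
and b + 5 share no set bits, a | (b + 5) equals a + (b + 5), but once the
constant 5 is split out, the reassociated or-form (a | b) + 5 need not be
equal, which is why the extractor rebuilds the "or" as "add". A concrete check
with illustrative values:

#include <cassert>

int main() {
  unsigned A = 4, B = 4;                  // A & (B + 5) == 0: 0b0100 & 0b1001
  assert((A | (B + 5)) == A + (B + 5));   // both are 13: or == add holds
  assert(((A | B) + 5) != (A | (B + 5))); // but (A | B) + 5 == 9, not 13
  return 0;
}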
Modified: llvm/trunk/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp Wed Jun 13 22:41:49 2018
@@ -523,7 +523,7 @@ static bool unswitchAllTrivialConditions
return Changed;
if (!unswitchTrivialSwitch(L, *SI, DT, LI))
- // Coludn't unswitch this one so we're done.
+ // Couldn't unswitch this one so we're done.
return Changed;
// Mark that we managed to unswitch something.
@@ -1783,7 +1783,7 @@ unswitchLoop(Loop &L, DominatorTree &DT,
// irreducible control flow into reducible control flow and introduce new
// loops "out of thin air". If we ever discover important use cases for doing
// this, we can add support to loop unswitch, but it is a lot of complexity
- // for what seems little or no real world benifit.
+ // for what seems little or no real world benefit.
LoopBlocksRPO RPOT(&L);
RPOT.perform(&LI);
if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
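
For readers who have not met the term: a CFG is irreducible when some cycle
has more than one entry, so no block of the cycle dominates the rest and there
is no unique loop header. A minimal source-level illustration (hypothetical,
for exposition only):

// The cycle B <-> C can be entered at either B or C, so neither block
// dominates the other and LoopInfo reports no natural loop here.
int irreducible(int N) {
  if (N & 1)
    goto B;
  goto C;
B:
  if (--N > 0)
    goto C;
  return N;
C:
  if (--N > 0)
    goto B;
  return N;
}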
Modified: llvm/trunk/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp Wed Jun 13 22:41:49 2018
@@ -266,7 +266,7 @@ static bool isSafeAndProfitableToSpecula
// Assume we will commute the constant to the RHS to be canonical.
Idx = 1;
- // Get the intrinsic ID if this user is an instrinsic.
+ // Get the intrinsic ID if this user is an intrinsic.
Intrinsic::ID IID = Intrinsic::not_intrinsic;
if (auto *UserII = dyn_cast<IntrinsicInst>(UserI))
IID = UserII->getIntrinsicID();
@@ -609,7 +609,7 @@ static void speculatePHIs(ArrayRef<PHINo
// Each predecessor is numbered by its index in `SpecPreds`, so for each
// instruction we speculate, the speculated instruction is stored in that
- // index of the vector asosciated with the original instruction. We also
+ // index of the vector associated with the original instruction. We also
// store the incoming values for each predecessor from any PHIs used.
SmallDenseMap<Instruction *, SmallVector<Value *, 2>, 16> SpeculatedValueMap;
Modified: llvm/trunk/lib/Transforms/Scalar/StructurizeCFG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/StructurizeCFG.cpp?rev=334687&r1=334686&r2=334687&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/StructurizeCFG.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/StructurizeCFG.cpp Wed Jun 13 22:41:49 2018
@@ -880,7 +880,7 @@ void StructurizeCFG::createFlow() {
}
/// Handle a rare case where the disintegrated nodes instructions
-/// no longer dominate all their uses. Not sure if this is really nessasary
+/// no longer dominate all their uses. Not sure if this is really necessary
void StructurizeCFG::rebuildSSA() {
SSAUpdater Updater;
for (BasicBlock *BB : ParentRegion->blocks())
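
For reference, the repair this function performs follows the standard
SSAUpdater pattern; roughly (a hand-written sketch of the pattern, not the
function's actual body):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

// Reroute every use that the definition no longer dominates through
// SSAUpdater, which inserts whatever PHI nodes are needed.
static void repairUsesOf(llvm::Instruction &I, llvm::DominatorTree &DT) {
  llvm::SSAUpdater Updater;
  Updater.Initialize(I.getType(), I.getName());
  // Paths that never see the definition get undef, the usual trick for
  // values that may be live-in without a reaching def.
  Updater.AddAvailableValue(&I.getFunction()->getEntryBlock(),
                            llvm::UndefValue::get(I.getType()));
  Updater.AddAvailableValue(I.getParent(), &I);
  // Copy the iterator before advancing: RewriteUse may touch the use list.
  for (auto UI = I.use_begin(), UE = I.use_end(); UI != UE;) {
    llvm::Use &U = *UI++;
    if (!DT.dominates(&I, U))
      Updater.RewriteUse(U);
  }
}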