[llvm] [LV][NFC] Fix typos (PR #111971)
Piotr Fusik via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 11 02:36:24 PDT 2024
https://github.com/pfusik created https://github.com/llvm/llvm-project/pull/111971
From 977963fb9a770a89455d8acad2a8b3bf9ffb6f9d Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Fri, 11 Oct 2024 11:34:28 +0200
Subject: [PATCH] [LV][NFC] Fix typos
---
.../Transforms/Vectorize/LoopVectorize.cpp | 31 +++++++++----------
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 05dc58a42249ca..67384943301c2b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -812,8 +812,7 @@ class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
};
} // end namespace llvm
-/// Look for a meaningful debug location on the instruction or it's
-/// operands.
+/// Look for a meaningful debug location on the instruction or its operands.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I) {
if (!I)
return DebugLoc();
@@ -1801,7 +1800,7 @@ class GeneratedRTChecks {
/// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
/// accurately estimate the cost of the runtime checks. The blocks are
- /// un-linked from the IR and is added back during vector code generation. If
+ /// un-linked from the IR and are added back during vector code generation. If
/// there is no vector code generation, the check blocks are removed
/// completely.
void create(Loop *L, const LoopAccessInfo &LAI,
@@ -2584,7 +2583,7 @@ PHINode *InnerLoopVectorizer::createInductionResumeValue(
}
}
- // Create phi nodes to merge from the backedge-taken check block.
+ // Create phi nodes to merge from the backedge-taken check block.
PHINode *BCResumeVal =
PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
LoopScalarPreHeader->getFirstNonPHIIt());
@@ -3005,7 +3004,8 @@ void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
// We can't sink an instruction if it is a phi node, is not in the loop,
// may have side effects or may read from memory.
- // TODO Could dor more granular checking to allow sinking a load past non-store instructions.
+ // TODO Could do more granular checking to allow sinking
+ // a load past non-store instructions.
if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
I->mayHaveSideEffects() || I->mayReadFromMemory())
continue;
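As an aside on the TODO in the hunk above: a rough sketch, not part of this patch, of what a more granular check could look like. It leans on existing llvm::Instruction queries (mayWriteToMemory, mayHaveSideEffects); canSinkLoadPast is a hypothetical helper name.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: a load may sink past the given instructions as long
// as none of them can write memory or otherwise have side effects, since
// only a write could change the loaded value.
static bool canSinkLoadPast(LoadInst *Load, ArrayRef<Instruction *> Between) {
  if (!Load->isSimple()) // keep volatile/atomic loads where they are
    return false;
  for (Instruction *I : Between)
    if (I->mayWriteToMemory() || I->mayHaveSideEffects())
      return false;
  return true;
}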
@@ -3143,9 +3143,8 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
// (2) Add to the worklist all bitcast and getelementptr instructions used by
// memory accesses requiring a scalar use. The pointer operands of loads and
- // stores will be scalar as long as the memory accesses is not a gather or
- // scatter operation. The value operand of a store will remain scalar if the
- // store is scalarized.
+ // stores will be scalar unless the operation is a gather or scatter.
+ // The value operand of a store will remain scalar if the store is scalarized.
for (auto *BB : TheLoop->blocks())
for (auto &I : *BB) {
if (auto *Load = dyn_cast<LoadInst>(&I)) {
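To illustrate the reworded comment above (illustrative only, not part of the patch): in a source-level loop, the pointer operand of a consecutive access can stay scalar, whereas an indexed access turns into a gather/scatter whose per-lane addresses must be vectorized.

// Unit-stride: &a[i] and &b[i] are consecutive across lanes, so the
// pointer operands remain scalar after vectorization.
void unit_stride(float *a, const float *b, int n) {
  for (int i = 0; i < n; ++i)
    a[i] = 2.0f * b[i];
}

// Indexed: &b[idx[i]] differs arbitrarily per lane, so the access becomes a
// gather and its pointer operand is vectorized rather than kept scalar.
void indexed(float *a, const float *b, const int *idx, int n) {
  for (int i = 0; i < n; ++i)
    a[i] = b[idx[i]];
}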
@@ -3417,7 +3416,7 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
auto *Group = getInterleavedAccessGroup(I);
assert(Group && "Must have a group.");
- // If the instruction's allocated size doesn't equal it's type size, it
+ // If the instruction's allocated size doesn't equal its type size, it
// requires padding and will be scalarized.
auto &DL = I->getDataLayout();
auto *ScalarTy = getLoadStoreType(I);
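A concrete case for the comment above (an aside, not from the patch): on x86-64, long double is an 80-bit value padded to a 16-byte allocation, so its allocated size and type size differ and consecutive in-memory elements carry padding.

#include <cstdio>

int main() {
  // On x86-64, the 80-bit x87 value occupies a 16-byte slot, so array
  // elements of this type are not tightly packed.
  printf("sizeof(long double) = %zu\n", sizeof(long double)); // typically 16
  return 0;
}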
@@ -3512,11 +3511,11 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
assert(VF.isVector() && !Uniforms.contains(VF) &&
"This function should not be visited twice for the same VF");
- // Visit the list of Uniforms. If we'll not find any uniform value, we'll
- // not analyze again. Uniforms.count(VF) will return 1.
+ // Visit the list of Uniforms. If we find no uniform value, we won't
+ // analyze again. Uniforms.count(VF) will return 1.
Uniforms[VF].clear();
- // We now know that the loop is vectorizable!
+ // Now we know that the loop is vectorizable!
// Collect instructions inside the loop that will remain uniform after
// vectorization.
@@ -3563,7 +3562,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
auto PrevVF = VF.divideCoefficientBy(2);
// Return true if all lanes perform the same memory operation, and we can
- // thus chose to execute only one.
+ // thus choose to execute only one.
auto IsUniformMemOpUse = [&](Instruction *I) {
// If the value was already known to not be uniform for the previous
// (smaller VF), it cannot be uniform for the larger VF.
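For the "same memory operation in all lanes" wording above, a minimal illustration (not part of the patch): a load from a loop-invariant address is such a uniform memory op, so the vectorizer can emit a single scalar load per vector iteration instead of a gather.

// Every lane of a vector iteration loads from the same address *factor,
// so one scalar load (kept uniform) is enough.
void scale(float *a, const float *factor, int n) {
  for (int i = 0; i < n; ++i)
    a[i] *= *factor;
}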
@@ -3954,7 +3953,7 @@ FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
FixedScalableVFPair
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
- // TODO: It may by useful to do since it's still likely to be dynamically
+ // TODO: It may be useful to do since it's still likely to be dynamically
// uniform if the target can skip.
reportVectorizationFailure(
"Not inserting runtime ptr check for divergent target",
@@ -4028,7 +4027,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
"No decisions should have been taken at this point");
// Note: There is no need to invalidate any cost modeling decisions here, as
- // non where taken so far.
+ // none were taken so far.
InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
}
@@ -7917,7 +7916,7 @@ EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
BasicBlock *Bypass, BasicBlock *Insert) {
assert(EPI.TripCount &&
- "Expected trip count to have been safed in the first pass.");
+ "Expected trip count to have been saved in the first pass.");
assert(
(!isa<Instruction>(EPI.TripCount) ||
DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&