[llvm] 111fcb0 - [llvm] Fix duplicate word typos. NFC
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 1 18:25:24 PDT 2023
Author: Fangrui Song
Date: 2023-09-01T18:25:16-07:00
New Revision: 111fcb0df02db3db8bed1d5db6d911b7ce544d92
URL: https://github.com/llvm/llvm-project/commit/111fcb0df02db3db8bed1d5db6d911b7ce544d92
DIFF: https://github.com/llvm/llvm-project/commit/111fcb0df02db3db8bed1d5db6d911b7ce544d92.diff
LOG: [llvm] Fix duplicate word typos. NFC
Those fixes were taken from https://reviews.llvm.org/D137338
Added:
Modified:
llvm/docs/PDB/DbiStream.rst
llvm/include/llvm/ADT/CombinationGenerator.h
llvm/include/llvm/ADT/FloatingPointMode.h
llvm/include/llvm/ADT/IntervalMap.h
llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
llvm/include/llvm/Analysis/GuardUtils.h
llvm/include/llvm/Analysis/ScalarEvolution.h
llvm/include/llvm/AsmParser/Parser.h
llvm/include/llvm/BinaryFormat/MachO.h
llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
llvm/include/llvm/CodeGen/ISDOpcodes.h
llvm/include/llvm/CodeGen/LiveInterval.h
llvm/include/llvm/CodeGen/LiveIntervals.h
llvm/include/llvm/CodeGen/MachineBasicBlock.h
llvm/include/llvm/CodeGen/ScheduleDAG.h
llvm/include/llvm/CodeGen/TargetLowering.h
llvm/include/llvm/CodeGen/TargetPassConfig.h
llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h
llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h
llvm/include/llvm/Demangle/Demangle.h
llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
llvm/include/llvm/IR/AutoUpgrade.h
llvm/include/llvm/IR/Instructions.h
llvm/include/llvm/IR/LegacyPassManagers.h
llvm/include/llvm/IR/PassManagerInternal.h
llvm/include/llvm/MC/MCSectionWasm.h
llvm/include/llvm/MC/MCStreamer.h
llvm/include/llvm/Passes/PassBuilder.h
llvm/include/llvm/Support/DynamicLibrary.h
llvm/include/llvm/Support/FileSystem.h
llvm/include/llvm/Support/MemAlloc.h
llvm/include/llvm/Support/SourceMgr.h
llvm/include/llvm/Support/TrailingObjects.h
llvm/include/llvm/Support/VirtualFileSystem.h
llvm/include/llvm/Target/Target.td
llvm/include/llvm/Transforms/Utils/MisExpect.h
llvm/include/llvm/Transforms/Utils/ValueMapper.h
llvm/lib/Analysis/CGSCCPassManager.cpp
llvm/lib/Analysis/IVDescriptors.cpp
llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
llvm/lib/CodeGen/MachinePipeliner.cpp
llvm/lib/CodeGen/MachineScheduler.cpp
llvm/lib/CodeGen/ReachingDefAnalysis.cpp
llvm/lib/CodeGen/RegAllocPBQP.cpp
llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp
llvm/lib/Debuginfod/HTTPServer.cpp
llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
llvm/lib/IR/Verifier.cpp
llvm/lib/MC/WasmObjectWriter.cpp
llvm/lib/MC/XCOFFObjectWriter.cpp
llvm/lib/Object/ArchiveWriter.cpp
llvm/lib/Support/RISCVISAInfo.cpp
llvm/lib/Support/SourceMgr.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64InstrInfo.h
llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
llvm/lib/Target/M68k/M68kInstrInfo.cpp
llvm/lib/Target/M68k/M68kInstrInfo.h
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/RISCV/RISCVFrameLowering.h
llvm/lib/Target/VE/VEISelLowering.cpp
llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/lib/Transforms/IPO/IROutliner.cpp
llvm/lib/Transforms/IPO/LowerTypeTests.cpp
llvm/lib/Transforms/IPO/PartialInlining.cpp
llvm/lib/Transforms/IPO/SampleProfile.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
llvm/lib/Transforms/Scalar/LICM.cpp
llvm/lib/Transforms/Scalar/LoopFuse.cpp
llvm/lib/Transforms/Scalar/LoopPredication.cpp
llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
llvm/lib/Transforms/Scalar/NewGVN.cpp
llvm/lib/Transforms/Utils/LoopPeel.cpp
llvm/lib/Transforms/Utils/LoopUtils.cpp
llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/lib/Transforms/Vectorize/VPlan.h
llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll
llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
llvm/tools/llvm-profgen/CSPreInliner.cpp
llvm/tools/llvm-readobj/ELFDumper.cpp
llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
llvm/tools/obj2yaml/elf2yaml.cpp
llvm/unittests/CodeGen/InstrRefLDVTest.cpp
llvm/unittests/CodeGen/LowLevelTypeTest.cpp
llvm/utils/TableGen/CodeGenRegisters.h
Removed:
################################################################################
diff --git a/llvm/docs/PDB/DbiStream.rst b/llvm/docs/PDB/DbiStream.rst
index 9e939a938c50c3..d8b3d665ed5072 100644
--- a/llvm/docs/PDB/DbiStream.rst
+++ b/llvm/docs/PDB/DbiStream.rst
@@ -210,7 +210,7 @@ record in the array has the format:
uint16_t Dirty : 1;
// ``true`` if EC information is present for this module. EC is presumed to
// stand for "Edit & Continue", which LLVM does not support. So this flag
- // will always be be false.
+ // will always be false.
uint16_t EC : 1;
uint16_t Unused : 6;
// Type Server Index for this module. This is assumed to be related to /Zi,
diff --git a/llvm/include/llvm/ADT/CombinationGenerator.h b/llvm/include/llvm/ADT/CombinationGenerator.h
index a0bec68eaad60d..6100aa98122937 100644
--- a/llvm/include/llvm/ADT/CombinationGenerator.h
+++ b/llvm/include/llvm/ADT/CombinationGenerator.h
@@ -73,7 +73,7 @@ class CombinationGenerator {
SmallVector<WrappingIterator<choice_type>, variable_smallsize>
VariablesState;
- // 'increment' of the the whole VariablesState is defined identically to the
+ // 'increment' of the whole VariablesState is defined identically to the
// increment of a number: starting from the least significant element,
// increment it, and if it wrapped, then propagate that carry by also
// incrementing next (more significant) element.
diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h
index ea934a5a05e835..6aeabd17b7dde4 100644
--- a/llvm/include/llvm/ADT/FloatingPointMode.h
+++ b/llvm/include/llvm/ADT/FloatingPointMode.h
@@ -197,7 +197,7 @@ parseDenormalFPAttributeComponent(StringRef Str) {
.Default(DenormalMode::Invalid);
}
-/// Return the name used for the denormal handling mode used by the the
+/// Return the name used for the denormal handling mode used by the
/// expected names from the denormal-fp-math attribute.
inline StringRef denormalModeKindName(DenormalMode::DenormalModeKind Mode) {
switch (Mode) {
diff --git a/llvm/include/llvm/ADT/IntervalMap.h b/llvm/include/llvm/ADT/IntervalMap.h
index c68d816a8e7d75..99bf3087722337 100644
--- a/llvm/include/llvm/ADT/IntervalMap.h
+++ b/llvm/include/llvm/ADT/IntervalMap.h
@@ -1221,7 +1221,7 @@ branchRoot(unsigned Position) {
unsigned size[Nodes];
IdxPair NewOffset(0, Position);
- // Is is very common for the root node to be smaller than external nodes.
+ // It is very common for the root node to be smaller than external nodes.
if (Nodes == 1)
size[0] = rootSize;
else
@@ -1262,7 +1262,7 @@ splitRoot(unsigned Position) {
unsigned Size[Nodes];
IdxPair NewOffset(0, Position);
- // Is is very common for the root node to be smaller than external nodes.
+ // It is very common for the root node to be smaller than external nodes.
if (Nodes == 1)
Size[0] = rootSize;
else
@@ -1814,7 +1814,7 @@ iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
// Insert into the branch node at Level-1.
if (P.size(Level) == Branch::Capacity) {
- // Branch node is full, handle handle the overflow.
+ // Branch node is full, handle the overflow.
assert(!SplitRoot && "Cannot overflow after splitting the root");
SplitRoot = overflow<Branch>(Level);
Level += SplitRoot;
diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index a75f65c0dfc17e..54d56f8472c2bc 100644
--- a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1382,7 +1382,7 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::applyIterativeInference() {
if (ReachableBlocks.empty())
return;
- // The map is used to to index successors/predecessors of reachable blocks in
+ // The map is used to index successors/predecessors of reachable blocks in
// the ReachableBlocks vector
DenseMap<const BlockT *, size_t> BlockIndex;
// Extract initial frequencies for the reachable blocks
diff --git a/llvm/include/llvm/Analysis/GuardUtils.h b/llvm/include/llvm/Analysis/GuardUtils.h
index 18485185702ddc..208f08b82d9873 100644
--- a/llvm/include/llvm/Analysis/GuardUtils.h
+++ b/llvm/include/llvm/Analysis/GuardUtils.h
@@ -49,7 +49,7 @@ bool parseWidenableBranch(const User *U, Value *&Condition,
Value *&WidenableCondition, BasicBlock *&IfTrueBB,
BasicBlock *&IfFalseBB);
-/// Analgous to the above, but return the Uses so that that they can be
+/// Analogous to the above, but return the Uses so that they can be
/// modified. Unlike previous version, Condition is optional and may be null.
bool parseWidenableBranch(User *U, Use *&Cond, Use *&WC, BasicBlock *&IfTrueBB,
BasicBlock *&IfFalseBB);
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 6763eaadf3c73d..2765f1286d8bce 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -328,7 +328,7 @@ class SCEVWrapPredicate final : public SCEVPredicate {
/// If Signed is a function that takes an n-bit tuple and maps to the
/// integer domain as the tuples value interpreted as twos complement,
/// and Unsigned a function that takes an n-bit tuple and maps to the
- /// integer domain as as the base two value of input tuple, then a + b
+ /// integer domain as the base two value of input tuple, then a + b
/// has IncrementNUSW iff:
///
/// 0 <= Unsigned(a) + Signed(b) < 2^n
diff --git a/llvm/include/llvm/AsmParser/Parser.h b/llvm/include/llvm/AsmParser/Parser.h
index c57e7abe554dbd..b3adfd7fd76cfb 100644
--- a/llvm/include/llvm/AsmParser/Parser.h
+++ b/llvm/include/llvm/AsmParser/Parser.h
@@ -97,7 +97,7 @@ ParsedModuleAndIndex parseAssemblyFileWithIndexNoUpgradeDebugInfo(
/// This function is a main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code for a module
-/// summary. It returns a a ModuleSummaryIndex with the corresponding features.
+/// summary. It returns a ModuleSummaryIndex with the corresponding features.
/// Note that this does not verify that the generated Index is valid, so you
/// should run the verifier after parsing the file to check that it is okay.
/// Parse LLVM Assembly Index from a file
diff --git a/llvm/include/llvm/BinaryFormat/MachO.h b/llvm/include/llvm/BinaryFormat/MachO.h
index a6d64b4c04eee7..6ce0dec14a3e36 100644
--- a/llvm/include/llvm/BinaryFormat/MachO.h
+++ b/llvm/include/llvm/BinaryFormat/MachO.h
@@ -1073,7 +1073,7 @@ struct dyld_chained_fixups_header {
};
/// dyld_chained_starts_in_image is embedded in LC_DYLD_CHAINED_FIXUPS payload.
-/// Each each seg_info_offset entry is the offset into this struct for that
+/// Each seg_info_offset entry is the offset into this struct for that
/// segment followed by pool of dyld_chain_starts_in_segment data.
struct dyld_chained_starts_in_image {
uint32_t seg_count;
diff --git a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
index b77bcdb8902401..9236f14a3c8645 100644
--- a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
@@ -288,7 +288,7 @@ template <typename DerivedT> class CodeGenPassBuilder {
/// all virtual registers.
///
/// Note if the target overloads addRegAssignAndRewriteOptimized, this may not
- /// be honored. This is also not generally used for the the fast variant,
+ /// be honored. This is also not generally used for the fast variant,
/// where the allocation and rewriting are done in one pass.
void addPreRewrite(AddMachinePass &) const {}
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 4d26af3e3e6d22..0f803bf6c538ea 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -357,7 +357,7 @@ class IRTranslator : public MachineFunctionPass {
void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
MachineIRBuilder &MIB);
- /// Generate for for the BitTest header block, which precedes each sequence of
+ /// Generate for the BitTest header block, which precedes each sequence of
/// BitTestCases.
void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
MachineBasicBlock *SwitchMBB);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index d36f27ea6e5af3..aec603a225d779 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -246,7 +246,7 @@ class LegalizerHelper {
/// needs to be widened to evenly cover \p DstReg, inserts high bits
/// corresponding to the extension opcode \p PadStrategy.
///
- /// \p VRegs will be cleared, and the the result \p NarrowTy register pieces
+ /// \p VRegs will be cleared, and the result \p NarrowTy register pieces
/// will replace it. Returns The complete LCMTy that \p VRegs will cover when
/// merged.
LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index d38ff71b1589b5..e51a3ec9400543 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -709,7 +709,7 @@ class LegalizeRuleSet {
using namespace LegalityPredicates;
return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1);
}
- /// The instruction is lowered when when type indexes 0, 1, and 2 are all in
+ /// The instruction is lowered when type indexes 0, 1, and 2 are all in
/// their respective lists.
LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
std::initializer_list<LLT> Types1,
@@ -857,7 +857,7 @@ class LegalizeRuleSet {
std::initializer_list<LLT> Types1) {
return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1);
}
- /// The instruction is custom when when type indexes 0, 1, and 2 are all in
+ /// The instruction is custom when type indexes 0, 1, and 2 are all in
/// their respective lists.
LegalizeRuleSet &
customForCartesianProduct(std::initializer_list<LLT> Types0,
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index fbe920e6b3251c..e7db9547f03b69 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -823,7 +823,7 @@ class MachineIRBuilder {
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p TablePtr must be a generic virtual register with pointer type.
- /// \pre \p JTI must be be a jump table index.
+ /// \pre \p JTI must be a jump table index.
/// \pre \p IndexReg must be a generic virtual register with pointer type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
index 609326e28e3074..3f85d22be53399 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -190,7 +190,7 @@ class RegBankSelect : public MachineFunctionPass {
/// Frequency of the insertion point.
/// \p P is used to access the various analysis that will help to
/// get that information, like MachineBlockFrequencyInfo. If \p P
- /// does not contain enough enough to return the actual frequency,
+ /// does not contain enough to return the actual frequency,
/// this returns 1.
virtual uint64_t frequency(const Pass &P) const { return 1; }
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 0307cd705ef7d6..67779a23a19131 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -920,7 +920,7 @@ enum NodeType {
FP_TO_BF16,
/// Perform various unary floating-point operations inspired by libm. For
- /// FPOWI, the result is undefined if if the integer operand doesn't fit into
+ /// FPOWI, the result is undefined if the integer operand doesn't fit into
/// sizeof(int).
FNEG,
FABS,
diff --git a/llvm/include/llvm/CodeGen/LiveInterval.h b/llvm/include/llvm/CodeGen/LiveInterval.h
index c2741f5dd228e0..ef74f056dcb34c 100644
--- a/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -857,7 +857,7 @@ namespace llvm {
/// V2: sub0 sub1 sub2 sub3
/// V1: <offset> sub0 sub1
///
- /// This offset will look like a composed subregidx in the the class:
+ /// This offset will look like a composed subregidx in the class:
/// V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32>
/// => V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32>
///
diff --git a/llvm/include/llvm/CodeGen/LiveIntervals.h b/llvm/include/llvm/CodeGen/LiveIntervals.h
index 3b3a4e12f79407..903e458854f6cd 100644
--- a/llvm/include/llvm/CodeGen/LiveIntervals.h
+++ b/llvm/include/llvm/CodeGen/LiveIntervals.h
@@ -322,7 +322,7 @@ class VirtRegMap;
/// OrigRegs is a vector of registers that were originally used by the
/// instructions in the range between the two iterators.
///
- /// Currently, the only only changes that are supported are simple removal
+ /// Currently, the only changes that are supported are simple removal
/// and addition of uses.
void repairIntervalsInRange(MachineBasicBlock *MBB,
MachineBasicBlock::iterator Begin,
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 97c9649471fb61..ed9fc8f7ec3d75 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -233,7 +233,7 @@ class MachineBasicBlock
/// Return a formatted string to identify this block and its parent function.
std::string getFullName() const;
- /// Test whether this block is used as as something other than the target
+ /// Test whether this block is used as something other than the target
/// of a terminator, exception-handling target, or jump table. This is
/// either the result of an IR-level "blockaddress", or some form
/// of target-specific branch lowering.
diff --git a/llvm/include/llvm/CodeGen/ScheduleDAG.h b/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 89b71167a43a89..c5172e8c542b73 100644
--- a/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -766,7 +766,7 @@ class TargetRegisterInfo;
/// be added from SUnit \p X to SUnit \p Y.
void AddPredQueued(SUnit *Y, SUnit *X);
- /// Updates the topological ordering to accommodate an an edge to be
+ /// Updates the topological ordering to accommodate an edge to be
/// removed from the specified node \p N from the predecessors of the
/// current node \p M.
void RemovePred(SUnit *M, SUnit *N);
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index be89509b4b0beb..fe0bce8df32974 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3498,7 +3498,7 @@ class TargetLoweringBase {
/// is[Z|FP]ExtFree of the related types is not true.
virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
- /// Depth that GatherAllAliases should should continue looking for chain
+ /// Depth that GatherAllAliases should continue looking for chain
/// dependencies when trying to find a more preferable chain. As an
/// approximation, this should be more than the number of consecutive stores
/// expected to be merged.
@@ -3811,7 +3811,7 @@ class TargetLowering : public TargetLoweringBase {
/// \p AssumeSingleUse When this parameter is true, this function will
/// attempt to simplify \p Op even if there are multiple uses.
/// Callers are responsible for correctly updating the DAG based on the
- /// results of this function, because simply replacing replacing TLO.Old
+ /// results of this function, because simply replacing TLO.Old
/// with TLO.New will be incorrect when this parameter is true and TLO.Old
/// has multiple uses.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
@@ -3869,7 +3869,7 @@ class TargetLowering : public TargetLoweringBase {
/// \p AssumeSingleUse When this parameter is true, this function will
/// attempt to simplify \p Op even if there are multiple uses.
/// Callers are responsible for correctly updating the DAG based on the
- /// results of this function, because simply replacing replacing TLO.Old
+ /// results of this function, because simply replacing TLO.Old
/// with TLO.New will be incorrect when this parameter is true and TLO.Old
/// has multiple uses.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
diff --git a/llvm/include/llvm/CodeGen/TargetPassConfig.h b/llvm/include/llvm/CodeGen/TargetPassConfig.h
index 9cdd9e30a361b3..9e8052de12376c 100644
--- a/llvm/include/llvm/CodeGen/TargetPassConfig.h
+++ b/llvm/include/llvm/CodeGen/TargetPassConfig.h
@@ -401,7 +401,7 @@ class TargetPassConfig : public ImmutablePass {
/// all virtual registers.
///
/// Note if the target overloads addRegAssignAndRewriteOptimized, this may not
- /// be honored. This is also not generally used for the the fast variant,
+ /// be honored. This is also not generally used for the fast variant,
/// where the allocation and rewriting are done in one pass.
virtual bool addPreRewrite() {
return false;
diff --git a/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h b/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h
index 1914f499f0ed4c..e92ec4855b252a 100644
--- a/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h
+++ b/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h
@@ -71,7 +71,7 @@ enum class GlobalTypeHashAlg : uint16_t {
/// TypeIndex that refers to B with a previously-computed global hash for B. As
/// this is a recursive algorithm (e.g. the global hash of B also depends on the
/// global hashes of the types that B refers to), a global hash can uniquely
-/// identify identify that A occurs in another stream that has a completely
+/// identify that A occurs in another stream that has a completely
/// different graph structure. Although the hash itself is slower to compute,
/// probing is much faster with a globally hashed type, because the hash itself
/// is considered "as good as" the original type. Since type records can be
diff --git a/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h b/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h
index 759ed895d48983..616f35d82e1c72 100644
--- a/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h
+++ b/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h
@@ -90,7 +90,7 @@ struct InlineInfo {
/// exists for \a Addr, then \a SrcLocs will be left untouched. If there is
/// inline information for \a Addr, then \a SrcLocs will be modifiied to
/// contain the deepest most inline function's SourceLocation at index zero
- /// in the array and proceed up the the concrete function source file and
+ /// in the array and proceed up the concrete function source file and
/// line at the end of the array.
///
/// \param GR The GSYM reader that contains the string and file table that
diff --git a/llvm/include/llvm/Demangle/Demangle.h b/llvm/include/llvm/Demangle/Demangle.h
index e1f73c422db833..0f06bbaf904aca 100644
--- a/llvm/include/llvm/Demangle/Demangle.h
+++ b/llvm/include/llvm/Demangle/Demangle.h
@@ -102,7 +102,7 @@ struct ItaniumPartialDemangler {
char *getFunctionParameters(char *Buf, size_t *N) const;
char *getFunctionReturnType(char *Buf, size_t *N) const;
- /// If this function has any any cv or reference qualifiers. These imply that
+ /// If this function has any cv or reference qualifiers. These imply that
/// the function is a non-static member function.
bool hasFunctionQualifiers() const;
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
index 50bebf335de1a3..8d3f29b545f21a 100644
--- a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
@@ -176,7 +176,7 @@ enum EdgeKind_aarch64 : Edge::Kind {
/// Errors:
/// - The result of the unshifted part of the fixup expression must be
/// 32-bit aligned otherwise an alignment error will be returned.
- /// - The result of the fixup expression must fit into an an int19 or an
+ /// - The result of the fixup expression must fit into an int19 or an
/// out-of-range error will be returned.
LDRLiteral19,
diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h
index 12952f25cbda97..f9b5d0c843907e 100644
--- a/llvm/include/llvm/IR/AutoUpgrade.h
+++ b/llvm/include/llvm/IR/AutoUpgrade.h
@@ -52,7 +52,7 @@ namespace llvm {
/// so that it can update all calls to the old function.
void UpgradeCallsToIntrinsic(Function* F);
- /// This checks for global variables which should be upgraded. It it requires
+ /// This checks for global variables which should be upgraded. If it requires
/// upgrading, returns a pointer to the upgraded variable.
GlobalVariable *UpgradeGlobalVariable(GlobalVariable *GV);
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 99e96c4cccc73d..5e8739812260d4 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -2336,7 +2336,7 @@ class ShuffleVectorInst : public Instruction {
/// Return true if this shuffle mask is an insert subvector mask.
/// A valid insert subvector mask inserts the lowest elements of a second
- /// source operand into an in-place first source operand operand.
+ /// source operand into an in-place first source operand.
/// Both the sub vector width and the insertion index is returned.
static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
int &NumSubElts, int &Index);
diff --git a/llvm/include/llvm/IR/LegacyPassManagers.h b/llvm/include/llvm/IR/LegacyPassManagers.h
index 41c11d26aa456c..6c490791fda4b8 100644
--- a/llvm/include/llvm/IR/LegacyPassManagers.h
+++ b/llvm/include/llvm/IR/LegacyPassManagers.h
@@ -422,8 +422,8 @@ class PMDataManager {
SmallVector<Pass *, 16> PassVector;
// Collection of Analysis provided by Parent pass manager and
- // used by current pass manager. At at time there can not be more
- // then PMT_Last active pass mangers.
+ // used by current pass manager. At any time there can not be more
+ // then PMT_Last active pass managers.
DenseMap<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];
/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
diff --git a/llvm/include/llvm/IR/PassManagerInternal.h b/llvm/include/llvm/IR/PassManagerInternal.h
index 8e19f0aa16a9bb..bcfdcb8206c45e 100644
--- a/llvm/include/llvm/IR/PassManagerInternal.h
+++ b/llvm/include/llvm/IR/PassManagerInternal.h
@@ -53,7 +53,7 @@ struct PassConcept {
/// Polymorphic method to access the name of a pass.
virtual StringRef name() const = 0;
- /// Polymorphic method to to let a pass optionally exempted from skipping by
+ /// Polymorphic method to let a pass optionally exempted from skipping by
/// PassInstrumentation.
/// To opt-in, pass should implement `static bool isRequired()`. It's no-op
/// to have `isRequired` always return false since that is the default.
diff --git a/llvm/include/llvm/MC/MCSectionWasm.h b/llvm/include/llvm/MC/MCSectionWasm.h
index 579f92a750567e..23eba093a3b21f 100644
--- a/llvm/include/llvm/MC/MCSectionWasm.h
+++ b/llvm/include/llvm/MC/MCSectionWasm.h
@@ -33,7 +33,7 @@ class MCSectionWasm final : public MCSection {
// itself and does not include the size of the section header.
uint64_t SectionOffset = 0;
- // For data sections, this is the index of of the corresponding wasm data
+ // For data sections, this is the index of the corresponding wasm data
// segment
uint32_t SegmentIndex = 0;
diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h
index dcd84a6efda0e8..3bf2d22e182353 100644
--- a/llvm/include/llvm/MC/MCStreamer.h
+++ b/llvm/include/llvm/MC/MCStreamer.h
@@ -270,7 +270,7 @@ class MCStreamer {
virtual void emitRawTextImpl(StringRef String);
- /// Returns true if the the .cv_loc directive is in the right section.
+ /// Returns true if the .cv_loc directive is in the right section.
bool checkCVLocSection(unsigned FuncId, unsigned FileNo, SMLoc Loc);
public:
diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h
index 08c5b112d903a4..2c7ceda7998eda 100644
--- a/llvm/include/llvm/Passes/PassBuilder.h
+++ b/llvm/include/llvm/Passes/PassBuilder.h
@@ -333,7 +333,7 @@ class PassBuilder {
/// mpass1,fpass1,fpass2,mpass2,lpass1
///
/// This pipeline uses only one pass manager: the top-level module manager.
- /// fpass1,fpass2 and lpass1 are added into the the top-level module manager
+ /// fpass1,fpass2 and lpass1 are added into the top-level module manager
/// using only adaptor passes. No nested function/loop pass managers are
/// added. The purpose is to allow easy pass testing when the user
/// specifically want the pass to run under a adaptor directly. This is
diff --git a/llvm/include/llvm/Support/DynamicLibrary.h b/llvm/include/llvm/Support/DynamicLibrary.h
index f7db8fba39084c..94ee08c5938a05 100644
--- a/llvm/include/llvm/Support/DynamicLibrary.h
+++ b/llvm/include/llvm/Support/DynamicLibrary.h
@@ -103,7 +103,7 @@ class DynamicLibrary {
/// This function closes the dynamic library at the given path, using the
/// library close operation of the host operating system, and there is no
- /// guarantee if or when this will cause the the library to be unloaded.
+ /// guarantee if or when this will cause the library to be unloaded.
///
/// This function should be called only if the library was loaded using the
/// getLibrary() function.
diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h
index 033482977a105c..905ae961ec834c 100644
--- a/llvm/include/llvm/Support/FileSystem.h
+++ b/llvm/include/llvm/Support/FileSystem.h
@@ -794,7 +794,7 @@ enum OpenFlags : unsigned {
/// is false the current directory will be used instead.
///
/// This function does not check if the file exists. If you want to be sure
-/// that the file does not yet exist, you should use use enough '%' characters
+/// that the file does not yet exist, you should use enough '%' characters
/// in your model to ensure this. Each '%' gives 4-bits of entropy so you can
/// use 32 of them to get 128 bits of entropy.
///
diff --git a/llvm/include/llvm/Support/MemAlloc.h b/llvm/include/llvm/Support/MemAlloc.h
index d6012bd5a6985d..f3f378b7697a18 100644
--- a/llvm/include/llvm/Support/MemAlloc.h
+++ b/llvm/include/llvm/Support/MemAlloc.h
@@ -64,7 +64,7 @@ LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_realloc(void *Ptr, size_t Sz) {
/// Allocate a buffer of memory with the given size and alignment.
///
-/// When the compiler supports aligned operator new, this will use it to to
+/// When the compiler supports aligned operator new, this will use it to
/// handle even over-aligned allocations.
///
/// However, this doesn't make any attempt to leverage the fancier techniques
diff --git a/llvm/include/llvm/Support/SourceMgr.h b/llvm/include/llvm/Support/SourceMgr.h
index eced4574c82e15..6f5bee7f8cc220 100644
--- a/llvm/include/llvm/Support/SourceMgr.h
+++ b/llvm/include/llvm/Support/SourceMgr.h
@@ -59,7 +59,7 @@ class SourceMgr {
/// dynamically based on the size of Buffer.
mutable void *OffsetCache = nullptr;
- /// Look up a given \p Ptr in in the buffer, determining which line it came
+ /// Look up a given \p Ptr in the buffer, determining which line it came
/// from.
unsigned getLineNumber(const char *Ptr) const;
template <typename T>
diff --git a/llvm/include/llvm/Support/TrailingObjects.h b/llvm/include/llvm/Support/TrailingObjects.h
index f8a546b5c85aaa..9f7c421a87f4ed 100644
--- a/llvm/include/llvm/Support/TrailingObjects.h
+++ b/llvm/include/llvm/Support/TrailingObjects.h
@@ -37,7 +37,7 @@
/// determine the size needed for allocation via
/// 'additionalSizeToAlloc' and 'totalSizeToAlloc'.
///
-/// All the methods implemented by this class are are intended for use
+/// All the methods implemented by this class are intended for use
/// by the implementation of the class, not as part of its interface
/// (thus, private inheritance is suggested).
///
diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h
index 697343c7e763e5..44a56e54a0a09c 100644
--- a/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -66,7 +66,7 @@ class Status {
/// FIXME: Currently the external path is exposed by replacing the virtual
/// path in this Status object. Instead, we should leave the path in the
/// Status intact (matching the requested virtual path) - see
- /// FileManager::getFileRef for how how we plan to fix this.
+ /// FileManager::getFileRef for how we plan to fix this.
bool ExposesExternalVFSPath = false;
Status() = default;
@@ -888,7 +888,7 @@ class RedirectingFileSystem : public vfs::FileSystem {
LookupResult(Entry *E, sys::path::const_iterator Start,
sys::path::const_iterator End);
- /// If the found Entry maps the the input path to a path in the external
+ /// If the found Entry maps the input path to a path in the external
/// file system (i.e. it is a FileEntry or DirectoryRemapEntry), returns
/// that path.
std::optional<StringRef> getExternalRedirect() const {
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index d384cebdddd478..94a57e5c0f6cff 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -315,7 +315,7 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
string DiagnosticType = "";
// A diagnostic message to emit when an invalid value is provided for this
- // register class when it is being used an an assembly operand. If this is
+ // register class when it is being used as an assembly operand. If this is
// non-empty, an anonymous diagnostic type enum value will be generated, and
// the assembly matcher will provide a function to map from diagnostic types
// to message strings.
diff --git a/llvm/include/llvm/Transforms/Utils/MisExpect.h b/llvm/include/llvm/Transforms/Utils/MisExpect.h
index 216c99a26259a1..be6deb1957f13f 100644
--- a/llvm/include/llvm/Transforms/Utils/MisExpect.h
+++ b/llvm/include/llvm/Transforms/Utils/MisExpect.h
@@ -28,7 +28,7 @@ namespace misexpect {
/// checkBackendInstrumentation - compares PGO counters to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range. It extracts the expected weights from the MD_prof weights attatched
-/// to the instruction, which are are assumed to come from lowered llvm.expect
+/// to the instruction, which are assumed to come from lowered llvm.expect
/// intrinsics. The RealWeights parameter and the extracted expected weights are
/// then passed to verifyMisexpect() for verification
///
@@ -40,7 +40,7 @@ void checkBackendInstrumentation(Instruction &I,
/// checkFrontendInstrumentation - compares PGO counters to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range. It extracts the expected weights from the MD_prof weights attatched
-/// to the instruction, which are are assumed to come from profiling data
+/// to the instruction, which are assumed to come from profiling data
/// attached by the frontend prior to llvm.expect intrinsic lowering. The
/// ExpectedWeights parameter and the extracted real weights are then passed to
/// verifyMisexpect() for verification
@@ -64,7 +64,7 @@ void verifyMisExpect(Instruction &I, ArrayRef<uint32_t> RealWeights,
/// checkExpectAnnotations - compares PGO counters to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range. It extracts the expected weights from the MD_prof weights attatched
-/// to the instruction, which are are assumed to come from lowered llvm.expect
+/// to the instruction, which are assumed to come from lowered llvm.expect
/// intrinsics. The RealWeights parameter and the extracted expected weights are
/// then passed to verifyMisexpect() for verification. It is a thin wrapper
/// around the checkFrontendInstrumentation and checkBackendInstrumentation APIs
diff --git a/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/llvm/include/llvm/Transforms/Utils/ValueMapper.h
index 5f15af7f99903b..e80951d50d56e8 100644
--- a/llvm/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -90,7 +90,7 @@ enum RemapFlags {
/// Instruct the remapper to reuse and mutate distinct metadata (remapping
/// them in place) instead of cloning remapped copies. This flag has no
- /// effect when when RF_NoModuleLevelChanges, since that implies an identity
+ /// effect when RF_NoModuleLevelChanges, since that implies an identity
/// mapping.
RF_ReuseAndMutateDistinctMDs = 4,
diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp
index facb9c897da3fe..2246887afe68a6 100644
--- a/llvm/lib/Analysis/CGSCCPassManager.cpp
+++ b/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -235,7 +235,7 @@ ModuleToPostOrderCGSCCPassAdaptor::run(Module &M, ModuleAnalysisManager &AM) {
// rather one pass of the RefSCC creating one child RefSCC at a time.
// Ensure we can proxy analysis updates from the CGSCC analysis manager
- // into the the Function analysis manager by getting a proxy here.
+ // into the Function analysis manager by getting a proxy here.
// This also needs to update the FunctionAnalysisManager, as this may be
// the first time we see this SCC.
CGAM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG).updateFAM(
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 0250d69b636dc7..46629e381bc366 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -123,7 +123,7 @@ static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
// meaning that we will use sext instructions instead of zext
// instructions to restore the original type.
IsSigned = true;
- // Make sure at at least one sign bit is included in the result, so it
+ // Make sure at least one sign bit is included in the result, so it
// will get properly sign-extended.
++MaxBitWidth;
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index cc68991f59b312..c43ca255505d77 100644
--- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -3033,7 +3033,7 @@ void CodeViewDebug::collectLexicalBlockInfo(
if (!BlockInsertion.second)
return;
- // Create a lexical block containing the variables and collect the the
+ // Create a lexical block containing the variables and collect the
// lexical block information for the children.
const InsnRange &Range = Ranges.front();
assert(Range.first && Range.second);
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 027bcd4b24320c..ff59b782943513 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -2634,7 +2634,7 @@ void DwarfDebug::emitDebugLocValue(const AsmPrinter &AP, const DIBasicType *BT,
DIExpressionCursor ExprCursor(DIExpr);
DwarfExpr.addFragmentOffset(DIExpr);
- // If the DIExpr is is an Entry Value, we want to follow the same code path
+ // If the DIExpr is an Entry Value, we want to follow the same code path
// regardless of whether the DBG_VALUE is variadic or not.
if (DIExpr && DIExpr->isEntryValue()) {
// Entry values can only be a single register with no additional DIExpr,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 10ea0ce2b23391..f5ca614e568ef5 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3229,7 +3229,7 @@ bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
unsigned BinOpcode = MI.getOpcode();
- // We know know one of the operands is a select of constants. Now verify that
+ // We know that one of the operands is a select of constants. Now verify that
// the other binary operator operand is either a constant, or we can handle a
// variable.
bool CanFoldNonConst =
diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
index d0ad6e45b4d3ef..88e43e67493f6b 100644
--- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -628,7 +628,7 @@ static raw_ostream &operator<<(raw_ostream &OS, const Polynomial &S) {
/// VectorInfo stores abstract the following information for each vector
/// element:
///
-/// 1) The the memory address loaded into the element as Polynomial
+/// 1) The memory address loaded into the element as Polynomial
/// 2) a set of load instruction necessary to construct the vector,
/// 3) a set of all other instructions that are necessary to create the vector and
/// 4) a pointer value that can be used as relative base for all elements.
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index adcbf65259cdda..788ff5b3b5acdf 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -2635,7 +2635,7 @@ bool SMSchedule::isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi) {
/// v1 = phi(v2, v3)
/// (Def) v3 = op v1
/// (MO) = v1
-/// If MO appears before Def, then then v1 and v3 may get assigned to the same
+/// If MO appears before Def, then v1 and v3 may get assigned to the same
/// register.
bool SMSchedule::isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD,
MachineInstr *Def, MachineOperand &MO) {
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index da3e4bcbe267d3..4add33ba0996af 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -3969,7 +3969,7 @@ struct ILPOrder {
if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
return ScheduledTrees->test(SchedTreeB);
- // Trees with shallower connections have have lower priority.
+ // Trees with shallower connections have lower priority.
if (DFSResult->getSubtreeLevel(SchedTreeA)
!= DFSResult->getSubtreeLevel(SchedTreeB)) {
return DFSResult->getSubtreeLevel(SchedTreeA)
diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
index 75fbc8ba35b164..61a668907be77d 100644
--- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
+++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
@@ -181,7 +181,7 @@ void ReachingDefAnalysis::reprocessBasicBlock(MachineBasicBlock *MBB) {
MBBReachingDefs[MBBNumber][Unit].insert(Start, Def);
}
- // Update reaching def at end of of BB. Keep in mind that these are
+ // Update reaching def at end of BB. Keep in mind that these are
// adjusted relative to the end of the basic block.
if (MBBOutRegsInfos[MBBNumber][Unit] < Def - NumInsts)
MBBOutRegsInfos[MBBNumber][Unit] = Def - NumInsts;
diff --git a/llvm/lib/CodeGen/RegAllocPBQP.cpp b/llvm/lib/CodeGen/RegAllocPBQP.cpp
index 925a0f085c4bcb..b8ee5dc0f8494b 100644
--- a/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -192,7 +192,7 @@ class SpillCosts : public PBQPRAConstraint {
void apply(PBQPRAGraph &G) override {
LiveIntervals &LIS = G.getMetadata().LIS;
- // A minimum spill costs, so that register constraints can can be set
+ // A minimum spill costs, so that register constraints can be set
// without normalization in the [0.0:MinSpillCost( interval.
const PBQP::PBQPNum MinSpillCost = 10.0;
diff --git a/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp b/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp
index feb31e59f5fd2c..ba8dd49ba9291f 100644
--- a/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp
+++ b/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp
@@ -105,7 +105,7 @@ static bool reduceDbgValsForwardScan(MachineBasicBlock &MBB) {
MachineOperand &Loc = MI.getDebugOperand(0);
if (!Loc.isReg()) {
- // If it it's not a register, just stop tracking such variable.
+ // If it's not a register, just stop tracking such variable.
if (VMI != VariableMap.end())
VariableMap.erase(VMI);
continue;
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8e896dbe7962ed..df2d7fe654e44e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5608,7 +5608,7 @@ static SDValue PerformUMinFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
SelectionDAG &DAG) {
// We are looking for UMIN(FPTOUI(X), (2^n)-1), which may have come via a
// select/vselect/select_cc. The two operands pairs for the select (N2/N3) may
- // be truncated versions of the the setcc (N0/N1).
+ // be truncated versions of the setcc (N0/N1).
if ((N0 != N2 &&
(N2.getOpcode() != ISD::TRUNCATE || N0 != N2.getOperand(0))) ||
N0.getOpcode() != ISD::FP_TO_UINT || CC != ISD::SETULT)
@@ -27467,7 +27467,7 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
}
case ISD::CopyFromReg:
- // Always forward past past CopyFromReg.
+ // Always forward past CopyFromReg.
C = C.getOperand(0);
return true;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 38b658cd7dfae8..d3456d574666d0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2189,7 +2189,7 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
bool IgnoreChains) {
if (OptLevel == CodeGenOpt::None) return false;
- // If Root use can somehow reach N through a path that that doesn't contain
+ // If Root use can somehow reach N through a path that doesn't contain
// U then folding N would create a cycle. e.g. In the following
// diagram, Root can reach N through X. If N is folded into Root, then
// X is both a predecessor and a successor of U.
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 584c8c91780e4d..5ea05028cee082 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -525,7 +525,7 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
// deopt argument length, deopt arguments.., gc arguments...
// Figure out what lowering strategy we're going to use for each part
- // Note: Is is conservatively correct to lower both "live-in" and "live-out"
+ // Note: It is conservatively correct to lower both "live-in" and "live-out"
// as "live-through". A "live-through" variable is one which is "live-in",
// "live-out", and live throughout the lifetime of the call (i.e. we can find
// it from any PC within the transitive callee of the statepoint). In
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 35631a125ff979..cc7f29e686077a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6097,7 +6097,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
// Multiply the numerator (operand 0) by the magic value.
// FIXME: We should support doing a MUL in a wider type.
auto GetMULHS = [&](SDValue X, SDValue Y) {
- // If the type isn't legal, use a wider mul of the the type calculated
+ // If the type isn't legal, use a wider mul of the type calculated
// earlier.
if (!isTypeLegal(VT)) {
X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X);
@@ -6284,7 +6284,7 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
// FIXME: We should support doing a MUL in a wider type.
auto GetMULHU = [&](SDValue X, SDValue Y) {
- // If the type isn't legal, use a wider mul of the the type calculated
+ // If the type isn't legal, use a wider mul of the type calculated
// earlier.
if (!isTypeLegal(VT)) {
X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X);
diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
index f68bae2230aa49..e38347f15e3ae8 100644
--- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
+++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
@@ -347,7 +347,7 @@ static void convertFunctionLineTable(raw_ostream *Log, CUInfo &CUI,
LineEntry LE(RowAddress, FileIdx, Row.Line);
if (RowIndex != RowVector[0] && Row.Address < PrevRow.Address) {
// We have seen full duplicate line tables for functions in some
- // DWARF files. Watch for those here by checking the the last
+ // DWARF files. Watch for those here by checking the last
// row was the function's end address (HighPC) and that the
// current line table entry's address is the same as the first
// line entry we already have in our "function_info.Lines". If
diff --git a/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp b/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp
index ab458341a0bd50..4469092099daca 100644
--- a/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp
+++ b/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp
@@ -1058,7 +1058,7 @@ void LVELFReader::processLocationMember(dwarf::Attribute Attr,
CurrentSymbol->addLocationConstant(Attr, *FormValue.getAsUnsignedConstant(),
OffsetOnEntry);
else
- // This is a a location description, or a reference to one.
+ // This is a location description, or a reference to one.
processLocationList(Attr, FormValue, Die, OffsetOnEntry);
}
diff --git a/llvm/lib/Debuginfod/HTTPServer.cpp b/llvm/lib/Debuginfod/HTTPServer.cpp
index a5e992254ead5a..1264353ce4b33a 100644
--- a/llvm/lib/Debuginfod/HTTPServer.cpp
+++ b/llvm/lib/Debuginfod/HTTPServer.cpp
@@ -51,7 +51,7 @@ bool llvm::streamFile(HTTPServerRequest &Request, StringRef FilePath) {
Request.setResponse({404u, "text/plain", "Could not memory-map file.\n"});
return false;
}
- // Lambdas are copied on conversion to to std::function, preventing use of
+ // Lambdas are copied on conversion to std::function, preventing use of
// smart pointers.
MemoryBuffer *MB = MBOrErr->release();
Request.setResponse({200u, "application/octet-stream", MB->getBufferSize(),
diff --git a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
index b78f90aabc4063..e2b5ce49ba2ec1 100644
--- a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
+++ b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
@@ -447,7 +447,7 @@ void PerfJITEventListener::NotifyDebug(uint64_t CodeAddr,
rec.CodeAddr = CodeAddr;
rec.NrEntry = Lines.size();
- // compute total size size of record (variable due to filenames)
+ // compute total size of record (variable due to filenames)
DILineInfoTable::iterator Begin = Lines.begin();
DILineInfoTable::iterator End = Lines.end();
for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 501417db421a6f..6435dc05cdc925 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -301,7 +301,7 @@ class RuntimeDyldImpl {
// won't be interleaved between modules. It is also used in mapSectionAddress
// and resolveRelocations to protect write access to internal data structures.
//
- // loadObject may be called on the same thread during the handling of of
+ // loadObject may be called on the same thread during the handling of
// processRelocations, and that's OK. The handling of the relocation lists
// is written in such a way as to work correctly if new elements are added to
// the end of the list while the list is being processed.
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index c36d45fd55729f..1a9ce08f08bf29 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5830,7 +5830,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
"vector_extract index must be a constant multiple of "
"the result type's known minimum vector length.");
- // If this extraction is not the 'mixed' case where a fixed vector is is
+ // If this extraction is not the 'mixed' case where a fixed vector is
// extracted from a scalable vector, ensure that the extraction does not
// overrun the parent vector.
if (VecEC.isScalable() == ResultEC.isScalable()) {
diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp
index 2b886449f052f2..bb8c68410a6b5c 100644
--- a/llvm/lib/MC/WasmObjectWriter.cpp
+++ b/llvm/lib/MC/WasmObjectWriter.cpp
@@ -550,7 +550,7 @@ void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
TargetObjectWriter->getRelocType(Target, Fixup, FixupSection, IsLocRel);
// Absolute offset within a section or a function.
- // Currently only supported for for metadata sections.
+ // Currently only supported for metadata sections.
// See: test/MC/WebAssembly/blockaddress.ll
if ((Type == wasm::R_WASM_FUNCTION_OFFSET_I32 ||
Type == wasm::R_WASM_FUNCTION_OFFSET_I64 ||
diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp
index 2e8898cb6c5b38..f1cce9b8c94d36 100644
--- a/llvm/lib/MC/XCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/XCOFFObjectWriter.cpp
@@ -1563,7 +1563,7 @@ void XCOFFObjectWriter::writeSectionForControlSectionEntry(
}
// The size of the tail padding in a section is the end virtual address of
- // the current section minus the the end virtual address of the last csect
+ // the current section minus the end virtual address of the last csect
// in that section.
if (uint64_t PaddingSize =
CsectEntry.Address + CsectEntry.Size - CurrentAddressLocation) {
diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp
index abd25c4a846094..19d2eff8b9fa9d 100644
--- a/llvm/lib/Object/ArchiveWriter.cpp
+++ b/llvm/lib/Object/ArchiveWriter.cpp
@@ -698,7 +698,7 @@ computeMemberData(raw_ostream &StringTable, raw_ostream &SymNames,
// UniqueTimestamps is a special case to improve debugging on Darwin:
//
// The Darwin linker does not link debug info into the final
- // binary. Instead, it emits entries of type N_OSO in in the output
+ // binary. Instead, it emits entries of type N_OSO in the output
// binary's symbol table, containing references to the linked-in
// object files. Using that reference, the debugger can read the
// debug data directly from the object files. Alternatively, an
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index 9e3d6f8e10c33f..a02c9842e85839 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -526,7 +526,7 @@ static Error getExtensionVersion(StringRef Ext, StringRef In, unsigned &Major,
return createStringError(errc::invalid_argument, Error);
}
- // If experimental extension, require use of current version number number
+ // If experimental extension, require use of current version number
if (auto ExperimentalExtension = isExperimentalExtension(Ext)) {
if (!EnableExperimentalExtension) {
std::string Error = "requires '-menable-experimental-extensions' for "
diff --git a/llvm/lib/Support/SourceMgr.cpp b/llvm/lib/Support/SourceMgr.cpp
index 8065f0ad663a9f..ebeff87c395497 100644
--- a/llvm/lib/Support/SourceMgr.cpp
+++ b/llvm/lib/Support/SourceMgr.cpp
@@ -117,7 +117,7 @@ unsigned SourceMgr::SrcBuffer::getLineNumberSpecialized(const char *Ptr) const {
return llvm::lower_bound(Offsets, PtrOffset) - Offsets.begin() + 1;
}
-/// Look up a given \p Ptr in in the buffer, determining which line it came
+/// Look up a given \p Ptr in the buffer, determining which line it came
/// from.
unsigned SourceMgr::SrcBuffer::getLineNumber(const char *Ptr) const {
size_t Sz = Buffer->getBufferSize();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 48efe7ae298f07..4c54dcea693668 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11281,7 +11281,7 @@ static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
};
// For OP_MOVLANE shuffles, the RHSID represents the lane to move into. We
- // get the lane to move from from the PFID, which is always from the
+ // get the lane to move from the PFID, which is always from the
// original vectors (V1 or V2).
SDValue OpLHS = GeneratePerfectShuffle(
LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
@@ -16421,7 +16421,7 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
// Multiplication of a power of two plus/minus one can be done more
- // cheaply as as shift+add/sub. For now, this is true unilaterally. If
+ // cheaply as shift+add/sub. For now, this is true unilaterally. If
// future CPUs have a cheaper MADD instruction, this may need to be
// gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
// 64-bit is 5 cycles, so this is always a win.
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 2ccadd35d7345d..24ff676218cbe9 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -108,7 +108,7 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
/// Returns the base register operator of a load/store.
static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);
- /// Returns the the immediate offset operator of a load/store.
+ /// Returns the immediate offset operator of a load/store.
static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);
/// Returns whether the instruction is FP or NEON.
diff --git a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
index 351b6abf870c85..d054fe509be0b6 100644
--- a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
@@ -61,7 +61,7 @@ class AArch64LowerHomogeneousPE {
/// Lower a HOM_Prolog pseudo instruction into a helper call
/// or a sequence of homogeneous stores.
- /// When a a fp setup follows, it can be optimized.
+ /// When a fp setup follows, it can be optimized.
bool lowerProlog(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI);
/// Lower a HOM_Epilog pseudo instruction into a helper call
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 98bee05743c8f4..9c2b270faef125 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2477,7 +2477,7 @@ let Predicates = [HasSVEorSME] in {
// FIXME: BigEndian requires an additional REV instruction to satisfy the
// constraint that none of the bits change when stored to memory as one
- // type, and and reloaded as another type.
+ // type, and reloaded as another type.
let Predicates = [IsLE] in {
def : Pat<(nxv16i8 (bitconvert (nxv8i16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
def : Pat<(nxv16i8 (bitconvert (nxv4i32 ZPR:$src))), (nxv16i8 ZPR:$src)>;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index cf5e16b075e31e..17b5490a914b27 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1174,7 +1174,7 @@ bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
// MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so
// by creating a MOVK that sets bits 48-63 of the register to (global address
// + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to
- // prevent an incorrect tag being generated during relocation when the the
+ // prevent an incorrect tag being generated during relocation when the
// global appears before the code section. Without the offset, a global at
// `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced
// by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 =
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index 303cf11d4f30c3..934432a6d5a749 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -149,7 +149,7 @@ bool matchAArch64MulConstCombine(
APInt ConstValue = Const->Value.sext(Ty.getSizeInBits());
// The following code is ported from AArch64ISelLowering.
// Multiplication of a power of two plus/minus one can be done more
- // cheaply as as shift+add/sub. For now, this is true unilaterally. If
+ // cheaply as shift+add/sub. For now, this is true unilaterally. If
// future CPUs have a cheaper MADD instruction, this may need to be
// gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
// 64-bit is 5 cycles, so this is always a win.
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index a918e9f36e6914..f84b0254fe3f31 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -234,7 +234,7 @@ bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
// Try simplify G_UADDO with 8 or 16 bit operands to wide G_ADD and TBNZ if
// result is only used in the no-overflow case. It is restricted to cases
// where we know that the high-bits of the operands are 0. If there's an
- // overflow, then the the 9th or 17th bit must be set, which can be checked
+ // overflow, then the 9th or 17th bit must be set, which can be checked
// using TBNZ.
//
// Change (for UADDOs on 8 and 16 bits):
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index ee81bfa65c6b65..def54f95409d65 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -6204,7 +6204,7 @@ static DecodeStatus DecoderForMRRC2AndMCRR2(MCInst &Inst, unsigned Val,
// We have to check if the instruction is MRRC2
// or MCRR2 when constructing the operands for
// Inst. Reason is because MRRC2 stores to two
- // registers so it's tablegen desc has has two
+ // registers so it's tablegen desc has two
// outputs whereas MCRR doesn't store to any
// registers so all of it's operands are listed
// as inputs, therefore the operand order for
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h b/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h
index 1f7a926edb5cd0..4f13a26334b7f3 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h
@@ -68,10 +68,10 @@ enum Fixups {
/// with the upper 8 bits of a negated 16-bit value (bits 8-15).
fixup_hi8_ldi_neg,
/// Replaces the immediate operand of a 16-bit `Rd, K` instruction
- /// with the upper 8 bits of a negated negated 24-bit value (bits 16-23).
+ /// with the upper 8 bits of a negated 24-bit value (bits 16-23).
fixup_hh8_ldi_neg,
/// Replaces the immediate operand of a 16-bit `Rd, K` instruction
- /// with the upper 8 bits of a negated negated 32-bit value (bits 24-31).
+ /// with the upper 8 bits of a negated 32-bit value (bits 24-31).
fixup_ms8_ldi_neg,
/// Replaces the immediate operand of a 16-bit `Rd, K` instruction
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index 2bbc2f644f58df..65b68eeeb43b02 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -653,7 +653,7 @@ HexagonShuffler::tryAuction(HexagonPacketSummary const &Summary) {
bool HexagonShuffler::shuffle() {
if (size() > HEXAGON_PACKET_SIZE) {
- // Ignore a packet with with more than what a packet can hold
+ // Ignore a packet with more than what a packet can hold
// or with compound or duplex insns for now.
reportError("invalid instruction packet");
return false;
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
index 1803a936701fb2..8d36e94d8e6960 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.cpp
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -769,7 +769,7 @@ void M68kInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DstReg), FrameIndex);
}
-/// Return a virtual register initialized with the the global base register
+/// Return a virtual register initialized with the global base register
/// value. Output instructions required to initialize the register in the
/// function entry block, if necessary.
///
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.h b/llvm/lib/Target/M68k/M68kInstrInfo.h
index b6057a39bc826d..577967f2fdfc97 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.h
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.h
@@ -324,7 +324,7 @@ class M68kInstrInfo : public M68kGenInstrInfo {
bool ExpandMOVEM(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
bool IsRM) const;
- /// Return a virtual register initialized with the the global base register
+ /// Return a virtual register initialized with the global base register
/// value. Output instructions required to initialize the register in the
/// function entry block, if necessary.
unsigned getGlobalBaseReg(MachineFunction *MF) const;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 25a98739d5fc62..33b2867b5dd8b8 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -5286,7 +5286,7 @@ static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
// inserted into the DAG as part of call lowering. The restore of the TOC
// pointer is modeled by using a pseudo instruction for the call opcode that
// represents the 2 instruction sequence of an indirect branch and link,
- // immediately followed by a load of the TOC pointer from the the stack save
+ // immediately followed by a load of the TOC pointer from the stack save
// slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
// as it is not saved or used.
RetOpc = isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
index 79adc83e8d6535..9bc100981f2f7b 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -61,7 +61,7 @@ class RISCVFrameLowering : public TargetFrameLowering {
const TargetRegisterInfo *TRI) const override;
// Get the first stack adjustment amount for SplitSPAdjust.
- // Return 0 if we don't want to to split the SP adjustment in prologue and
+ // Return 0 if we don't want to split the SP adjustment in prologue and
// epilogue.
uint64_t getFirstSPAdjustAmount(const MachineFunction &MF) const;
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 70003daa4866a2..0267aefd1e914e 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1130,7 +1130,7 @@ SDValue VETargetLowering::lowerATOMIC_FENCE(SDValue Op,
case AtomicOrdering::AcquireRelease:
case AtomicOrdering::SequentiallyConsistent:
// Generate "fencem 3" as acq_rel and seq_cst fence.
- // FIXME: "fencem 3" doesn't wait for for PCIe deveices accesses,
+ // FIXME: "fencem 3" doesn't wait for PCIe deveices accesses,
// so seq_cst may require more instruction for them.
return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
DAG.getTargetConstant(3, DL, MVT::i32),
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 44ef14e13d2bfb..f8a4b95a95515e 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -368,7 +368,7 @@ static void basicCheckForEHAndSjLj(TargetMachine *TM) {
// to TargetOptions and MCAsmInfo. But when clang compiles bitcode directly,
// clang's LangOptions is not used and thus the exception model info is not
// correctly transferred to TargetOptions and MCAsmInfo, so we make sure we
- // have the correct exception model in in WebAssemblyMCAsmInfo constructor.
+ // have the correct exception model in WebAssemblyMCAsmInfo constructor.
// But in this case TargetOptions is still not updated, so we make sure they
// are the same.
TM->Options.ExceptionModel = TM->getMCAsmInfo()->getExceptionHandlingType();
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 289642ac37bbfe..fbb2fc138d7ccd 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -418,7 +418,7 @@ isRightAfterData(MCFragment *CurrentFragment,
// - If it's not the fragment where the previous instruction is,
// returns true.
// - If it's the fragment holding the previous instruction but its
- // size changed since the the previous instruction was emitted into
+ // size changed since the previous instruction was emitted into
// it, returns true.
// - Otherwise returns false.
// - If the fragment is not a DataFragment, returns false.
@@ -569,7 +569,7 @@ void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst)
if (!needAlign(Inst) || !PendingBA)
return;
- // Tie the aligned instructions into a a pending BoundaryAlign.
+ // Tie the aligned instructions into a pending BoundaryAlign.
PendingBA->setLastFragment(CF);
PendingBA = nullptr;
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index fa5530d56512d9..7416936bc64e57 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -3819,7 +3819,7 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
}
if (Subtarget->hasBMI2()) {
- // Great, just emit the the BZHI..
+ // Great, just emit the BZHI..
if (NVT != MVT::i32) {
// But have to place the bit count into the wide-enough register first.
NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 021634156eb95c..520d8ed74a817e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4221,7 +4221,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
unsigned ShiftLeft = NumElems - SubVecNumElems;
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
- // Do an optimization for the the most frequently used types.
+ // Do an optimization for the most frequently used types.
if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
Mask0.flipAllBits();
diff --git a/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp b/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
index 3baf73344b62fa..785bdd83cd998b 100644
--- a/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
+++ b/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
@@ -169,7 +169,7 @@ bool X86IndirectBranchTrackingPass::runOnMachineFunction(MachineFunction &MF) {
break;
} else if (I->isEHLabel()) {
// Old Landingpad BB (is not Landingpad now) with
- // the the old "callee" EHLabel.
+ // the old "callee" EHLabel.
MCSymbol *Sym = I->getOperand(0).getMCSymbol();
if (!MF.hasCallSiteLandingPad(Sym))
continue;
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index ecab0c7e61795e..aed26bfcee3d48 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -975,7 +975,7 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
if (MinSize == 2 && Subtarget->is32Bit() &&
Subtarget->isTargetWindowsMSVC() &&
(Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
- // For compatibility reasons, when targetting MSVC, is is important to
+ // For compatibility reasons, when targetting MSVC, it is important to
// generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools
// rely specifically on this pattern to be able to patch a function.
// This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE.
diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp
index e258299c6a4c56..21e8a5c39fcb30 100644
--- a/llvm/lib/Transforms/IPO/IROutliner.cpp
+++ b/llvm/lib/Transforms/IPO/IROutliner.cpp
@@ -557,7 +557,7 @@ collectRegionsConstants(OutlinableRegion &Region,
// Iterate over the operands in an instruction. If the global value number,
// assigned by the IRSimilarityCandidate, has been seen before, we check if
- // the the number has been found to be not the same value in each instance.
+ // the number has been found to be not the same value in each instance.
for (Value *V : ID.OperVals) {
std::optional<unsigned> GVNOpt = C.getGVN(V);
assert(GVNOpt && "Expected a GVN for operand?");
@@ -766,7 +766,7 @@ static void moveFunctionData(Function &Old, Function &New,
}
}
-/// Find the the constants that will need to be lifted into arguments
+/// Find the constants that will need to be lifted into arguments
/// as they are not the same in each instance of the region.
///
/// \param [in] C - The IRSimilarityCandidate containing the region we are
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 2ffdb1e31087d8..c7ccdfb6745a19 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1104,7 +1104,7 @@ void LowerTypeTestsModule::importFunction(
replaceCfiUses(F, FDecl, isJumpTableCanonical);
// Set visibility late because it's used in replaceCfiUses() to determine
- // whether uses need to to be replaced.
+ // whether uses need to be replaced.
F->setVisibility(Visibility);
}
diff --git a/llvm/lib/Transforms/IPO/PartialInlining.cpp b/llvm/lib/Transforms/IPO/PartialInlining.cpp
index b88ba2dec24bae..421b8636b21573 100644
--- a/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -161,7 +161,7 @@ struct FunctionOutliningInfo {
// The dominating block of the region to be outlined.
BasicBlock *NonReturnBlock = nullptr;
- // The set of blocks in Entries that that are predecessors to ReturnBlock
+ // The set of blocks in Entries that are predecessors to ReturnBlock
SmallVector<BasicBlock *, 4> ReturnBlockPreds;
};
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index b3d90dba37c6be..67a2e167b6bf7d 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -1919,7 +1919,7 @@ SampleProfileLoader::buildFunctionOrder(Module &M, LazyCallGraph &CG) {
// on the profile to favor more inlining. This is only a problem with CS
// profile.
// 3. Transitive indirect call edges due to inlining. When a callee function
- // (say B) is inlined into into a caller function (say A) in LTO prelink,
+ // (say B) is inlined into a caller function (say A) in LTO prelink,
// every call edge originated from the callee B will be transferred to
// the caller A. If any transferred edge (say A->C) is indirect, the
// original profiled indirect edge B->C, even if considered, would not
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 4be28aaee8746b..186a9fee901f85 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2988,7 +2988,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
auto *DstTy = dyn_cast<FixedVectorType>(ReturnType);
auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
- // Only canonicalize if the the destination vector and Vec are fixed
+ // Only canonicalize if the destination vector and Vec are fixed
// vectors.
if (DstTy && VecTy) {
unsigned DstNumElts = DstTy->getNumElements();
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index cf8c3a4a09d4df..a6c98392be07e0 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3617,7 +3617,7 @@ Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
Value *StartV = StartU->get();
BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
- // We can't insert freeze if the the start value is the result of the
+ // We can't insert freeze if the start value is the result of the
// terminator (e.g. an invoke).
if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
return nullptr;
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 8caee5bed8ed08..2f237d051dbfbc 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -564,7 +564,7 @@ class DataFlowSanitizer {
/// getShadowTy([n x T]) = [n x getShadowTy(T)]
/// getShadowTy(other type) = i16
Type *getShadowTy(Type *OrigTy);
- /// Returns the shadow type of of V's type.
+ /// Returns the shadow type of V's type.
Type *getShadowTy(Value *V);
const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index db0e9b5e59cf58..bf40b355651c3a 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -1245,7 +1245,7 @@ LoopConstrainer::calculateSubRanges(bool IsSignedPredicate) const {
// `End`, decrementing by one every time.
//
// * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the
- // induction variable is decreasing we know that that the smallest value
+ // induction variable is decreasing we know that the smallest value
// the loop body is actually executed with is `INT_SMIN` == `Smallest`.
//
// * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. In
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 41e0710dfbb984..2d987b4defa49c 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -993,7 +993,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
// loop invariant). If so make them unconditional by moving them to their
// immediate dominator. We iterate through the instructions in reverse order
// which ensures that when we rehoist an instruction we rehoist its operands,
- // and also keep track of where in the block we are rehoisting to to make sure
+ // and also keep track of where in the block we are rehoisting to make sure
// that we rehoist instructions before the instructions that use them.
Instruction *HoistPoint = nullptr;
if (ControlFlowHoisting) {
diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index d35b562be0aa55..e15d8d17ffa9d1 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -1411,7 +1411,7 @@ struct LoopFuser {
}
// Walk through all uses in FC1. For each use, find the reaching def. If the
- // def is located in FC0 then it is is not safe to fuse.
+ // def is located in FC0 then it is not safe to fuse.
for (BasicBlock *BB : FC1.L->blocks())
for (Instruction &I : *BB)
for (auto &Op : I.operands())
@@ -1491,7 +1491,7 @@ struct LoopFuser {
/// 2. The successors of the guard have the same flow into/around the loop.
/// If the compare instructions are identical, then the first successor of the
/// guard must go to the same place (either the preheader of the loop or the
- /// NonLoopBlock). In other words, the the first successor of both loops must
+ /// NonLoopBlock). In other words, the first successor of both loops must
/// both go into the loop (i.e., the preheader) or go around the loop (i.e.,
/// the NonLoopBlock). The same must be true for the second successor.
bool haveIdenticalGuards(const FusionCandidate &FC0,
@@ -1624,7 +1624,7 @@ struct LoopFuser {
// first, or undef otherwise. This is sound as exiting the first implies the
// second will exit too, __without__ taking the back-edge. [Their
// trip-counts are equal after all.
- // KB: Would this sequence be simpler to just just make FC0.ExitingBlock go
+ // KB: Would this sequence be simpler to just make FC0.ExitingBlock go
// to FC1.Header? I think this is basically what the three sequences are
// trying to accomplish; however, doing this directly in the CFG may mean
// the DT/PDT becomes invalid
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index b8dbdb1ec469d3..a58ab093a1f75d 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -282,7 +282,7 @@ class LoopPredication {
Instruction *findInsertPt(Instruction *User, ArrayRef<Value*> Ops);
/// Same as above, *except* that this uses the SCEV definition of invariant
/// which is that an expression *can be made* invariant via SCEVExpander.
- /// Thus, this version is only suitable for finding an insert point to be be
+ /// Thus, this version is only suitable for finding an insert point to be
/// passed to SCEVExpander!
Instruction *findInsertPt(const SCEVExpander &Expander, Instruction *User,
ArrayRef<const SCEV *> Ops);
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index c0149af7328ade..3c31d4a4cd3786 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -2177,7 +2177,7 @@ class LowerMatrixIntrinsics {
/// Returns true if \p V is a matrix value in the given subprogram.
bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }
- /// If \p V is a matrix value, print its shape as as NumRows x NumColumns to
+ /// If \p V is a matrix value, print its shape as NumRows x NumColumns to
/// \p SS.
void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
auto M = Inst2Matrix.find(V);
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index d4e9e4c6ab74a7..69fb656c9ba096 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -3535,7 +3535,7 @@ struct NewGVN::ValueDFS {
// the second. We only want it to be less than if the DFS orders are equal.
//
// Each LLVM instruction only produces one value, and thus the lowest-level
- //
diff erentiator that really matters for the stack (and what we use as as a
+ //
diff erentiator that really matters for the stack (and what we use as a
// replacement) is the local dfs number.
// Everything else in the structure is instruction level, and only affects
// the order in which we will replace operands of a given instruction.
diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp
index d701cf110154d1..31f065b691f864 100644
--- a/llvm/lib/Transforms/Utils/LoopPeel.cpp
+++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp
@@ -624,7 +624,7 @@ struct WeightInfo {
/// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to
/// go to exit.
/// Then, Estimated ExitCount = F / E.
-/// For I-th (counting from 0) peeled off iteration we set the the weights for
+/// For I-th (counting from 0) peeled off iteration we set the weights for
/// the peeled exit as (EC - I, 1). It gives us reasonable distribution,
/// The probability to go to exit 1/(EC-I) increases. At the same time
/// the estimated exit count in the remainder loop reduces by I.
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 1b2b3710ae693a..07fca7420ec984 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -1449,7 +1449,7 @@ int llvm::rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
// Note that we must not perform expansions until after
// we query *all* the costs, because if we perform temporary expansion
// inbetween, one that we might not intend to keep, said expansion
- // *may* affect cost calculation of the the next SCEV's we'll query,
+ // *may* affect cost calculation of the next SCEV's we'll query,
// and next SCEV may errneously get smaller cost.
// Collect all the candidate PHINodes to be rewritten.
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index d7f45d637e2445..3f6e6be2601ad6 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1392,7 +1392,7 @@ Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilderBase &B) {
if (isOnlyUsedInEqualityComparison(CI, SrcStr))
// S is dereferenceable so it's safe to load from it and fold
// memchr(S, C, N) == S to N && *S == C for any C and N.
- // TODO: This is safe even even for nonconstant S.
+ // TODO: This is safe even for nonconstant S.
return memChrToCharCompare(CI, Size, B, DL);
// From now on we need a constant length and constant array.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f60ea9b9fe2bd5..750f8ff22a22ab 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -6992,7 +6992,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
// With the exception of GEPs and PHIs, after scalarization there should
// only be one copy of the instruction generated in the loop. This is
// because the VF is either 1, or any instructions that need scalarizing
- // have already been dealt with by the the time we get here. As a result,
+ // have already been dealt with by the time we get here. As a result,
// it means we don't have to multiply the instruction cost by VF.
assert(I->getOpcode() == Instruction::GetElementPtr ||
I->getOpcode() == Instruction::PHI ||
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index b4a392c66803df..4eb240c08ac3c6 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -702,7 +702,7 @@ static Value *isOneOf(const InstructionsState &S, Value *Op) {
return S.OpValue;
}
-/// \returns true if \p Opcode is allowed as part of of the main/alternate
+/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of unsupported opcode is SDIV that can potentially cause UB if the
@@ -2205,7 +2205,7 @@ class BoUpSLP {
for (int Pass = 0; Pass != 2; ++Pass) {
// Check if no need to reorder operands since they're are perfect or
// shuffled diamond match.
- // Need to to do it to avoid extra external use cost counting for
+ // Need to do it to avoid extra external use cost counting for
// shuffled matches, which may cause regressions.
if (SkipReordering())
break;
@@ -2523,9 +2523,9 @@ class BoUpSLP {
collectUserStores(const BoUpSLP::TreeEntry *TE) const;
/// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
- /// stores in \p StoresVec can form a vector instruction. If so it returns true
- /// and populates \p ReorderIndices with the shuffle indices of the the stores
- /// when compared to the sorted vector.
+ /// stores in \p StoresVec can form a vector instruction. If so it returns
+ /// true and populates \p ReorderIndices with the shuffle indices of the
+ /// stores when compared to the sorted vector.
bool canFormVector(ArrayRef<StoreInst *> StoresVec,
OrdersType &ReorderIndices) const;
@@ -6241,7 +6241,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
ReuseShuffleIndicies);
TE->setOperandsInOrder();
for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
- // For scalar operands no need to to create an entry since no need to
+ // For scalar operands no need to create an entry since no need to
// vectorize it.
if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
continue;
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 62b314d1c5c5c6..0d45882adb3586 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -696,7 +696,7 @@ class VPLiveOut : public VPUser {
};
/// VPRecipeBase is a base class modeling a sequence of one or more output IR
-/// instructions. VPRecipeBase owns the the VPValues it defines through VPDef
+/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
index f6e3a2a16db85d..1184f8b34afb2d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
@@ -323,7 +323,7 @@ void PlainCFGBuilder::buildPlainCFG() {
// 2. Process outermost loop exit. We created an empty VPBB for the loop
// single exit BB during the RPO traversal of the loop body but Instructions
- // weren't visited because it's not part of the the loop.
+ // weren't visited because it's not part of the loop.
BasicBlock *LoopExitBB = TheLoop->getUniqueExitBlock();
assert(LoopExitBB && "Loops with multiple exits are not supported.");
VPBasicBlock *LoopExitVPBB = BB2VPBB[LoopExitBB];
diff --git a/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll b/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll
index fc61852dffa16d..3ca3c81e619c9f 100644
--- a/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll
+++ b/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll
@@ -17,7 +17,7 @@
; extern bool b;
; extern int x;
; void test() {
-; // i's value is 7 for the first call in in the if block. With basic
+; // i's value is 7 for the first call in the if block. With basic
; // block sections, this would split the range across sections and would
; // result in an extra entry than without sections.
; int i = 7;
diff --git a/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp b/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
index 5680c8c5a449ed..672e8073beb7c8 100644
--- a/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
+++ b/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
@@ -230,7 +230,7 @@ static bool filterArch(MachOObjectFile &Obj) {
/// Determine the virtual address that is considered the base address of an ELF
/// object file.
///
-/// The base address of an ELF file is the the "p_vaddr" of the first program
+/// The base address of an ELF file is the "p_vaddr" of the first program
/// header whose "p_type" is PT_LOAD.
///
/// \param ELFFile An ELF object file we will search.
diff --git a/llvm/tools/llvm-profgen/CSPreInliner.cpp b/llvm/tools/llvm-profgen/CSPreInliner.cpp
index ae0fd6d0b06929..9421118a3bb1b5 100644
--- a/llvm/tools/llvm-profgen/CSPreInliner.cpp
+++ b/llvm/tools/llvm-profgen/CSPreInliner.cpp
@@ -186,7 +186,7 @@ bool CSPreInliner::shouldInline(ProfiledInlineCandidate &Candidate) {
(NormalizationUpperBound - NormalizationLowerBound);
if (NormalizedHotness > 1.0)
NormalizedHotness = 1.0;
- // Add 1 to to ensure hot callsites get a non-zero threshold, which could
+ // Add 1 to ensure hot callsites get a non-zero threshold, which could
// happen when SampleColdCallSiteThreshold is 0. This is when we do not
// want any inlining for cold callsites.
SampleThreshold = SampleHotCallSiteThreshold * NormalizedHotness * 100 +
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index 902f6c4cbc7243..4872d055766248 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -2741,7 +2741,7 @@ void ELFDumper<ELFT>::printHashHistogram(const Elf_Hash &HashTable) const {
return;
std::vector<size_t> ChainLen(NBucket, 0);
- // Go over all buckets and and note chain lengths of each bucket (total
+ // Go over all buckets and note chain lengths of each bucket (total
// unique chain lengths).
for (size_t B = 0; B < NBucket; ++B) {
BitVector Visited(NChain);
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp b/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
index 29863e06f174ba..dccc18fdb9d6c0 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
@@ -149,7 +149,7 @@ opportunities(Function &F,
// Regardless whether referenced, add the function arguments as
// replacement possibility with the goal of reducing the number of (used)
- // function arguments, possibly created by the the operands-to-args.
+ // function arguments, possibly created by the operands-to-args.
for (Argument &Arg : F.args())
ReferencedVals.insert(&Arg);
@@ -185,7 +185,7 @@ opportunities(Function &F,
std::reverse(Candidates.begin(), Candidates.end());
// Independency of collectReferencedValues's idea of reductive power,
- // ensure the the partial order of IsMoreReduced is enforced.
+ // ensure the partial order of IsMoreReduced is enforced.
llvm::stable_sort(Candidates, IsMoreReduced);
Callback(Op, Candidates);
diff --git a/llvm/tools/obj2yaml/elf2yaml.cpp b/llvm/tools/obj2yaml/elf2yaml.cpp
index b261b9dc6f6e3f..40f6d5217949bc 100644
--- a/llvm/tools/obj2yaml/elf2yaml.cpp
+++ b/llvm/tools/obj2yaml/elf2yaml.cpp
@@ -300,7 +300,7 @@ template <class ELFT> Expected<ELFYAML::Object *> ELFDumper<ELFT>::dump() {
}
// Normally an object that does not have sections has e_shnum == 0.
- // Also, e_shnum might be 0, when the the number of entries in the section
+ // Also, e_shnum might be 0, when the number of entries in the section
// header table is larger than or equal to SHN_LORESERVE (0xff00). In this
// case the real number of entries is held in the sh_size member of the
// initial entry. We have a section header table when `e_shoff` is not 0.
diff --git a/llvm/unittests/CodeGen/InstrRefLDVTest.cpp b/llvm/unittests/CodeGen/InstrRefLDVTest.cpp
index 76464f783d92f5..aad99a1099d29b 100644
--- a/llvm/unittests/CodeGen/InstrRefLDVTest.cpp
+++ b/llvm/unittests/CodeGen/InstrRefLDVTest.cpp
@@ -3155,7 +3155,7 @@ TEST_F(InstrRefLDVTest, VLocSimpleLoop) {
VLocs[1].Vars.clear();
// Test that we can eliminate PHIs. A PHI will be placed at the loop head
- // because there's a def in in.
+ // because there's a def in it.
MInLocs[1][0] = LiveInRsp;
MOutLocs[1][0] = LiveInRsp;
VLocs[0].Vars.insert({Var, DbgValue(LiveInRspID, EmptyProps)});
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index ccf39636bf2752..cd55dc273e19d7 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -159,7 +159,7 @@ TEST(LowLevelTypeTest, ChangeElementType) {
EXPECT_EQ(V2P1, V2P0.changeElementType(P1));
EXPECT_EQ(V2S32, V2P0.changeElementType(S32));
- // Similar tests for for scalable vectors.
+ // Similar tests for scalable vectors.
const LLT NXV2S32 = LLT::scalable_vector(2, 32);
const LLT NXV2S64 = LLT::scalable_vector(2, 64);
diff --git a/llvm/utils/TableGen/CodeGenRegisters.h b/llvm/utils/TableGen/CodeGenRegisters.h
index 6243f9dfd1eed4..97f60811a7d864 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/llvm/utils/TableGen/CodeGenRegisters.h
@@ -368,7 +368,7 @@ namespace llvm {
llvm_unreachable("VTNum greater than number of ValueTypes in RegClass!");
}
- // Return true if this this class contains the register.
+ // Return true if this class contains the register.
bool contains(const CodeGenRegister*) const;
// Returns true if RC is a subclass.
More information about the llvm-commits
mailing list