[llvm] [AArch64] Fix a multitude of AArch64 typos (NFC) (PR #143370)
Jonathan Thackray via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 9 04:32:49 PDT 2025
https://github.com/jthackray created https://github.com/llvm/llvm-project/pull/143370
Fix a multitude of typos in the AArch64 codebase using the https://github.com/crate-ci/typos Rust package.
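For anyone wanting to re-run the check locally, the tool is driven from the command line; a minimal sketch, assuming the typos-cli crate's `typos` binary and its --write-changes flag (the exact flags used for this patch are not recorded here):

  cargo install typos-cli
  typos llvm/lib/Target/AArch64 --write-changes

Automated corrections of this kind still warrant a manual read-through, since a spell-checker can occasionally misfire on legitimate identifiers.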
From 7b6b678aa1c574301621b0477b5805da0271b301 Mon Sep 17 00:00:00 2001
From: Jonathan Thackray <jonathan.thackray at arm.com>
Date: Mon, 9 Jun 2025 12:29:45 +0100
Subject: [PATCH] [AArch64] Fix a multitude of AArch64 typos (NFC)
Fix a multitude of typos in the AArch64 codebase using the
https://github.com/crate-ci/typos Rust package.
---
.../AArch64/AArch64AdvSIMDScalarPass.cpp | 10 +++----
llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp | 6 ++--
llvm/lib/Target/AArch64/AArch64CollectLOH.cpp | 4 +--
.../AArch64/AArch64ConditionalCompares.cpp | 6 ++--
.../AArch64DeadRegisterDefinitionsPass.cpp | 4 +--
llvm/lib/Target/AArch64/AArch64FastISel.cpp | 2 +-
.../Target/AArch64/AArch64FrameLowering.cpp | 18 +++++------
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 12 ++++----
.../Target/AArch64/AArch64ISelLowering.cpp | 30 +++++++++----------
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 +-
.../lib/Target/AArch64/AArch64InstrFormats.td | 8 ++---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 6 ++--
llvm/lib/Target/AArch64/AArch64InstrInfo.h | 2 +-
llvm/lib/Target/AArch64/AArch64InstrInfo.td | 4 +--
.../AArch64/AArch64LoadStoreOptimizer.cpp | 16 +++++-----
.../AArch64LowerHomogeneousPrologEpilog.cpp | 2 +-
llvm/lib/Target/AArch64/AArch64Processors.td | 2 +-
.../Target/AArch64/AArch64RegisterInfo.cpp | 2 +-
.../lib/Target/AArch64/AArch64RegisterInfo.td | 2 +-
.../Target/AArch64/AArch64SIMDInstrOpt.cpp | 2 +-
.../Target/AArch64/AArch64SLSHardening.cpp | 2 +-
llvm/lib/Target/AArch64/AArch64SchedA53.td | 2 +-
llvm/lib/Target/AArch64/AArch64SchedOryon.td | 10 +++----
.../AArch64/AArch64SpeculationHardening.cpp | 2 +-
llvm/lib/Target/AArch64/AArch64Subtarget.h | 2 +-
.../Target/AArch64/AArch64SystemOperands.td | 4 +--
.../AArch64/AArch64TargetTransformInfo.cpp | 8 ++---
.../AArch64/AsmParser/AArch64AsmParser.cpp | 16 +++++-----
.../AArch64ExternalSymbolizer.cpp | 4 +--
.../AArch64/GISel/AArch64CallLowering.cpp | 2 +-
.../GISel/AArch64InstructionSelector.cpp | 4 +--
.../AArch64/GISel/AArch64RegisterBankInfo.cpp | 6 ++--
.../AArch64/GISel/AArch64RegisterBankInfo.h | 2 +-
.../MCTargetDesc/AArch64AddressingModes.h | 6 ++--
.../MCTargetDesc/AArch64ELFStreamer.cpp | 8 ++---
.../MCTargetDesc/AArch64InstPrinter.cpp | 4 +--
.../MCTargetDesc/AArch64MCTargetDesc.cpp | 2 +-
.../MCTargetDesc/AArch64TargetStreamer.cpp | 14 ++++-----
.../MCTargetDesc/AArch64TargetStreamer.h | 10 +++----
llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp | 2 +-
40 files changed, 125 insertions(+), 125 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 08a6fa2ea8db0..c85adcf85f8dc 100644
--- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -73,11 +73,11 @@ class AArch64AdvSIMDScalar : public MachineFunctionPass {
bool isProfitableToTransform(const MachineInstr &MI) const;
// transformInstruction - Perform the transformation of an instruction
- // to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
+ // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void transformInstruction(MachineInstr &MI);
- // processMachineBasicBlock - Main optimzation loop.
+ // processMachineBasicBlock - Main optimization loop.
bool processMachineBasicBlock(MachineBasicBlock *MBB);
public:
@@ -231,7 +231,7 @@ bool AArch64AdvSIMDScalar::isProfitableToTransform(
// If any of the uses of the original instructions is a cross class copy,
// that's a copy that will be removable if we transform. Likewise, if
- // any of the uses is a transformable instruction, it's likely the tranforms
+ // any of the uses is a transformable instruction, it's likely the transforms
// will chain, enabling us to save a copy there, too. This is an aggressive
// heuristic that approximates the graph based cost analysis described above.
Register Dst = MI.getOperand(0).getReg();
@@ -280,7 +280,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
}
// transformInstruction - Perform the transformation of an instruction
-// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
+// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);
@@ -372,7 +372,7 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
++NumScalarInsnsUsed;
}
-// processMachineBasicBlock - Main optimzation loop.
+// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
bool Changed = false;
for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 0d019bda36130..3f92c1dbfbf49 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -467,7 +467,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
if (PAuthABIPlatform || PAuthABIVersion) {
- TS->emitAtributesSubsection(
+ TS->emitAttributesSubsection(
AArch64BuildAttributes::getVendorName(
AArch64BuildAttributes::AEABI_PAUTHABI),
AArch64BuildAttributes::SubsectionOptional::REQUIRED,
@@ -490,7 +490,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
(Flags & AArch64BuildAttributes::Feature_GCS_Flag) ? 1 : 0;
if (BTIValue || PACValue || GCSValue) {
- TS->emitAtributesSubsection(
+ TS->emitAttributesSubsection(
AArch64BuildAttributes::getVendorName(
AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
@@ -3531,7 +3531,7 @@ const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
char AArch64AsmPrinter::ID = 0;
INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
- "AArch64 Assmebly Printer", false, false)
+ "AArch64 Assembly Printer", false, false)
// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
diff --git a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
index 64f21c4cb2297..53e8e438c5e57 100644
--- a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -232,7 +232,7 @@ static bool isCandidateLoad(const MachineInstr &MI) {
}
}
-/// Check whether the given instruction can load a litteral.
+/// Check whether the given instruction can load a literal.
static bool supportLoadFromLiteral(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
@@ -247,7 +247,7 @@ static bool supportLoadFromLiteral(const MachineInstr &MI) {
}
}
-/// Number of GPR registers traked by mapRegToGPRIndex()
+/// Number of GPR registers tracked by mapRegToGPRIndex()
static const unsigned N_GPR_REGS = 31;
/// Map register number to index from 0-30.
static int mapRegToGPRIndex(MCRegister Reg) {
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 9b59ee6483cd9..484bc2a4be8fa 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -573,7 +573,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
// Update the CFG first.
updateTailPHIs();
- // Save successor probabilties before removing CmpBB and Tail from their
+ // Save successor probabilities before removing CmpBB and Tail from their
// parents.
BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB);
BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail);
@@ -581,7 +581,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
Head->removeSuccessor(CmpBB);
CmpBB->removeSuccessor(Tail);
- // If Head and CmpBB had successor probabilties, udpate the probabilities to
+ // If Head and CmpBB had successor probabilities, update the probabilities to
// reflect the ccmp-conversion.
if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) {
@@ -596,7 +596,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
Head2Tail + Head2CmpBB * CmpBB2Tail);
// We will transfer successors of CmpBB to Head in a moment without
- // normalizing the successor probabilities. Set the successor probabilites
+ // normalizing the successor probabilities. Set the successor probabilities
// before doing so.
//
// Pr(I|Head) = Pr(CmpBB|Head) * Pr(I|CmpBB).
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 71284b0574e57..987dfbcdd53e9 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -64,10 +64,10 @@ static bool usesFrameIndex(const MachineInstr &MI) {
return false;
}
-// Instructions that lose their 'read' operation for a subesquent fence acquire
+// Instructions that lose their 'read' operation for a subsequent fence acquire
// (DMB LD) once the zero register is used.
//
-// WARNING: The aquire variants of the instructions are also affected, but they
+// WARNING: The acquire variants of the instructions are also affected, but they
// are split out into `atomicBarrierDroppedOnZero()` to support annotations on
// assembly.
static bool atomicReadDroppedOnZero(unsigned Opcode) {
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index bb7e6b662f80e..9d74bb5a8661d 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -508,7 +508,7 @@ Register AArch64FastISel::materializeGV(const GlobalValue *GV) {
// also uses BuildMI for making an ADRP (+ MOVK) + ADD, but the operands
// are not exactly 1:1 with FastISel so we cannot easily abstract this
// out. At some point, it would be nice to find a way to not have this
- // duplciate code.
+ // duplicate code.
Register DstReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::MOVKXi),
DstReg)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 22683237fa0a8..3335ee04bb0e0 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -399,7 +399,7 @@ static const unsigned DefaultSafeSPDisplacement = 255;
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
- // FIXME: For now, just conservatively guestimate based on unscaled indexing
+ // FIXME: For now, just conservatively guesstimate based on unscaled indexing
// range. We'll end up allocating an unnecessary spill slot a lot, but
// realistically that's not a big deal at this stage of the game.
for (MachineBasicBlock &MBB : MF) {
@@ -647,7 +647,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
continue;
// Not all unwinders may know about SVE registers, so assume the lowest
- // common demoninator.
+ // common denominator.
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
MCRegister Reg = Info.getReg();
if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
@@ -801,7 +801,7 @@ void AArch64FrameLowering::allocateStackSpace(
.addImm(InitialOffset.getFixed())
.addImm(InitialOffset.getScalable());
// The fixed allocation may leave unprobed bytes at the top of the
- // stack. If we have subsequent alocation (e.g. if we have variable-sized
+ // stack. If we have subsequent allocation (e.g. if we have variable-sized
// objects), we need to issue an extra probe, so these allocations start in
// a known state.
if (FollowupAllocs) {
@@ -2054,7 +2054,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
HasWinCFI = true;
// alloc_l can hold at most 256MB, so assume that NumBytes doesn't
// exceed this amount. We need to move at most 2^24 - 1 into x15.
- // This is at most two instructions, MOVZ follwed by MOVK.
+ // This is at most two instructions, MOVZ followed by MOVK.
// TODO: Fix to use multiple stack alloc unwind codes for stacks
// exceeding 256MB in size.
if (NumBytes >= (1 << 28))
@@ -2400,7 +2400,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineInstr::FrameDestroy, PrologueSaveSize);
} else {
// If not, make sure to emit an add after the last ldp.
- // We're doing this by transfering the size to be restored from the
+ // We're doing this by transferring the size to be restored from the
// adjustment *before* the CSR pops to the adjustment *after* the CSR
// pops.
AfterCSRPopSize += PrologueSaveSize;
@@ -2949,7 +2949,7 @@ static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
const TargetRegisterInfo *TRI) {
// If we are generating register pairs for a Windows function that requires
// EH support, then pair consecutive registers only. There are no unwind
- // opcodes for saves/restores of non-consectuve register pairs.
+ // opcodes for saves/restores of non-consecutive register pairs.
// The unwind opcodes are save_regp, save_regp_x, save_fregp, save_frepg_x,
// save_lrpair.
// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
@@ -3187,7 +3187,7 @@ static void computeCalleeSaveRegisterPairs(
RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
- // Realign the scalable offset if necesary. This is relevant when
+ // Realign the scalable offset if necessary. This is relevant when
// spilling predicates on Windows.
if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
@@ -5022,7 +5022,7 @@ MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
}
// Find contiguous runs of tagged memory and emit shorter instruction
- // sequencies for them when possible.
+ // sequences for them when possible.
TagStoreEdit TSE(MBB, FirstZeroData);
std::optional<int64_t> EndOffset;
for (auto &Instr : Instrs) {
@@ -5591,7 +5591,7 @@ void AArch64FrameLowering::emitRemarks(
unsigned RegTy = StackAccess::AccessType::GPR;
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
- // spill/fill the predicate as a data vector (so are an FPR acess).
+ // spill/fill the predicate as a data vector (so are an FPR access).
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
AArch64::PPRRegClass.contains(MI.getOperand(0).getReg())) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 34f6db9374cb5..11cb91fbe02d4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -991,7 +991,7 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
}
/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
-/// operand is refered by the instructions have SP operand
+/// operand is referred by the instructions have SP operand
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
SDValue &Shift) {
unsigned ShiftVal = 0;
@@ -2841,7 +2841,7 @@ static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y.
// After #2, the useful bits of x are 0x4.
-// However, if x is used on an unpredicatable instruction, then all its bits
+// However, if x is used on an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
@@ -3611,7 +3611,7 @@ static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
DstLSB = 0;
Width = ImmS - ImmR + 1;
// FIXME: This constraint is to catch bitfield insertion we may
- // want to widen the pattern if we want to grab general bitfied
+ // want to widen the pattern if we want to grab general bitfield
// move case
if (Width <= 0)
continue;
@@ -3999,7 +3999,7 @@ static int getIntOperandFromRegisterString(StringRef RegString) {
// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
-// form described in getIntOperandsFromRegsterString) or is a named register
+// form described in getIntOperandsFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -4060,7 +4060,7 @@ bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
-// form described in getIntOperandsFromRegsterString) or is a named register
+// form described in getIntOperandsFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -7278,7 +7278,7 @@ static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
}
/// Return the EVT of the data associated to a memory operation in \p
-/// Root. If such EVT cannot be retrived, it returns an invalid EVT.
+/// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Root))
return MemIntr->getMemoryVT();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 121720e7defd4..d30cfa257721f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5367,7 +5367,7 @@ static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
return AArch64ISD::UMULL;
} else if (VT == MVT::v2i64 && DAG.MaskedValueIsZero(N0, Mask) &&
DAG.MaskedValueIsZero(N1, Mask)) {
- // For v2i64 we look more aggresively at both operands being zero, to avoid
+ // For v2i64 we look more aggressively at both operands being zero, to avoid
// scalarization.
return AArch64ISD::UMULL;
}
@@ -5844,7 +5844,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
} else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
} else {
- report_fatal_error("Unexpected type for AArch64 NEON intrinic");
+ report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
}
}
case Intrinsic::aarch64_neon_pmull64: {
@@ -8630,9 +8630,9 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
if (SizeInBits < 8)
return false;
- APInt RequredZero(SizeInBits, 0xFE);
+ APInt RequiredZero(SizeInBits, 0xFE);
KnownBits Bits = DAG.computeKnownBits(Arg, 4);
- bool ZExtBool = (Bits.Zero & RequredZero) == RequredZero;
+ bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
return ZExtBool;
}
@@ -13536,7 +13536,7 @@ static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
} else {
assert(VT.getScalarSizeInBits() == 32 &&
- "Expected 16 or 32 bit shuffle elemements");
+ "Expected 16 or 32 bit shuffle elements");
Input = DAG.getBitcast(MVT::v2f64, Input);
OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
}
@@ -13941,7 +13941,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
V1 = DAG.getBitcast(NewVecTy, V1);
- // Constuct the DUP instruction
+ // Construct the DUP instruction
V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
// Cast back to the original type
return DAG.getBitcast(VT, V1);
@@ -16900,12 +16900,12 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
}
bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
- Align &RequiredAligment) const {
+ Align &RequiredAlignment) const {
if (!LoadedType.isSimple() ||
(!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
return false;
// Cyclone supports unaligned accesses.
- RequiredAligment = Align(1);
+ RequiredAlignment = Align(1);
unsigned NumBits = LoadedType.getSizeInBits();
return NumBits == 32 || NumBits == 64;
}
@@ -18028,7 +18028,7 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
return SDValue();
- // Pattern is dectected. Let's convert it to sequence of nodes.
+ // Pattern is detected. Let's convert it to sequence of nodes.
SDLoc DL(N);
// First, create the node pattern of UABD/SABD.
@@ -18246,10 +18246,10 @@ static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(I * 16, DL, MVT::i64));
SDValue Dot =
DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, Vec8Op0, Vec8Op1);
- SDValue VecReudceAdd8 =
+ SDValue VecReduceAdd8 =
DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
return DAG.getNode(ISD::ADD, DL, N->getValueType(0), VecReduceAdd16,
- VecReudceAdd8);
+ VecReduceAdd8);
}
// Given an (integer) vecreduce, we know the order of the inputs does not
@@ -21474,7 +21474,7 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
case Intrinsic::aarch64_neon_ushl:
// For positive shift amounts we can use SHL, as ushl/sshl perform a regular
// left shift for positive shift amounts. For negative shifts we can use a
- // VASHR/VLSHR as appropiate.
+ // VASHR/VLSHR as appropriate.
if (ShiftAmount < 0) {
Opcode = IID == Intrinsic::aarch64_neon_sshl ? AArch64ISD::VASHR
: AArch64ISD::VLSHR;
@@ -22880,7 +22880,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
}
static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
- assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexepected Opcode!");
+ assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
// splice(pg, op1, undef) -> op1
if (N->getOperand(2).isUndef())
@@ -23616,10 +23616,10 @@ static SDValue performLOADCombine(SDNode *N,
LD->getMemOperand()->getFlags(), LD->getAAInfo());
SDValue UndefVector = DAG.getUNDEF(NewVT);
SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL);
- SDValue ExtendedReminingLoad =
+ SDValue ExtendedRemainingLoad =
DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT,
{UndefVector, RemainingLoad, InsertIdx});
- LoadOps.push_back(ExtendedReminingLoad);
+ LoadOps.push_back(ExtendedRemainingLoad);
LoadOpsChain.push_back(SDValue(cast<SDNode>(RemainingLoad), 1));
EVT ConcatVT =
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index d76c4ce31d008..e0b6c1b8c0baf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -207,7 +207,7 @@ class AArch64TargetLowering : public TargetLowering {
bool optimizeExtendOrTruncateConversion(
Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;
- bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;
+ bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 4796c277ab99e..9078748c14834 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -282,7 +282,7 @@ def CondCode : AsmOperandClass {
let DiagnosticType = "InvalidCondCode";
}
-// A 32-bit register pasrsed as 64-bit
+// A 32-bit register parsed as 64-bit
def GPR32as64Operand : AsmOperandClass {
let Name = "GPR32as64";
let ParserMethod =
@@ -292,7 +292,7 @@ def GPR32as64 : RegisterOperand<GPR32> {
let ParserMatchClass = GPR32as64Operand;
}
-// A 64-bit register pasrsed as 32-bit
+// A 64-bit register parsed as 32-bit
def GPR64as32Operand : AsmOperandClass {
let Name = "GPR64as32";
let ParserMethod =
@@ -580,7 +580,7 @@ def uimm5s8 : Operand<i64>, ImmLeaf<i64,
let PrintMethod = "printImmScale<8>";
}
-// tuimm5sN predicate - similiar to uimm5sN, but use TImmLeaf (TargetConstant)
+// tuimm5sN predicate - similar to uimm5sN, but use TImmLeaf (TargetConstant)
// instead of ImmLeaf (Constant)
def tuimm5s2 : Operand<i64>, TImmLeaf<i64,
[{ return Imm >= 0 && Imm < (32*2) && ((Imm % 2) == 0); }],
@@ -3776,7 +3776,7 @@ multiclass StoreUI<bits<2> sz, bit V, bits<2> opc, DAGOperand regtype,
// Same as StoreUI, but take a RegisterOperand. This is used by GlobalISel to
// substitute zero-registers automatically.
//
-// TODO: Roll out zero-register subtitution to GPR32/GPR64 and fold this back
+// TODO: Roll out zero-register substitution to GPR32/GPR64 and fold this back
// into StoreUI.
multiclass StoreUIz<bits<2> sz, bit V, bits<2> opc, RegisterOperand regtype,
Operand indextype, string asm, list<dag> pattern> {
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index a229b71b4b6e7..951cb93ea8f8c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -8775,7 +8775,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
return false;
// Find Definition.
- assert(MI.getParent() && "Incomplete machine instruciton\n");
+ assert(MI.getParent() && "Incomplete machine instruction\n");
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
@@ -9077,7 +9077,7 @@ AArch64InstrInfo::getOutliningCandidateInfo(
// address signing attributes, i.e., all share the same value for the
// attribute "sign-return-address" and all share the same type of key they
// are signed with.
- // Additionally we require all functions to simultaniously either support
+ // Additionally we require all functions to simultaneously either support
// v8.3a features or not. Otherwise an outlined function could get signed
// using dedicated v8.3 instructions and a call from a function that doesn't
// support v8.3 instructions would therefore be invalid.
@@ -10319,7 +10319,7 @@ unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) {
MachineBasicBlock::iterator
AArch64InstrInfo::probedStackAlloc(MachineBasicBlock::iterator MBBI,
Register TargetReg, bool FrameSetup) const {
- assert(TargetReg != AArch64::SP && "New top of stack cannot aleady be in SP");
+ assert(TargetReg != AArch64::SP && "New top of stack cannot already be in SP");
MachineBasicBlock &MBB = *MBBI->getParent();
MachineFunction &MF = *MBB.getParent();
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 0ffaca9af4006..7c255da333e4b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -519,7 +519,7 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
/// Returns true if the instruction has a shift by immediate that can be
/// executed in one cycle less.
static bool isFalkorShiftExtFast(const MachineInstr &MI);
- /// Return true if the instructions is a SEH instruciton used for unwinding
+ /// Return true if the instructions is a SEH instruction used for unwinding
/// on Windows.
static bool isSEHInstruction(const MachineInstr &MI);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 71589432a7552..f5b66b75eb407 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -865,7 +865,7 @@ def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;
-// Vector immedate moves
+// Vector immediate moves
def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
@@ -1487,7 +1487,7 @@ let Predicates = [HasPCDPHINT] in {
}
// In order to be able to write readable assembly, LLVM should accept assembly
-// inputs that use Branch Target Indentification mnemonics, even with BTI disabled.
+// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti", (HINT 32), 0>;
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index b7da07a95c7b4..f51f0d11ef9d8 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -10,7 +10,7 @@
// optimizations. This pass should be run after register allocation.
//
// The pass runs after the PrologEpilogInserter where we emit the CFI
-// instructions. In order to preserve the correctness of the unwind informaiton,
+// instructions. In order to preserve the correctness of the unwind information,
// the pass should not change the order of any two instructions, one of which
// has the FrameSetup/FrameDestroy flag or, alternatively, apply an add-hoc fix
// to unwind information.
@@ -189,7 +189,7 @@ struct AArch64LoadStoreOpt : public MachineFunctionPass {
// pre or post indexed addressing with writeback. Scan backwards.
// `MergeEither` is set to true if the combined instruction may be placed
// either at the location of the load/store instruction or at the location of
- // the update intruction.
+ // the update instruction.
MachineBasicBlock::iterator
findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit,
bool &MergeEither);
@@ -1281,7 +1281,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
// instruction contains the final value we care about we give it a new
// debug-instr-number 3. Whereas, $w1 contains the final value that we care
// about, therefore the LDP instruction is also given a new
- // debug-instr-number 4. We have to add these subsitutions to the
+ // debug-instr-number 4. We have to add these substitutions to the
// debugValueSubstitutions table. However, we also have to ensure that the
// OpIndex that pointed to debug-instr-number 1 gets updated to 1, because
// $w1 is the second operand of the LDP instruction.
@@ -2602,7 +2602,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
ModifiedRegUnits.clear();
UsedRegUnits.clear();
unsigned Count = 0;
- bool MemAcessBeforeSPPreInc = false;
+ bool MemAccessBeforeSPPreInc = false;
MergeEither = true;
do {
MBBI = prev_nodbg(MBBI, B);
@@ -2617,7 +2617,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset)) {
// Check that the update value is within our red zone limit (which may be
// zero).
- if (MemAcessBeforeSPPreInc && MBBI->getOperand(2).getImm() > RedZoneSize)
+ if (MemAccessBeforeSPPreInc && MBBI->getOperand(2).getImm() > RedZoneSize)
return E;
return MBBI;
}
@@ -2648,7 +2648,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
// case we need to validate later that the update amount respects the red
// zone.
if (BaseRegSP && MBBI->mayLoadOrStore())
- MemAcessBeforeSPPreInc = true;
+ MemAccessBeforeSPPreInc = true;
} while (MBBI != B && Count < Limit);
return E;
}
@@ -2745,7 +2745,7 @@ bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
if (!TII->isCandidateToMergeOrPair(MI))
return false;
- // Look ahead up to LdStLimit instructions for a mergable instruction.
+ // Look ahead up to LdStLimit instructions for a mergeable instruction.
LdStPairFlags Flags;
MachineBasicBlock::iterator MergeMI =
findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
@@ -2941,7 +2941,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
bool Modified = false;
- // Four tranformations to do here:
+ // Four transformations to do here:
// 1) Find loads that directly read from stores and promote them by
// replacing with mov instructions. If the store is wider than the load,
// the load will be replaced with a bitfield extract.
diff --git a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
index 66f14b67a31ff..d67182d5bb513 100644
--- a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp
@@ -402,7 +402,7 @@ static bool shouldUseFrameHelper(MachineBasicBlock &MBB,
InstCount--;
break;
case FrameHelperType::PrologFrame: {
- // Effecitvely no change in InstCount since FpAdjusment is included.
+ // Effectively no change in InstCount since FpAdjustment is included.
break;
}
case FrameHelperType::Epilog:
diff --git a/llvm/lib/Target/AArch64/AArch64Processors.td b/llvm/lib/Target/AArch64/AArch64Processors.td
index e7a3527202f6a..c7ea6393e2ad3 100644
--- a/llvm/lib/Target/AArch64/AArch64Processors.td
+++ b/llvm/lib/Target/AArch64/AArch64Processors.td
@@ -302,7 +302,7 @@ def TuneOlympus : SubtargetFeature<"olympus", "ARMProcFamily", "Olympus",
FeatureUseFixedOverScalableIfEqualCost]>;
// Note that cyclone does not fuse AES instructions, but newer apple chips do
-// perform the fusion and cyclone is used by default when targetting apple OSes.
+// perform the fusion and cyclone is used by default when targeting apple OSes.
def TuneAppleA7 : SubtargetFeature<"apple-a7", "ARMProcFamily", "AppleA7",
"Apple A7 (the CPU formerly known as Cyclone)", [
FeatureAlternateSExtLoadCVTF32Pattern,
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index b0c69b8aca806..fb472ddc719fc 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1218,7 +1218,7 @@ bool AArch64RegisterInfo::getRegAllocationHints(
// is valid but { z1, z2, z3, z5 } is not.
// * One or more of the registers used by FORM_TRANSPOSED_X4 is already
// assigned a physical register, which means only checking that a
- // consectutive range of free tuple registers exists which includes
+ // consecutive range of free tuple registers exists which includes
// the assigned register.
// e.g. in the example above, if { z0, z8 } is already allocated for
// %v0, we just need to ensure that { z1, z9 }, { z2, z10 } and
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
index d3252ea54321e..61bf87fe71441 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -1849,7 +1849,7 @@ def ZTR : RegisterClass<"AArch64", [untyped], 512, (add ZT0)> {
// * Tile vectors:
//
// Their representation is similar to regular tiles, but they have an extra
-// 'h' or 'v' to tell how the vector at [reg+offset] is layed out in the tile,
+// 'h' or 'v' to tell how the vector at [reg+offset] is laid out in the tile,
// horizontally or vertically.
//
// e.g. za1h.h or za15v.q, which corresponds to vectors in registers ZAH1 and
diff --git a/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp b/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
index b3159b444e5b7..d695f2678fd4e 100644
--- a/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
@@ -147,7 +147,7 @@ struct AArch64SIMDInstrOpt : public MachineFunctionPass {
};
// A costly instruction is replaced in this work by N efficient instructions
- // The maximum of N is curently 10 and it is for ST4 case.
+ // The maximum of N is currently 10 and it is for ST4 case.
static const unsigned MaxNumRepl = 10;
AArch64SIMDInstrOpt() : MachineFunctionPass(ID) {}
diff --git a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
index 91410a5af3dc3..0dc57f7353885 100644
--- a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
@@ -449,7 +449,7 @@ void SLSHardeningInserter::convertBLRToBL(
// Now copy the implicit operands from BLR to BL and copy other necessary
// info.
- // However, both BLR and BL instructions implictly use SP and implicitly
+ // However, both BLR and BL instructions implicitly use SP and implicitly
// define LR. Blindly copying implicit operands would result in SP and LR
// operands to be present multiple times. While this may not be too much of
// an issue, let's avoid that for cleanliness, by removing those implicit
diff --git a/llvm/lib/Target/AArch64/AArch64SchedA53.td b/llvm/lib/Target/AArch64/AArch64SchedA53.td
index c714bad92b7fb..66715b9d1db8b 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA53.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA53.td
@@ -19,7 +19,7 @@ def CortexA53Model : SchedMachineModel {
let MicroOpBufferSize = 0; // Explicitly set to zero since A53 is in-order.
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
let LoadLatency = 3; // Optimistic load latency assuming bypass.
- // This is overriden by OperandCycles if the
+ // This is overridden by OperandCycles if the
// Itineraries are queried instead.
let MispredictPenalty = 9; // Based on "Cortex-A53 Software Optimisation
// Specification - Instruction Timings"
diff --git a/llvm/lib/Target/AArch64/AArch64SchedOryon.td b/llvm/lib/Target/AArch64/AArch64SchedOryon.td
index 09d1af248f0ec..5b597b91e7459 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedOryon.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedOryon.td
@@ -187,7 +187,7 @@ def ORYONFP2 : ProcResGroup<[ORYONP14FP2]> {
let BufferSize = 48;
}
-// Reciprocal, Squre root on FP0.
+// Reciprocal, Square root on FP0.
def ORYONFP0 : ProcResGroup<[ORYONP12FP0]> {
let BufferSize = 48;
}
@@ -701,7 +701,7 @@ def : InstRW<[ORYONWrite_1Cyc_I0123],
"^CSNEG(W|X)r", "^CSINC(W|X)r")>;
//---
-//Compare Instruciton
+//Compare Instruction
//---
// We have CCMP, CCMN as LLVM DAG node
@@ -1512,7 +1512,7 @@ def : InstRW<[ORYONWrite_10Cyc_FP3_RC], (instregex "^FSQRTv.*32$")>;
def : InstRW<[ORYONWrite_13Cyc_FP3_RC], (instregex "^FSQRTv.*64$")>;
//==========
-// SIMD binary elememt arithmetic instructions
+// SIMD binary element arithmetic instructions
//==========
def : InstRW<[ORYONWrite_4Cyc_FP0123], (instregex "^FMLAv", "^FMLSv")>;
@@ -1568,7 +1568,7 @@ def : InstRW<[ORYONWrite_2Cyc_FP0123], (instregex "^ADDPv", "^FADDPv",
"^(FMAX|FMIN)(NMP|P)v",
"^(S|U)(MIN|MAX)Pv")>;
//==========
-// SIMD dot prodcut instructions
+// SIMD dot product instructions
//==========
def : InstRW<[ORYONWrite_3Cyc_FP0123], (instregex "^(U|S)DOTv")>;
@@ -1581,7 +1581,7 @@ def : InstRW<[ORYONWrite_2Cyc_FP0123], (instrs TBLv8i8One, TBLv16i8One,
TBXv8i8One, TBXv16i8One,
TBLv8i8Two, TBLv16i8Two)>;
-// TBL 3-reg/4-reg, 3uops, throughtput=4/3=1.33 latency=4
+// TBL 3-reg/4-reg, 3uops, throughput=4/3=1.33 latency=4
def : InstRW<[ORYONWrite_4Cyc_FP0123_FP0123_FP0123_RC],
(instrs TBLv8i8Three, TBLv16i8Three,
TBLv8i8Four, TBLv16i8Four)>;
diff --git a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
index 96707f20cd751..a591ba9aceb67 100644
--- a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
@@ -13,7 +13,7 @@
// register. That taint register can then be used to mask off registers with
// sensitive data when executing under miss-speculation, a.k.a. "transient
// execution".
-// This pass is aimed at mitigating against SpectreV1-style vulnarabilities.
+// This pass is aimed at mitigating against SpectreV1-style vulnerabilities.
//
// It also implements speculative load hardening, i.e. using the taint register
// to automatically mask off loaded data.
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index f5ffc72cae537..bd164537ae293 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -406,7 +406,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
}
// Return the known bit length of SVE data registers. A value of 0 means the
- // length is unkown beyond what's implied by the architecture.
+ // length is unknown beyond what's implied by the architecture.
unsigned getSVEVectorSizeInBits() const {
assert(isSVEorStreamingSVEAvailable() &&
"Tried to get SVE vector length without SVE support!");
diff --git a/llvm/lib/Target/AArch64/AArch64SystemOperands.td b/llvm/lib/Target/AArch64/AArch64SystemOperands.td
index 8f6c593d3e681..1f3d619f6dd8c 100644
--- a/llvm/lib/Target/AArch64/AArch64SystemOperands.td
+++ b/llvm/lib/Target/AArch64/AArch64SystemOperands.td
@@ -941,7 +941,7 @@ defm : TLBI<"RVAE3OS", 0b110, 0b1000, 0b0101, 0b001>;
defm : TLBI<"RVALE3OS", 0b110, 0b1000, 0b0101, 0b101>;
} //FeatureTLB_RMI
-// Armv9-A Realm Management Extention TLBI Instructions
+// Armv9-A Realm Management Extension TLBI Instructions
let Requires = ["AArch64::FeatureRME"] in {
defm : TLBI<"RPAOS", 0b110, 0b1000, 0b0100, 0b011>;
defm : TLBI<"RPALOS", 0b110, 0b1000, 0b0100, 0b111>;
@@ -1696,7 +1696,7 @@ def : RWSysReg<"PRLAR_EL2", 0b11, 0b100, 0b0110, 0b1000, 0b001>;
foreach n = 1-15 in {
foreach x = 1-2 in {
-//Direct acces to Protection Region Base Address Register for n th MPU region
+//Direct access to Protection Region Base Address Register for n th MPU region
def : RWSysReg<!strconcat("PRBAR"#n, "_EL"#x),
0b11, 0b000, 0b0110, 0b1000, 0b000>{
let Encoding{5-2} = n;
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 68aec80f07e1d..f8013ac90f1aa 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1167,7 +1167,7 @@ struct SVEIntrinsicInfo {
}
// NOTE: Whilst not limited to only inactive lanes, the common use case is:
- // inactiveLanesAreZerod =
+ // inactiveLanesAreZeroed =
// resultIsZeroInitialized() && inactiveLanesAreUnused()
bool resultIsZeroInitialized() const { return ResultIsZeroInitialized; }
@@ -3958,7 +3958,7 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
scalarization of the division operation.
2. Constant divisors, either negative in whole or partially, don't result in
significantly different codegen as compared to positive constant divisors.
- So, we don't consider negative divisors seperately.
+ So, we don't consider negative divisors separately.
3. If the codegen is significantly different with SVE, it has been indicated
using comments at appropriate places.
@@ -3980,7 +3980,7 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
other sdiv/srem cases:
-------------------------------------------------------------------------
- commom codegen | + srem | + sdiv | pow-of-2 | Type
+ common codegen | + srem | + sdiv | pow-of-2 | Type
-------------------------------------------------------------------------
smulh + asr + add + add | - | - | N | i64
smull + lsr + add + add | - | - | N | i32
@@ -5921,7 +5921,7 @@ static bool areExtractShuffleVectors(Value *Op1, Value *Op2,
!match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
return false;
- // If we allow splats, set S1Op1/S2Op1 to nullptr for the relavant arg so that
+ // If we allow splats, set S1Op1/S2Op1 to nullptr for the relevant arg so that
// it is not checked as an extract below.
if (AllowSplat && isSplatShuffle(Op1))
S1Op1 = nullptr;
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index ce75c052a7123..2f67ff55f26b7 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -1275,7 +1275,7 @@ class AArch64Operand : public MCParsedAsmOperand {
RK = RegKind::SVEPredicateAsCounter;
break;
default:
- llvm_unreachable("Unsupport register class");
+ llvm_unreachable("Unsupported register class");
}
return (Kind == k_Register && Reg.Kind == RK) &&
@@ -1302,7 +1302,7 @@ class AArch64Operand : public MCParsedAsmOperand {
RK = RegKind::SVEPredicateVector;
break;
default:
- llvm_unreachable("Unsupport register class");
+ llvm_unreachable("Unsupported register class");
}
return (Kind == k_Register && Reg.Kind == RK) &&
@@ -5405,7 +5405,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
// A prefix only applies to the instruction following it. Here we extract
// prefix information for the next instruction before validating the current
- // one so that in the case of failure we don't erronously continue using the
+ // one so that in the case of failure we don't erroneously continue using the
// current prefix.
PrefixInfo Prefix = NextPrefix;
NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
@@ -5417,7 +5417,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
(Inst.getOpcode() != AArch64::BRK) &&
(Inst.getOpcode() != AArch64::HLT)) {
- // Prefixed intructions must have a destructive operand.
+ // Prefixed instructions must have a destructive operand.
if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
AArch64::NotDestructive)
return Error(IDLoc, "instruction is unpredictable when following a"
@@ -6407,7 +6407,7 @@ bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
- assert(!Operands.empty() && "Unexpect empty operand list!");
+ assert(!Operands.empty() && "Unexpected empty operand list!");
AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
assert(Op.isToken() && "Leading operand should always be a mnemonic!");
@@ -7942,7 +7942,7 @@ bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
}
std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
- getTargetStreamer().getAtributesSubsectionByName(SubsectionName);
+ getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
// Consume the first parameter (optionality parameter)
AArch64BuildAttributes::SubsectionOptional IsOptional;
@@ -8038,7 +8038,7 @@ bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
return true;
}
- getTargetStreamer().emitAtributesSubsection(SubsectionName, IsOptional, Type);
+ getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
return false;
}
@@ -8050,7 +8050,7 @@ bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
MCAsmParser &Parser = getParser();
std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
- getTargetStreamer().getActiveAtributesSubsection();
+ getTargetStreamer().getActiveAttributesSubsection();
if (nullptr == ActiveSubsection) {
Error(Parser.getTok().getLoc(),
"no active subsection, build attribute can not be added");
diff --git a/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp b/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
index 3f22292c95b6a..f2528bc121045 100644
--- a/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
+++ b/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
@@ -41,12 +41,12 @@ getMachOSpecifier(uint64_t LLVMDisassembler_VariantKind) {
}
}
-/// tryAddingSymbolicOperand - tryAddingSymbolicOperand trys to add a symbolic
+/// tryAddingSymbolicOperand - tryAddingSymbolicOperand tries to add a symbolic
/// operand in place of the immediate Value in the MCInst. The immediate
/// Value has not had any PC adjustment made by the caller. If the instruction
/// is a branch that adds the PC to the immediate Value then isBranch is
/// Success, else Fail. If GetOpInfo is non-null, then it is called to get any
-/// symbolic information at the Address for this instrution. If that returns
+/// symbolic information at the Address for this instruction. If that returns
/// non-zero then the symbolic information it returns is used to create an
/// MCExpr and that is added as an operand to the MCInst. If GetOpInfo()
/// returns zero and isBranch is Success then a symbol look up for
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index fd414977c56ad..010d0aaa46e7f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -282,7 +282,7 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
/// We need to fixup the reported store size for certain value types because
/// we invert the interpretation of ValVT and LocVT in certain cases. This is
- /// for compatability with the DAG call lowering implementation, which we're
+ /// for compatibility with the DAG call lowering implementation, which we're
/// currently building on top of.
LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const override {
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index c7e959b5a9bfa..51b42325ef842 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -6631,7 +6631,7 @@ bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
Register SizeUse = I.getOperand(4).getReg();
// MOPSMemorySetTaggingPseudo has two defs; the intrinsic call has only one.
- // Therefore an additional virtual register is requried for the updated size
+ // Therefore an additional virtual register is required for the updated size
// operand. This value is not accessible via the semantics of the intrinsic.
Register SizeDef = MRI.createGenericVirtualRegister(LLT::scalar(64));
@@ -7419,7 +7419,7 @@ AArch64InstructionSelector::selectAddrModeXRO(MachineOperand &Root,
unsigned Scale = Log2_32(SizeInBytes);
int64_t ImmOff = ValAndVReg->Value.getSExtValue();
- // Skip immediates that can be selected in the load/store addresing
+ // Skip immediates that can be selected in the load/store addressing
// mode.
if (ImmOff % SizeInBytes == 0 && ImmOff >= 0 &&
ImmOff < (0x1000 << Scale))
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 80e098eb1ea15..31954e7954c03 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -490,7 +490,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
}
}
-bool AArch64RegisterBankInfo::isPHIWithFPContraints(
+bool AArch64RegisterBankInfo::isPHIWithFPConstraints(
const MachineInstr &MI, const MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI, const unsigned Depth) const {
if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
@@ -500,7 +500,7 @@ bool AArch64RegisterBankInfo::isPHIWithFPContraints(
[&](const MachineInstr &UseMI) {
if (onlyUsesFP(UseMI, MRI, TRI, Depth + 1))
return true;
- return isPHIWithFPContraints(UseMI, MRI, TRI, Depth + 1);
+ return isPHIWithFPConstraints(UseMI, MRI, TRI, Depth + 1);
});
}
@@ -897,7 +897,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Int->FP conversion operations are also captured in
// onlyDefinesFP().
- if (isPHIWithFPContraints(UseMI, MRI, TRI))
+ if (isPHIWithFPConstraints(UseMI, MRI, TRI))
return true;
return onlyUsesFP(UseMI, MRI, TRI) ||
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
index 941499b08d05d..3abbc1b68b5be 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
@@ -122,7 +122,7 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
/// \returns true if \p MI is a PHI that its def is used by
/// any instruction that onlyUsesFP.
- bool isPHIWithFPContraints(const MachineInstr &MI,
+ bool isPHIWithFPConstraints(const MachineInstr &MI,
const MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI,
unsigned Depth = 0) const;
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 03cbd272757e7..f542592d22c5f 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -97,7 +97,7 @@ static inline unsigned getShiftValue(unsigned Imm) {
/// {5-0} = imm
static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
unsigned Imm) {
- assert((Imm & 0x3f) == Imm && "Illegal shifted immedate value!");
+ assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
unsigned STEnc = 0;
switch (ST) {
default: llvm_unreachable("Invalid shift requested");
@@ -169,7 +169,7 @@ inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
/// {2-0} = imm3
static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
unsigned Imm) {
- assert((Imm & 0x7) == Imm && "Illegal shifted immedate value!");
+ assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
}
@@ -594,7 +594,7 @@ static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
#if defined(_MSC_VER) && _MSC_VER == 1937 && !defined(__clang__) && \
defined(_M_ARM64)
// The MSVC compiler 19.37 for ARM64 has an optimization bug that
- // causes an incorrect behavior with the orignal version. Work around
+ // causes an incorrect behavior with the original version. Work around
// by using a slightly different variation.
// https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
constexpr uint64_t Mask = 0xFFULL;
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index 6ee12ccf5494d..f2144375fd95e 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -231,7 +231,7 @@ class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
OS << "\n";
}
- void emitAtributesSubsection(
+ void emitAttributesSubsection(
StringRef SubsectionName,
AArch64BuildAttributes::SubsectionOptional Optional,
AArch64BuildAttributes::SubsectionType ParameterType) override {
@@ -278,7 +278,7 @@ class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
<< ", " << ParameterStr;
// Keep the data structure consistent with the case of ELF emission
// (important for llvm-mc asm parsing)
- AArch64TargetStreamer::emitAtributesSubsection(SubsectionName, Optional,
+ AArch64TargetStreamer::emitAttributesSubsection(SubsectionName, Optional,
ParameterType);
OS << "\n";
}
@@ -433,10 +433,10 @@ AArch64ELFStreamer &AArch64TargetELFStreamer::getStreamer() {
return static_cast<AArch64ELFStreamer &>(Streamer);
}
-void AArch64TargetELFStreamer::emitAtributesSubsection(
+void AArch64TargetELFStreamer::emitAttributesSubsection(
StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional,
AArch64BuildAttributes::SubsectionType ParameterType) {
- AArch64TargetStreamer::emitAtributesSubsection(VendorName, IsOptional,
+ AArch64TargetStreamer::emitAttributesSubsection(VendorName, IsOptional,
ParameterType);
}
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
index 5552cea78694d..9d9e23e99ab3b 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
@@ -216,7 +216,7 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
(ImmR == 0 || ImmS < ImmR) && STI.hasFeature(AArch64::HasV8_2aOps)) {
- // BFC takes precedence over its entire range, sligtly differently to BFI.
+ // BFC takes precedence over its entire range, slightly differently to BFI.
int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
int LSB = (BitWidth - ImmR) % BitWidth;
int Width = ImmS + 1;
@@ -2051,7 +2051,7 @@ void AArch64InstPrinter::printImm8OptLsl(const MCInst *MI, unsigned OpNum,
unsigned UnscaledVal = MI->getOperand(OpNum).getImm();
unsigned Shift = MI->getOperand(OpNum + 1).getImm();
assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&
- "Unexepected shift type!");
+ "Unexpected shift type!");
// #0 lsl #8 is never pretty printed
if ((UnscaledVal == 0) && (AArch64_AM::getShiftValue(Shift) != 0)) {
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index dd3ecb41494e0..b7959e02ec268 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -436,7 +436,7 @@ class AArch64MCInstrAnalysis : public MCInstrAnalysis {
// architecturally defined to zero extend the upper 32 bits on a write.
if (GPR32RC.contains(Reg))
return true;
- // SIMD&FP instructions operating on scalar data only acccess the lower
+ // SIMD&FP instructions operating on scalar data only access the lower
// bits of a register, the upper bits are zero extended on a write. For
// SIMD vector registers smaller than 128-bits, the upper 64-bits of the
// register are zero extended on a write.
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
index c5fb7f56e3ef7..d742b282b617c 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
@@ -29,7 +29,7 @@ static cl::opt<bool> MarkBTIProperty(
cl::init(false));
//
-// AArch64TargetStreamer Implemenation
+// AArch64TargetStreamer Implementation
//
AArch64TargetStreamer::AArch64TargetStreamer(MCStreamer &S)
: MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
@@ -153,14 +153,14 @@ MCTargetStreamer *llvm::createAArch64NullTargetStreamer(MCStreamer &S) {
return new AArch64TargetStreamer(S);
}
-void AArch64TargetStreamer::emitAtributesSubsection(
+void AArch64TargetStreamer::emitAttributesSubsection(
StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional,
AArch64BuildAttributes::SubsectionType ParameterType) {
// If exists, return.
for (MCELFStreamer::AttributeSubSection &SubSection : AttributeSubSections) {
if (VendorName == SubSection.VendorName) {
- activateAtributesSubsection(VendorName);
+ activateAttributesSubsection(VendorName);
return;
}
}
@@ -170,11 +170,11 @@ void AArch64TargetStreamer::emitAtributesSubsection(
AttSubSection.IsOptional = IsOptional;
AttSubSection.ParameterType = ParameterType;
AttributeSubSections.push_back(AttSubSection);
- activateAtributesSubsection(VendorName);
+ activateAttributesSubsection(VendorName);
}
std::unique_ptr<MCELFStreamer::AttributeSubSection>
-AArch64TargetStreamer::getActiveAtributesSubsection() {
+AArch64TargetStreamer::getActiveAttributesSubsection() {
for (MCELFStreamer::AttributeSubSection &SubSection : AttributeSubSections) {
if (SubSection.IsActive) {
return std::make_unique<MCELFStreamer::AttributeSubSection>(SubSection);
@@ -184,7 +184,7 @@ AArch64TargetStreamer::getActiveAtributesSubsection() {
}
std::unique_ptr<MCELFStreamer::AttributeSubSection>
-AArch64TargetStreamer::getAtributesSubsectionByName(StringRef Name) {
+AArch64TargetStreamer::getAttributesSubsectionByName(StringRef Name) {
for (MCELFStreamer::AttributeSubSection &SubSection : AttributeSubSections) {
if (Name == SubSection.VendorName) {
return std::make_unique<MCELFStreamer::AttributeSubSection>(SubSection);
@@ -238,7 +238,7 @@ void AArch64TargetStreamer::emitAttribute(StringRef VendorName, unsigned Tag,
"not exist");
}
-void AArch64TargetStreamer::activateAtributesSubsection(StringRef VendorName) {
+void AArch64TargetStreamer::activateAttributesSubsection(StringRef VendorName) {
for (MCELFStreamer::AttributeSubSection &SubSection : AttributeSubSections) {
if (VendorName == SubSection.VendorName) {
SubSection.IsActive = true;
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
index aa26acd85bdae..d878f1e044b8f 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
@@ -102,16 +102,16 @@ class AArch64TargetStreamer : public MCTargetStreamer {
/// Build attributes implementation
virtual void
- emitAtributesSubsection(StringRef VendorName,
+ emitAttributesSubsection(StringRef VendorName,
AArch64BuildAttributes::SubsectionOptional IsOptional,
AArch64BuildAttributes::SubsectionType ParameterType);
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value,
std::string String);
- void activateAtributesSubsection(StringRef VendorName);
+ void activateAttributesSubsection(StringRef VendorName);
std::unique_ptr<MCELFStreamer::AttributeSubSection>
- getActiveAtributesSubsection();
+ getActiveAttributesSubsection();
std::unique_ptr<MCELFStreamer::AttributeSubSection>
- getAtributesSubsectionByName(StringRef Name);
+ getAttributesSubsectionByName(StringRef Name);
void
insertAttributeInPlace(const MCELFStreamer::AttributeItem &Attr,
MCELFStreamer::AttributeSubSection &AttSubSection);
@@ -129,7 +129,7 @@ class AArch64TargetELFStreamer : public AArch64TargetStreamer {
MCSection *AttributeSection = nullptr;
/// Build attributes implementation
- void emitAtributesSubsection(
+ void emitAttributesSubsection(
StringRef VendorName,
AArch64BuildAttributes::SubsectionOptional IsOptional,
AArch64BuildAttributes::SubsectionType ParameterType) override;
diff --git a/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp b/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp
index fc8bef4ad8420..bd28716118880 100644
--- a/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp
@@ -225,7 +225,7 @@ bool SMEPeepholeOpt::optimizeStartStopPairs(
}
// Using the FORM_TRANSPOSED_REG_TUPLE pseudo can improve register allocation
-// of multi-vector intrinsics. However, the psuedo should only be emitted if
+// of multi-vector intrinsics. However, the pseudo should only be emitted if
// the input registers of the REG_SEQUENCE are copy nodes where the source
// register is in a StridedOrContiguous class. For example:
//