[llvm-branch-commits] [ARM] Fix more typos (NFC) (PR #183087)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Feb 24 08:04:32 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-arm

Author: Jonathan Thackray (jthackray)

<details>
<summary>Changes</summary>

Fix more typos in the ARM codebase using the
https://github.com/crate-ci/typos Rust package.

commit-id:33a1bb8d


---

Patch is 37.40 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/183087.diff


29 Files Affected:

- (modified) llvm/lib/Target/ARM/A15SDOptimizer.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/ARMAsmPrinter.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp (+9-9) 
- (modified) llvm/lib/Target/ARM/ARMCallLowering.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/ARMConstantIslandPass.cpp (+5-5) 
- (modified) llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMFrameLowering.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+9-9) 
- (modified) llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (+5-5) 
- (modified) llvm/lib/Target/ARM/ARMParallelDSP.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMSLSHardening.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/ARMSubtarget.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp (+4-4) 
- (modified) llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (+6-6) 
- (modified) llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MLxExpansionPass.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MVETailPredication.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/Thumb1FrameLowering.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/Utils/ARMBaseInfo.cpp (+1-1) 


``````````diff
diff --git a/llvm/lib/Target/ARM/A15SDOptimizer.cpp b/llvm/lib/Target/ARM/A15SDOptimizer.cpp
index cd775dd5aa7a6..9a86d0ea9f148 100644
--- a/llvm/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/llvm/lib/Target/ARM/A15SDOptimizer.cpp
@@ -577,12 +577,12 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
   //                      lane, and the other lane(s) of the DPR/QPR register
   //                      that we are inserting in are undefined, use the
   //                      original DPR/QPR value.
-  //                    * Otherwise, fall back on the same stategy as COPY.
+  //                    * Otherwise, fall back on the same strategy as COPY.
   //
   //   * REG_SEQUENCE:  * If all except one of the input operands are
   //                      IMPLICIT_DEFs, insert the VDUP pattern for just the
   //                      defined input operand
-  //                    * Otherwise, fall back on the same stategy as COPY.
+  //                    * Otherwise, fall back on the same strategy as COPY.
   //
 
   // First, get all the reads of D-registers done by this instruction.
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 1f894bb76c027..2f37fe458e1ec 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -420,7 +420,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
 
       // 'Q' should correspond to the low order register and 'R' to the high
       // order register.  Whether this corresponds to the upper or lower half
-      // depends on the endianess mode.
+      // depends on the endianness mode.
       if (ExtraCode[0] == 'Q')
         FirstHalf = ATM.isLittleEndian();
       else
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index bc5a89bc5d7f4..52990402cf24e 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -567,7 +567,7 @@ bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
   if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
     return false;
 
-  // Make indirect control flow changes unpredicable when SLS mitigation is
+  // Make indirect control flow changes unpredictable when SLS mitigation is
   // enabled.
   const ARMSubtarget &ST = MF->getSubtarget<ARMSubtarget>();
   if (ST.hardenSlsRetBr() && isIndirectControlFlowNotComingBack(MI))
@@ -893,7 +893,7 @@ ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   // VMOVRRD is also a copy instruction but it requires
   // special way of handling. It is more complex copy version
   // and since that we are not considering it. For recognition
-  // of such instruction isExtractSubregLike MI interface fuction
+  // of such instruction isExtractSubregLike MI interface function
   // could be used.
   // VORRq is considered as a move only if two inputs are
   // the same register.
@@ -1854,7 +1854,7 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
 
 /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
-/// be scheduled togther. On some targets if two loads are loading from
+/// be scheduled together. On some targets if two loads are loading from
 /// addresses in the same cache line, it's better if they are scheduled
 /// together. This function takes two integers that represent the load offsets
 /// from the common base address. It returns true if it decides it's desirable
@@ -1980,7 +1980,7 @@ isProfitableToIfCvt(MachineBasicBlock &TBB,
 
   // In thumb code we often end up trading one branch for a IT block, and
   // if we are cloning the instruction can increase code size. Prevent
-  // blocks with multiple predecesors from being ifcvted to prevent this
+  // blocks with multiple predecessors from being ifcvted to prevent this
   // cloning.
   if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
     if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
@@ -2012,7 +2012,7 @@ isProfitableToIfCvt(MachineBasicBlock &TBB,
       // discount it from PredCost.
       PredCost -= 1 * ScalingUpFactor;
     }
-    // The total cost is the cost of each path scaled by their probabilites
+    // The total cost is the cost of each path scaled by their probabilities
     unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
     unsigned FUnpredCost = Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
     UnpredCost = TUnpredCost + FUnpredCost;
@@ -2227,7 +2227,7 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
   SeenMIs.erase(DefMI);
 
   // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
-  // DefMI would be invalid when tranferred inside the loop.  Checking for a
+  // DefMI would be invalid when transferred inside the loop.  Checking for a
   // loop is expensive, but at least remove kill flags if they are in different
   // BBs.
   if (DefMI->getParent() != MI.getParent())
@@ -2494,7 +2494,7 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
       return true;
     }
 
-    // Otherwise, pull as much of the immedidate into this ADDri/SUBri
+    // Otherwise, pull as much of the immediate into this ADDri/SUBri
     // as possible.
     unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
     unsigned ThisImmVal = Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);
@@ -6307,7 +6307,7 @@ void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
   int LROffset = Auth ? Align - 4 : Align;
   CFIBuilder.buildOffset(ARM::LR, -LROffset);
   if (Auth) {
-    // Add a CFI for the location of the return adddress PAC.
+    // Add a CFI for the location of the return address PAC.
     CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -Align);
   }
 }
@@ -6388,7 +6388,7 @@ void ARMBaseInstrInfo::buildOutlinedFrame(
       Et = std::prev(MBB.end());
 
     // We have to save and restore LR, we need to add it to the liveins if it
-    // is not already part of the set.  This is suffient since outlined
+    // is not already part of the set.  This is sufficient since outlined
     // functions only have one block.
     if (!MBB.isLiveIn(ARM::LR))
       MBB.addLiveIn(ARM::LR);
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index 95fe605bb5c4b..499759e311c37 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -141,7 +141,7 @@ struct ARMOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
   unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                              ArrayRef<CCValAssign> VAs,
                              std::function<void()> *Thunk) override {
-    assert(Arg.Regs.size() == 1 && "Can't handle multple regs yet");
+    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");
 
     const CCValAssign &VA = VAs[0];
     assert(VA.needsCustom() && "Value doesn't need custom handling");
@@ -317,7 +317,7 @@ struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
   unsigned assignCustomValue(ARMCallLowering::ArgInfo &Arg,
                              ArrayRef<CCValAssign> VAs,
                              std::function<void()> *Thunk) override {
-    assert(Arg.Regs.size() == 1 && "Can't handle multple regs yet");
+    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");
 
     const CCValAssign &VA = VAs[0];
     assert(VA.needsCustom() && "Value doesn't need custom handling");
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 80494d993f425..7028a6a22cf05 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -349,14 +349,14 @@ static bool AlignBlocks(MachineFunction *MF, const ARMSubtarget *STI) {
     return false;
 
   bool Changed = false;
-  bool PrevCanFallthough = true;
+  bool PrevCanFallthrough = true;
   for (auto &MBB : *MF) {
-    if (!PrevCanFallthough) {
+    if (!PrevCanFallthrough) {
       Changed = true;
       MBB.setAlignment(Alignment);
     }
 
-    PrevCanFallthough = MBB.canFallThrough();
+    PrevCanFallthrough = MBB.canFallThrough();
 
     // For LOB's, the ARMLowOverheadLoops pass may remove the unconditional
     // branch later in the pipeline.
@@ -367,7 +367,7 @@ static bool AlignBlocks(MachineFunction *MF, const ARMSubtarget *STI) {
           continue;
         if (isLoopStart(MI) || MI.getOpcode() == ARM::t2LoopEnd ||
             MI.getOpcode() == ARM::t2LoopEndDec) {
-          PrevCanFallthough = true;
+          PrevCanFallthrough = true;
           break;
         }
         // Any other terminator - nothing to do
@@ -1436,7 +1436,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
     // If the CP is referenced(ie, UserOffset) is in first four instructions
     // after IT, this recalculated BaseInsertOffset could be in the middle of
     // an IT block. If it is, change the BaseInsertOffset to just after the
-    // IT block. This still make the CP Entry is in range becuase of the
+    // IT block. This still make the CP Entry is in range because of the
     // following reasons.
     //   1. The initial BaseseInsertOffset calculated is (UserOffset +
     //   U.getMaxDisp() - UPad).
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 14e4c19a8ac1a..276736333fb5d 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -2532,7 +2532,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
       case ARM::t2MOVCClsr: NewOpc = ARM::t2LSRri; break;
       case ARM::t2MOVCCasr: NewOpc = ARM::t2ASRri; break;
       case ARM::t2MOVCCror: NewOpc = ARM::t2RORri; break;
-      default: llvm_unreachable("unexpeced conditional move");
+      default: llvm_unreachable("unexpected conditional move");
       }
       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
               MI.getOperand(1).getReg())
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index a0b975072bd8e..8e1571fac0de1 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -2473,7 +2473,7 @@ checkNumAlignedDPRCS2Regs(MachineFunction &MF, BitVector &SavedRegs) {
 
 bool ARMFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
   // For CMSE entry functions, we want to save the FPCXT_NS immediately
-  // upon function entry (resp. restore it immmediately before return)
+  // upon function entry (resp. restore it immediately before return)
   if (STI.hasV8_1MMainlineOps() &&
       MF.getInfo<ARMFunctionInfo>()->isCmseNSEntryFunction())
     return false;
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 26b5e5a22386e..5388bd5150896 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2915,7 +2915,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
     Ops.push_back(PredReg);
   }
 
-  // Create the CDE intruction
+  // Create the CDE instruction
   SDNode *InstrNode = CurDAG->getMachineNode(Opcode, Loc, MVT::Untyped, Ops);
   SDValue ResultPair = SDValue(InstrNode, 0);
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a086589705bb5..3ad953c567c40 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3543,7 +3543,7 @@ static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
   // size (implying the constant is no larger than 4 bytes).
   const Function &F = DAG.getMachineFunction().getFunction();
 
-  // We rely on this decision to inline being idemopotent and unrelated to the
+  // We rely on this decision to inline being idempotent and unrelated to the
   // use-site. We know that if we inline a variable at one use site, we'll
   // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
   // doesn't know about this optimization, so bail out if it's enabled else
@@ -4985,7 +4985,7 @@ static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
 // pattern. This function tries to match one of these and will return a SSAT
 // node if successful.
 //
-// USAT works similarily to SSAT but bounds on the interval [0, k] where k + 1
+// USAT works similarly to SSAT but bounds on the interval [0, k] where k + 1
 // is a power of 2.
 static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) {
   EVT VT = Op.getValueType();
@@ -7313,7 +7313,7 @@ static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) {
   if (NumElts != M.size())
     return false;
 
-  // Test if the Trunc can be convertable to a VMOVN with this shuffle. We are
+  // Test if the Trunc can be convertible to a VMOVN with this shuffle. We are
   // looking for patterns of:
   // !rev: 0 N/2 1 N/2+1 2 N/2+2 ...
   //  rev: N/2 0 N/2+1 1 N/2+2 2 ...
@@ -14924,7 +14924,7 @@ static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) {
 }
 
 static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) {
-  // Fold away an unneccessary CMPZ/CSINC
+  // Fold away an unnecessary CMPZ/CSINC
   // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) ->
   // if C1==EQ -> CSXYZ A, B, C2, D
   // if C1==NE -> CSXYZ A, B, NOT(C2), D
@@ -15918,7 +15918,7 @@ static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target,
   //   intrinsics, so, likewise, there's nothing to do.
   // - generic load/store instructions: the alignment is specified as an
   //   explicit operand, rather than implicitly as the standard alignment
-  //   of the memory type (like the intrisics).  We need to change the
+  //   of the memory type (like the intrinsics).  We need to change the
   //   memory type to match the explicit alignment.  That way, we don't
   //   generate non-standard-aligned ARMISD::VLDx nodes.
   if (isa<LSBaseSDNode>(N)) {
@@ -16548,7 +16548,7 @@ static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
   if (FromVT.getVectorNumElements() % NumElements != 0)
     return SDValue();
 
-  // Test if the Trunc will be convertable to a VMOVN with a shuffle, and if so
+  // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
   // use the VMOVN over splitting the store. We are looking for patterns of:
   // !rev: 0 N 1 N+1 2 N+2 ...
   //  rev: N 0 N+1 1 N+2 2 ...
@@ -16808,7 +16808,7 @@ static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
     // These instructions only exist converting from f32 to i32. We can handle
     // smaller integers by generating an extra truncate, but larger ones would
     // be lossy. We also can't handle anything other than 2 or 4 lanes, since
-    // these intructions only support v2i32/v4i32 types.
+    // these instructions only support v2i32/v4i32 types.
     return SDValue();
   }
 
@@ -16949,7 +16949,7 @@ static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG,
     // These instructions only exist converting from i32 to f32. We can handle
     // smaller integers by generating an extra extend, but larger ones would
     // be lossy. We also can't handle anything other than 2 or 4 lanes, since
-    // these intructions only support v2i32/v4i32 types.
+    // these instructions only support v2i32/v4i32 types.
     return SDValue();
   }
 
@@ -18335,7 +18335,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
   if (!VT.isInteger())
       return SDValue();
 
-  // Fold away an unneccessary CMPZ/CMOV
+  // Fold away an unnecessary CMPZ/CMOV
   // CMOV A, B, C1, (CMPZ (CMOV 1, 0, C2, D), 0) ->
   // if C1==EQ -> CMOV A, B, C2, D
   // if C1==NE -> CMOV A, B, NOT(C2), D
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index db37b769efcad..821094dfad20e 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1577,7 +1577,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
   } else {
     MachineOperand &MO = MI->getOperand(0);
     // FIXME: post-indexed stores use am2offset_imm, which still encodes
-    // the vestigal zero-reg offset register. When that's fixed, this clause
+    // the vestigial zero-reg offset register. When that's fixed, this clause
     // can be removed entirely.
     if (isAM2 && NewOpc == ARM::STR_POST_IMM) {
       int Imm = ARM_AM::getAM2Opc(AddSub, abs(Offset), ARM_AM::no_shift);
@@ -2363,7 +2363,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(
       if (NumMove == InstReorderLimit)
         break;
 
-      // Found a mergable instruction; save information about it.
+      // Found a mergeable instruction; save information about it.
       ++NumMove;
       LastOffset = Offset;
       LastBytes = Bytes;
@@ -2984,7 +2984,7 @@ static bool isPreIndex(MachineInstr &MI) {
 // could be easily converted to one where that was valid. For example converting
 // t2LDRi12 to t2LDRi8 for negative offsets. Works in conjunction with
 // AdjustBaseAndOffset below.
-static bool isLegalOrConvertableAddressImm(unsigned Opcode, int Imm,
+static bool isLegalOrConvertibleAddressImm(unsigned Opcode, int Imm,
                                            const TargetInstrInfo *TII,
                                            int &CodesizeEstimate) {
   if (isLegalAddressImm(Opcode, Imm, TII))
@@ -3042,7 +3042,7 @@ static void AdjustBaseAndOffset(MachineInstr *MI, Register NewBaseReg,
       ConvOpcode = ARM::t2STRBi8;
       break;
     default:
-      llvm_unreachable("Unhandled convertable opcode");
+      llvm_unreachable("Unhandled convertible opcode");
     }
     assert(isLegalAddressImm(ConvOpcode, OldOffset - Offset, TII) &&
            "Illegal Address Immediate after convert!");
@@ -3229,7 +3229,7 @@ bool ARMPreAllocLoadStoreOpt::DistributeIncrements(Register Base) {
     if (DT->dominates(BaseAccess, Use)) {
       SuccessorAccesses.insert(Use);
       unsigned BaseOp = getBaseOperandIndex(*Use);
-      if (!isLegalOrConvertableAddressImm(Use->getOpcode(),
+      if (!isLegalOrConvertibleAddressImm(Use->getOpcode(),
                                           Use->getOperand(BaseOp + 1).getImm() -
                                               IncrementOffset,
                                           TII, CodesizeEstimate)) {
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index 05667f4f8619e..5324e97d259fe 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -424,7 +424,7 @@ bool ARMParallelDSP::RecordMemoryOps(BasicBlock *BB) {
 // Search recursively back through the operands to find a tree of values that
 // form a multiply-accumulate chain. The search records the Add and Mul
 // instructions that form the reduction and allows us to find a single value
-// to be used as the initial input to the accumlator.
+// to be used as the initial input to the accumulator.
 bool ARMParallelDSP::Search(Value *V, BasicBlock *BB, Reduction &R) {
   // If we find a non-instruction, try to use it as the initial accumulator
   // value. This may have already been found during the search in which case
diff --git a/llvm/lib/Target/ARM/ARMSLSHardening.cpp b/llvm/lib/Target/ARM/ARMSLSHardening.cpp
index 23acc3cfba68e..03f461f536eb0 100644
--- a/llvm/lib/Target/ARM/ARMSLSHardening.cpp
+++ b/llvm/lib/Target/ARM/ARMSLSHardening.cpp
@@ -322,7 +322,7 @@ MachineBasicBlock &ARMSLSHardening::ConvertIndirectCallToIndirectJump(
 
   // Now copy the implicit operands from IndirectCall to BL and copy other
   // necessary info.
-  // However, both IndirectCall and BL instructions implictly use SP and
+  // However, both IndirectCall and BL instructions implicitly use SP and
   // implicitly define LR. Blindly copying implicit operands would result in SP
   // and LR operands to be present multiple times. While this may not be too
   // much of an issue, let's avoid that for cleanliness, by removing those
diff --git a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 475fad2d02a12..981264aac372d 100644
--- a/llvm/l...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/183087


More information about the llvm-branch-commits mailing list