[clang-tools-extra] [mlir] [NFC][MLIR] Fix some typos (PR #108355)

via cfe-commits cfe-commits at lists.llvm.org
Thu Sep 12 03:11:46 PDT 2024


https://github.com/VitalyAnkh created https://github.com/llvm/llvm-project/pull/108355

None

From 65c7658359b856bd736229d501def16027fd2a42 Mon Sep 17 00:00:00 2001
From: VitalyR <vitalyankh at gmail.com>
Date: Thu, 12 Sep 2024 18:10:02 +0800
Subject: [PATCH] [NFC][MLIR] Fix some typos

---
 .../RestrictSystemLibcHeadersCheck.cpp        | 14 ++++----
 mlir/docs/Canonicalization.md                 | 12 +++----
 mlir/docs/Dialects/Linalg/_index.md           |  2 +-
 mlir/docs/PDLL.md                             |  2 +-
 mlir/docs/Rationale/RationaleLinalgDialect.md |  2 +-
 mlir/docs/SPIRVToLLVMDialectConversion.md     |  2 +-
 mlir/docs/Tutorials/Toy/Ch-4.md               |  2 +-
 mlir/docs/doxygen.cfg.in                      |  2 +-
 mlir/include/mlir/AsmParser/AsmParser.h       |  4 +--
 .../mlir/Bytecode/BytecodeImplementation.h    |  2 +-
 mlir/include/mlir/Bytecode/BytecodeReader.h   |  2 +-
 .../Conversion/TosaToLinalg/TosaToLinalg.h    |  2 +-
 .../mlir/Conversion/VectorToGPU/VectorToGPU.h |  2 +-
 .../FileLineColLocBreakpointManager.h         |  2 +-
 .../mlir/Dialect/Arith/IR/ArithBase.td        |  2 +-
 .../IR/BufferizableOpInterface.h              |  2 +-
 mlir/include/mlir/Dialect/DLTI/DLTIAttrs.td   |  2 +-
 mlir/include/mlir/Dialect/DLTI/DLTIBase.td    |  6 ++--
 mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td |  2 +-
 .../Dialect/Linalg/Transforms/Transforms.h    |  2 +-
 .../include/mlir/Dialect/Linalg/Utils/Utils.h |  2 +-
 .../mlir/Dialect/MemRef/IR/MemRefOps.td       |  2 +-
 .../mlir/Dialect/SCF/Transforms/Transforms.h  |  2 +-
 mlir/include/mlir/Dialect/SCF/Utils/Utils.h   |  2 +-
 .../SparseTensor/IR/SparseTensorOps.td        |  2 +-
 .../Tensor/TransformOps/TensorTransformOps.td |  2 +-
 mlir/include/mlir/IR/AttrTypeBase.td          |  2 +-
 mlir/include/mlir/IR/OpImplementation.h       |  2 +-
 mlir/include/mlir/IR/OperationSupport.h       |  2 +-
 mlir/include/mlir/IR/Threading.h              |  2 +-
 .../mlir/Interfaces/DataLayoutInterfaces.h    |  4 +--
 .../mlir/Interfaces/DataLayoutInterfaces.td   |  6 ++--
 .../Interfaces/Utils/InferIntRangeCommon.h    |  2 +-
 mlir/include/mlir/Tools/PDLL/AST/Nodes.h      |  4 +--
 mlir/include/mlir/Tools/PDLL/AST/Types.h      |  4 +--
 mlir/include/mlir/Tools/PDLL/ODS/Context.h    |  2 +-
 mlir/include/mlir/Tools/PDLL/ODS/Dialect.h    |  2 +-
 mlir/include/mlir/Tools/PDLL/ODS/Operation.h  | 10 +++---
 mlir/include/mlir/Tools/ParseUtilities.h      |  2 +-
 .../mlir/Tools/lsp-server-support/Protocol.h  |  2 +-
 .../mlir/Transforms/DialectConversion.h       |  6 ++--
 mlir/include/mlir/Transforms/Inliner.h        |  2 +-
 .../mlir/Transforms/OneToNTypeConversion.h    |  4 +--
 mlir/include/mlir/Transforms/Passes.td        |  6 ++--
 mlir/include/mlir/Transforms/RegionUtils.h    |  2 +-
 mlir/lib/Bindings/Python/IRModule.h           |  2 +-
 .../Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp    |  2 +-
 .../PDLToPDLInterp/PDLToPDLInterp.cpp         |  2 +-
 .../Conversion/SCFToOpenMP/SCFToOpenMP.cpp    |  2 +-
 .../Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp    |  2 +-
 mlir/lib/Dialect/Affine/IR/AffineOps.cpp      |  2 +-
 mlir/lib/Dialect/Affine/Utils/Utils.cpp       |  2 +-
 .../ArmSME/Transforms/VectorLegalization.cpp  |  2 +-
 .../Bufferization/IR/BufferizationOps.cpp     |  2 +-
 .../Bufferization/Transforms/BufferUtils.cpp  |  2 +-
 mlir/lib/Dialect/DLTI/DLTI.cpp                | 20 ++++++------
 .../GPU/Transforms/AllReduceLowering.cpp      |  2 +-
 mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp |  2 +-
 .../Linalg/Transforms/DropUnitDims.cpp        |  2 +-
 .../Linalg/Transforms/ElementwiseOpFusion.cpp |  2 +-
 .../EraseUnusedOperandsAndResults.cpp         |  2 +-
 .../Linalg/Transforms/NamedOpConversions.cpp  |  2 +-
 .../Linalg/Transforms/Vectorization.cpp       |  4 +--
 mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp      |  2 +-
 .../MemRef/Transforms/EmulateWideInt.cpp      |  2 +-
 mlir/lib/Dialect/SCF/Utils/Utils.cpp          |  2 +-
 .../Transforms/UnifyAliasedResourcePass.cpp   |  2 +-
 .../IR/TensorInferTypeOpInterfaceImpl.cpp     |  8 ++---
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp      |  2 +-
 mlir/lib/IR/BuiltinTypes.cpp                  |  2 +-
 mlir/lib/Interfaces/DataLayoutInterfaces.cpp  | 10 +++---
 mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp | 12 +++----
 mlir/lib/Target/LLVMIR/DataLayoutImporter.h   |  2 +-
 mlir/lib/Target/LLVMIR/ModuleTranslation.cpp  |  4 +--
 mlir/lib/Tools/PDLL/ODS/Context.cpp           |  4 +--
 mlir/lib/Tools/PDLL/ODS/Dialect.cpp           |  4 +--
 mlir/lib/Tools/PDLL/ODS/Operation.cpp         |  4 +--
 mlir/lib/Tools/PDLL/Parser/Parser.cpp         | 32 +++++++++----------
 mlir/lib/Transforms/InlinerPass.cpp           |  2 +-
 mlir/lib/Transforms/RemoveDeadValues.cpp      |  8 ++---
 .../Utils/GreedyPatternRewriteDriver.cpp      |  2 +-
 mlir/lib/Transforms/Utils/Inliner.cpp         |  2 +-
 .../ArithToEmitC/arith-to-emitc.mlir          |  8 ++---
 .../Conversion/ArithToLLVM/arith-to-llvm.mlir |  8 ++---
 .../ArithToSPIRV/arith-to-spirv.mlir          |  2 +-
 .../convert-to-standard.mlir                  |  6 ++--
 .../SPIRVToLLVM/comparison-ops-to-llvm.mlir   |  4 +--
 mlir/test/Dialect/Arith/canonicalize.mlir     |  8 ++---
 mlir/test/Dialect/Arith/ops.mlir              |  2 +-
 .../Dialect/ControlFlow/canonicalize.mlir     | 16 +++++-----
 .../Linalg/convert-elementwise-to-linalg.mlir |  8 ++---
 .../Dialect/Linalg/match-ops-interpreter.mlir |  2 +-
 .../transform-op-peel-and-vectorize-conv.mlir |  2 +-
 .../transform-op-peel-and-vectorize.mlir      |  2 +-
 mlir/test/Dialect/Math/expand-math.mlir       | 10 +++---
 .../Math/polynomial-approximation.mlir        | 16 +++++-----
 .../MemRef/expand-strided-metadata.mlir       |  2 +-
 mlir/test/Dialect/PDLInterp/ops.mlir          |  2 +-
 .../SCF/for-loop-canonicalization.mlir        |  4 +--
 mlir/test/Dialect/Vector/vector-sink.mlir     |  4 +--
 mlir/test/IR/core-ops.mlir                    |  4 +--
 .../SparseTensor/CPU/dense_output_bf16.mlir   |  2 +-
 .../SparseTensor/CPU/dense_output_f16.mlir    |  2 +-
 .../CPU/sparse_reduce_custom.mlir             |  4 +--
 .../SparseTensor/CPU/sparse_unary.mlir        |  2 +-
 .../GPU/SYCL/gpu-reluf32-to-spirv.mlir        |  2 +-
 mlir/test/Rewrite/pdl-bytecode.mlir           |  2 +-
 .../test/Target/LLVMIR/Import/instructions.ll |  4 +--
 mlir/test/Target/LLVMIR/llvmir.mlir           |  4 +--
 mlir/test/Transforms/constant-fold.mlir       |  6 ++--
 .../lib/Dialect/DLTI/TestDataLayoutQuery.cpp  |  2 +-
 mlir/test/lib/Dialect/Test/TestOps.td         |  2 +-
 mlir/test/lib/IR/TestSymbolUses.cpp           |  2 +-
 mlir/test/mlir-tblgen/trait.mlir              |  6 ++--
 .../mlir-vulkan-runner/VulkanRuntime.cpp      |  2 +-
 .../FileLineColLocBreakpointManagerTest.cpp   |  2 +-
 .../Dialect/OpenACC/OpenACCOpsTest.cpp        |  2 +-
 mlir/unittests/IR/AttrTypeReplacerTest.cpp    | 14 ++++----
 mlir/unittests/IR/InterfaceAttachmentTest.cpp |  6 ++--
 mlir/unittests/IR/OpPropertiesTest.cpp        |  2 +-
 .../Interfaces/DataLayoutInterfacesTest.cpp   | 18 +++++------
 mlir/utils/emacs/mlir-lsp-client.el           |  4 +--
 mlir/utils/generate-test-checks.py            |  4 +--
 mlir/utils/spirv/gen_spirv_dialect.py         |  8 ++---
 mlir/utils/tree-sitter-mlir/dialect/arith.js  |  2 +-
 .../tree-sitter-mlir/queries/highlights.scm   |  4 +--
 .../utils/tree-sitter-mlir/test/corpus/op.txt |  4 +--
 .../test/highlight/controlflow.mlir           |  2 +-
 .../tree-sitter-mlir/test/highlight/func.mlir |  4 +--
 mlir/utils/vscode/cpp-grammar.json            |  4 +--
 130 files changed, 270 insertions(+), 270 deletions(-)

diff --git a/clang-tools-extra/clang-tidy/llvmlibc/RestrictSystemLibcHeadersCheck.cpp b/clang-tools-extra/clang-tidy/llvmlibc/RestrictSystemLibcHeadersCheck.cpp
index b656917071a6ca..4e09b735b1580c 100644
--- a/clang-tools-extra/clang-tidy/llvmlibc/RestrictSystemLibcHeadersCheck.cpp
+++ b/clang-tools-extra/clang-tidy/llvmlibc/RestrictSystemLibcHeadersCheck.cpp
@@ -25,9 +25,9 @@ class RestrictedIncludesPPCallbacks
 public:
   explicit RestrictedIncludesPPCallbacks(
       RestrictSystemLibcHeadersCheck &Check, const SourceManager &SM,
-      const SmallString<128> CompilerIncudeDir)
+      const SmallString<128> CompilerIncludeDir)
       : portability::RestrictedIncludesPPCallbacks(Check, SM),
-        CompilerIncudeDir(CompilerIncudeDir) {}
+        CompilerIncludeDir(CompilerIncludeDir) {}
 
   void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
                           StringRef FileName, bool IsAngled,
@@ -38,7 +38,7 @@ class RestrictedIncludesPPCallbacks
                           SrcMgr::CharacteristicKind FileType) override;
 
 private:
-  const SmallString<128> CompilerIncudeDir;
+  const SmallString<128> CompilerIncludeDir;
 };
 
 } // namespace
@@ -49,7 +49,7 @@ void RestrictedIncludesPPCallbacks::InclusionDirective(
     StringRef SearchPath, StringRef RelativePath, const Module *SuggestedModule,
     bool ModuleImported, SrcMgr::CharacteristicKind FileType) {
   // Compiler provided headers are allowed (e.g stddef.h).
-  if (SrcMgr::isSystem(FileType) && SearchPath == CompilerIncudeDir)
+  if (SrcMgr::isSystem(FileType) && SearchPath == CompilerIncludeDir)
     return;
   portability::RestrictedIncludesPPCallbacks::InclusionDirective(
       HashLoc, IncludeTok, FileName, IsAngled, FilenameRange, File, SearchPath,
@@ -58,11 +58,11 @@ void RestrictedIncludesPPCallbacks::InclusionDirective(
 
 void RestrictSystemLibcHeadersCheck::registerPPCallbacks(
     const SourceManager &SM, Preprocessor *PP, Preprocessor *ModuleExpanderPP) {
-  SmallString<128> CompilerIncudeDir =
+  SmallString<128> CompilerIncludeDir =
       StringRef(PP->getHeaderSearchInfo().getHeaderSearchOpts().ResourceDir);
-  llvm::sys::path::append(CompilerIncudeDir, "include");
+  llvm::sys::path::append(CompilerIncludeDir, "include");
   PP->addPPCallbacks(std::make_unique<RestrictedIncludesPPCallbacks>(
-      *this, SM, CompilerIncudeDir));
+      *this, SM, CompilerIncludeDir));
 }
 
 } // namespace clang::tidy::llvm_libc
diff --git a/mlir/docs/Canonicalization.md b/mlir/docs/Canonicalization.md
index 03fd174229afe9..6d3eb9e6d2be10 100644
--- a/mlir/docs/Canonicalization.md
+++ b/mlir/docs/Canonicalization.md
@@ -12,7 +12,7 @@ Most compilers have canonicalization passes, and sometimes they have many
 different ones (e.g. instcombine, dag combine, etc in LLVM). Because MLIR is a
 multi-level IR, we can provide a single canonicalization infrastructure and
 reuse it across many different IRs that it represents. This document describes
-the general approach, global canonicalizations performed, and provides sections
+the general approach, global canonicalizations performed, and provides sections
 to capture IR-specific rules for reference.
 
 [TOC]
@@ -28,7 +28,7 @@ exhausted. This is for efficiency reasons and to ensure that faulty patterns
 cannot cause infinite looping.
 
 Canonicalization patterns are registered with the operations themselves, which
-allows each dialect to define its own set of operations and canonicalizations
+allows each dialect to define its own set of operations and canonicalizations
 together.
 
 Some important things to think about w.r.t. canonicalization patterns:
@@ -107,15 +107,15 @@ These transformations are applied to all levels of IR:
 
 ## Defining Canonicalizations
 
-Two mechanisms are available with which to define canonicalizations;
+Two mechanisms are available with which to define canonicalizations;
 general `RewritePattern`s and the `fold` method.
 
 ### Canonicalizing with `RewritePattern`s
 
-This mechanism allows for providing canonicalizations as a set of
+This mechanism allows for providing canonicalizations as a set of
 `RewritePattern`s, either imperatively defined in C++ or declaratively as
 [Declarative Rewrite Rules](DeclarativeRewrites.md). The pattern rewrite
-infrastructure allows for expressing many different types of canonicalizations.
+infrastructure allows for expressing many different types of canonicalizations.
 These transformations may be as simple as replacing a multiplication with a
 shift, or even replacing a conditional branch with an unconditional one.
 
@@ -156,7 +156,7 @@ defining operation rewrites.
 ### Canonicalizing with the `fold` method
 
 The `fold` mechanism is an intentionally limited, but powerful mechanism that
-allows for applying canonicalizations in many places throughout the compiler.
+allows for applying canonicalizations in many places throughout the compiler.
 For example, outside of the canonicalizer pass, `fold` is used within the
 [dialect conversion infrastructure](DialectConversion.md) as a legalization
 mechanism, and can be invoked directly anywhere with an `OpBuilder` via
diff --git a/mlir/docs/Dialects/Linalg/_index.md b/mlir/docs/Dialects/Linalg/_index.md
index fbd1a451dc094e..dc4375dc0d7993 100644
--- a/mlir/docs/Dialects/Linalg/_index.md
+++ b/mlir/docs/Dialects/Linalg/_index.md
@@ -478,7 +478,7 @@ The key implication is that this conversion to deep predication needs to be
 undone once we are done with Linalg transformations. After iterators and
 induction variables are materialized (i.e. after lowering out of
 `linalg.generic` occurred), the overall performance will be greatly influenced
-by the quality of canonicalizations, foldings and *Loop Independent Code Motion*
+by the quality of canonicalizations, foldings and *Loop Independent Code Motion*
 (LICM).
 
 In the grander scheme, the reliance on late LICM was deemed a necessary risk.
diff --git a/mlir/docs/PDLL.md b/mlir/docs/PDLL.md
index f5651156a860ca..b4fb0723a4cac7 100644
--- a/mlir/docs/PDLL.md
+++ b/mlir/docs/PDLL.md
@@ -699,7 +699,7 @@ Pattern {
 }
 ```
 
-Below are the set of contexts in which result type inferrence is supported:
+Below are the set of contexts in which result type inference is supported:
 
 ##### Inferred Results of Replacement Operation
 
diff --git a/mlir/docs/Rationale/RationaleLinalgDialect.md b/mlir/docs/Rationale/RationaleLinalgDialect.md
index 7b5137ede3ae74..61137ba7c6b658 100644
--- a/mlir/docs/Rationale/RationaleLinalgDialect.md
+++ b/mlir/docs/Rationale/RationaleLinalgDialect.md
@@ -516,7 +516,7 @@ write, easy to verify and easy to maintain.
 declaratively. In turn this allows using local pattern rewrite rules in MLIR
 (i.e. [DRR](../DeclarativeRewrites.md)).
 - Allow creating customizable passes declaratively by simply selecting rewrite
-rules. This allows mixing transformations, canonicalizations, constant folding
+rules. This allows mixing transformations, canonicalizations, constant folding
 and other enabling rewrites in a single pass. The result is a system where pass
 fusion is very simple to obtain and gives hope for solving certain
 [phase ordering issues](https://dl.acm.org/doi/10.1145/201059.201061).
diff --git a/mlir/docs/SPIRVToLLVMDialectConversion.md b/mlir/docs/SPIRVToLLVMDialectConversion.md
index 0aae02cff26be1..4186e5479b5f45 100644
--- a/mlir/docs/SPIRVToLLVMDialectConversion.md
+++ b/mlir/docs/SPIRVToLLVMDialectConversion.md
@@ -335,7 +335,7 @@ SPIR-V Dialect op            | LLVM Dialect op
 `spirv.FOrdEqual`              | `llvm.fcmp "oeq"`
 `spirv.FOrdGreaterThan`        | `llvm.fcmp "ogt"`
 `spirv.FOrdGreaterThanEqual`   | `llvm.fcmp "oge"`
-`spirv.FOrdLessThan`           | `llvm.fcmp "olt"`
+`spirv.FOrdLessThan`           | `llvm.fcmp "olt"`
 `spirv.FOrdLessThanEqual`      | `llvm.fcmp "ole"`
 `spirv.FOrdNotEqual`           | `llvm.fcmp "one"`
 `spirv.FUnordEqual`            | `llvm.fcmp "ueq"`
diff --git a/mlir/docs/Tutorials/Toy/Ch-4.md b/mlir/docs/Tutorials/Toy/Ch-4.md
index b753ee7a5332f5..118ac5010d30b7 100644
--- a/mlir/docs/Tutorials/Toy/Ch-4.md
+++ b/mlir/docs/Tutorials/Toy/Ch-4.md
@@ -16,7 +16,7 @@ like Toy to get the information they need.
 
 MLIR provides a set of always available-hooks for certain core transformations,
 as seen in the [previous chapter](Ch-3.md), where we registered some
-canonicalizations via a hook on our operations (`getCanonicalizationPatterns`).
+canonicalizations via a hook on our operations (`getCanonicalizationPatterns`).
 However, these types of hooks don't really scale well. Therefore, a more generic
 solution was designed, in the form of [interfaces](../../Interfaces.md), to make
 the MLIR infrastructure as extensible as the representation. Interfaces provide
diff --git a/mlir/docs/doxygen.cfg.in b/mlir/docs/doxygen.cfg.in
index 6c437ea138c219..c2da3c75e44b8a 100644
--- a/mlir/docs/doxygen.cfg.in
+++ b/mlir/docs/doxygen.cfg.in
@@ -931,7 +931,7 @@ REFERENCES_RELATION    = YES
 REFERENCES_LINK_SOURCE = YES
 
 # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
+# source code will show a tooltip with additional information such as prototype,
 # brief description and links to the definition and documentation. Since this
 # will make the HTML file larger and loading of large files a bit slower, you
 # can opt to disable this feature.
diff --git a/mlir/include/mlir/AsmParser/AsmParser.h b/mlir/include/mlir/AsmParser/AsmParser.h
index 3c1bff1fbc7f14..33daf7ca26f49e 100644
--- a/mlir/include/mlir/AsmParser/AsmParser.h
+++ b/mlir/include/mlir/AsmParser/AsmParser.h
@@ -47,7 +47,7 @@ parseAsmSourceFile(const llvm::SourceMgr &sourceMgr, Block *block,
 /// not, an error diagnostic is emitted to the context and a null value is
 /// returned.
 /// If `numRead` is provided, it is set to the number of consumed characters on
-/// succesful parse. Otherwise, parsing fails if the entire string is not
+/// successful parse. Otherwise, parsing fails if the entire string is not
 /// consumed.
 /// Some internal copying can be skipped if the source string is known to be
 /// null terminated.
@@ -58,7 +58,7 @@ Attribute parseAttribute(llvm::StringRef attrStr, MLIRContext *context,
 /// This parses a single MLIR type to an MLIR context if it was valid. If not,
 /// an error diagnostic is emitted to the context.
 /// If `numRead` is provided, it is set to the number of consumed characters on
-/// succesful parse. Otherwise, parsing fails if the entire string is not
+/// successful parse. Otherwise, parsing fails if the entire string is not
 /// consumed.
 /// Some internal copying can be skipped if the source string is known to be
 /// null terminated.
diff --git a/mlir/include/mlir/Bytecode/BytecodeImplementation.h b/mlir/include/mlir/Bytecode/BytecodeImplementation.h
index 0ddc531073e232..7379e5d4c8095e 100644
--- a/mlir/include/mlir/Bytecode/BytecodeImplementation.h
+++ b/mlir/include/mlir/Bytecode/BytecodeImplementation.h
@@ -438,7 +438,7 @@ class BytecodeDialectInterface
 
   /// Read a type belonging to this dialect from the given reader. This method
   /// should return null in the case of failure. Optionally, the dialect version
-  /// can be accessed thorugh the reader.
+  /// can be accessed through the reader.
   virtual Type readType(DialectBytecodeReader &reader) const {
     reader.emitError() << "dialect " << getDialect()->getNamespace()
                        << " does not support reading types from bytecode";
diff --git a/mlir/include/mlir/Bytecode/BytecodeReader.h b/mlir/include/mlir/Bytecode/BytecodeReader.h
index 19f74a0750cc90..e68fcc0c3bd18d 100644
--- a/mlir/include/mlir/Bytecode/BytecodeReader.h
+++ b/mlir/include/mlir/Bytecode/BytecodeReader.h
@@ -57,7 +57,7 @@ class BytecodeReader {
   /// The lazyOps call back is invoked for every ops that can be lazy-loaded.
   /// This let the client decide if the op should be materialized immediately or
   /// delayed.
-  /// !! Using this materialize withing an IR walk() can be confusing: make sure
+  /// !! Using this materialize within an IR walk() can be confusing: make sure
   /// to use a PreOrder traversal !!
   LogicalResult materialize(
       Operation *op, llvm::function_ref<bool(Operation *)> lazyOpsCallback =
diff --git a/mlir/include/mlir/Conversion/TosaToLinalg/TosaToLinalg.h b/mlir/include/mlir/Conversion/TosaToLinalg/TosaToLinalg.h
index c84e4f17c38d88..384ea215ce7451 100644
--- a/mlir/include/mlir/Conversion/TosaToLinalg/TosaToLinalg.h
+++ b/mlir/include/mlir/Conversion/TosaToLinalg/TosaToLinalg.h
@@ -32,7 +32,7 @@ std::unique_ptr<Pass> createTosaToLinalgNamed(
 /// Populates passes to convert from TOSA to Linalg on buffers. At the end of
 /// the pass, the function will only contain linalg ops or standard ops if the
 /// pipeline succeeds.  The option to disable decompositions is available for
-/// benchmarking performance improvements from the canonicalizations.
+/// benchmarking performance improvements from the canonicalizations.
 void addTosaToLinalgPasses(
     OpPassManager &pm, const TosaToLinalgOptions &options,
     const TosaToLinalgNamedOptions &tosaToLinalgNamedOptions =
diff --git a/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h b/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h
index 10467e61ad0836..9521a79e65c0ff 100644
--- a/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h
+++ b/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h
@@ -31,7 +31,7 @@ void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns,
 /// The rest of the vector operations are left untouched.
 LogicalResult convertVectorToMMAOps(RewriterBase &rewriter, Operation *rootOp);
 
-/// Convert vector ops ops nested under `rootOp` to vector and GPU operaitons
+/// Convert vector ops ops nested under `rootOp` to vector and GPU operations
 /// compatible with the `nvvm.mma.sync` lowering path. This will convert a slice
 /// of operations that can be legally lowered on this path while the rest of
 /// the vector operations are left untouched.
diff --git a/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h b/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h
index e62b9c0bc0de58..75ba52b78c0a0a 100644
--- a/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h
+++ b/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h
@@ -21,7 +21,7 @@
 namespace mlir {
 namespace tracing {
 
-/// This breakpoing intends to match a FileLineColLocation, that is a tuple of
+/// This breakpoint intends to match a FileLineColLocation, that is a tuple of
 /// file name, line number, and column number. Using -1 for  the column and the
 /// line number will match any column and line number respectively.
 class FileLineColLocBreakpoint
diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td b/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td
index 19a2ade2e95a0e..e771976123772e 100644
--- a/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td
+++ b/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td
@@ -41,7 +41,7 @@ def Arith_CmpFPredicateAttr : I64EnumAttr<
       I64EnumAttrCase<"OEQ", 1, "oeq">,
       I64EnumAttrCase<"OGT", 2, "ogt">,
       I64EnumAttrCase<"OGE", 3, "oge">,
-      I64EnumAttrCase<"OLT", 4, "olt">,
+      I64EnumAttrCase<"OLT", 4, "olt">,
       I64EnumAttrCase<"OLE", 5, "ole">,
       I64EnumAttrCase<"ONE", 6, "one">,
       I64EnumAttrCase<"ORD", 7, "ord">,
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index aceb9d059b95f3..32b6da0afb78e4 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -646,7 +646,7 @@ OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op,
 ///
 /// Note: Canonicalization patterns could clean up layout maps and infer more
 /// precise layout maps after bufferization. However, many possible
-/// canonicalizations are currently not implemented.
+/// canonicalizations are currently not implemented.
 BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options,
                              MemRefLayoutAttrInterface layout = {},
                              Attribute memorySpace = nullptr);
diff --git a/mlir/include/mlir/Dialect/DLTI/DLTIAttrs.td b/mlir/include/mlir/Dialect/DLTI/DLTIAttrs.td
index 53d38407608bed..d9466b9c03b504 100644
--- a/mlir/include/mlir/Dialect/DLTI/DLTIAttrs.td
+++ b/mlir/include/mlir/Dialect/DLTI/DLTIAttrs.td
@@ -72,7 +72,7 @@ def DLTI_DataLayoutSpecAttr :
     DataLayoutSpecAttr combineWith(ArrayRef<DataLayoutSpecInterface> specs) const;
 
     /// Returns the endiannes identifier.
-    StringAttr getEndiannessIdentifier(MLIRContext *context) const;
+    StringAttr getEndiannessIdentifier(MLIRContext *context) const;
 
     /// Returns the alloca memory space identifier.
     StringAttr getAllocaMemorySpaceIdentifier(MLIRContext *context) const;
diff --git a/mlir/include/mlir/Dialect/DLTI/DLTIBase.td b/mlir/include/mlir/Dialect/DLTI/DLTIBase.td
index f84149c43e0fcd..85ffd7620bd7d3 100644
--- a/mlir/include/mlir/Dialect/DLTI/DLTIBase.td
+++ b/mlir/include/mlir/Dialect/DLTI/DLTIBase.td
@@ -41,13 +41,13 @@ def DLTI_Dialect : Dialect {
 
     // Constants used in entries.
     constexpr const static ::llvm::StringLiteral
-    kDataLayoutEndiannessKey = "dlti.endianness";
+    kDataLayoutEndiannessKey = "dlti.endianness";
 
     constexpr const static ::llvm::StringLiteral
-    kDataLayoutEndiannessBig = "big";
+    kDataLayoutEndiannessBig = "big";
 
     constexpr const static ::llvm::StringLiteral
-    kDataLayoutEndiannessLittle = "little";
+    kDataLayoutEndiannessLittle = "little";
 
     constexpr const static ::llvm::StringLiteral
     kDataLayoutAllocaMemorySpaceKey = "dlti.alloca_memory_space";
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index 4a43c16903394f..aac58ad5456f1d 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -544,7 +544,7 @@ def FCmpPredicateFALSE : LLVM_EnumAttrCase<"_false", "_false", "FCMP_FALSE", 0>;
 def FCmpPredicateOEQ   : LLVM_EnumAttrCase<"oeq", "oeq", "FCMP_OEQ", 1>;
 def FCmpPredicateOGT   : LLVM_EnumAttrCase<"ogt", "ogt", "FCMP_OGT", 2>;
 def FCmpPredicateOGE   : LLVM_EnumAttrCase<"oge", "oge", "FCMP_OGE", 3>;
-def FCmpPredicateOLT   : LLVM_EnumAttrCase<"olt", "olt", "FCMP_OLT", 4>;
+def FCmpPredicateOLT   : LLVM_EnumAttrCase<"olt", "olt", "FCMP_OLT", 4>;
 def FCmpPredicateOLE   : LLVM_EnumAttrCase<"ole", "ole", "FCMP_OLE", 5>;
 def FCmpPredicateONE   : LLVM_EnumAttrCase<"one", "one", "FCMP_ONE", 6>;
 def FCmpPredicateORD   : LLVM_EnumAttrCase<"ord", "ord", "FCMP_ORD", 7>;
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 0208f854f799ec..9b9d689c743271 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1753,7 +1753,7 @@ void populateFuseTensorPadWithProducerLinalgOpPatterns(
     RewritePatternSet &patterns);
 
 /// Patterns to convert from one named op to another. These can be seen as
-/// canonicalizations of named ops into another named op.
+/// canonicalizations of named ops into another named op.
 void populateLinalgNamedOpConversionPatterns(RewritePatternSet &patterns);
 
 /// Patterns to fold unit-extent dimensions in operands/results of linalg ops on
diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
index f1df49ce3eaa36..da4175cd0be74b 100644
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -213,7 +213,7 @@ void offsetIndices(RewriterBase &b, LinalgOp linalgOp,
 
 /// A struct containing the Linalg producer before and after fusion.
 /// When operating on tensors, `fusedProducer` may feed into a `tensor.cast`
-/// op before the consumer Linalg op, until enough canonicalizations have
+/// op before the consumer Linalg op, until enough canonicalizations have
 /// applied.
 struct FusionInfo {
   LinalgOp originalProducer;
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index 2ff9d612a5efa7..4fa5130dd6043b 100644
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -937,7 +937,7 @@ def MemRef_ExtractStridedMetadataOp : MemRef_Op<"extract_strided_metadata", [
     This makes lowering more progressive and brings the following benefits:
       - not all users of MLIR want to lower to LLVM and the information to e.g.
         lower to library calls---like libxsmm---or to SPIR-V was not available.
-      - foldings and canonicalizations can happen at a higher level in MLIR:
+      - foldings and canonicalizations can happen at a higher level in MLIR:
         before this op existed, lowering to LLVM would create large amounts of
         LLVMIR. Even when LLVM does a good job at folding the low-level IR from
         a performance perspective, it is unnecessarily opaque and inefficient to
diff --git a/mlir/include/mlir/Dialect/SCF/Transforms/Transforms.h b/mlir/include/mlir/Dialect/SCF/Transforms/Transforms.h
index ea2f457c4e8898..4db6fdcd8508bc 100644
--- a/mlir/include/mlir/Dialect/SCF/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/SCF/Transforms/Transforms.h
@@ -56,7 +56,7 @@ void naivelyFuseParallelOps(Region &region,
 /// `partialIteration`). This transformation is called "loop peeling".
 ///
 /// This transformation is beneficial for a wide range of transformations such
-/// as vectorization or loop tiling: It enables additional canonicalizations
+/// as vectorization or loop tiling: It enables additional canonicalizations
 /// inside the peeled loop body such as rewriting masked loads into unmaked
 /// loads.
 ///
diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
index 4001ba3fc84c9d..131b1cd6ff0dfb 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
@@ -57,7 +57,7 @@ SmallVector<scf::ForOp> replaceLoopNestWithNewYields(
 /// Assumes the FuncOp result types is the type of the yielded operands of the
 /// single block. This constraint makes it easy to determine the result.
 /// This method also clones the `arith::ConstantIndexOp` at the start of
-/// `outlinedFuncBody` to alloc simple canonicalizations.
+/// `outlinedFuncBody` to alloc simple canonicalizations.
 /// Creates a new FuncOp and thus cannot be used in a FuncOp pass.
 /// The client is responsible for providing a unique `funcName` that will not
 /// collide with another FuncOp name.  If `callOp` is provided, it will be set
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 96a61419a541f7..b5289541d505a7 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -1288,7 +1288,7 @@ def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResu
           %col = linalg.index 1 : index
           %result = sparse_tensor.select %a : f64 {
               ^bb0(%arg0: f64):
-                %keep = arith.cmpf "olt", %col, %row : f64
+                %keep = arith.cmpf "olt", %col, %row : f64
                 sparse_tensor.yield %keep : i1
             }
           linalg.yield %result : f64
diff --git a/mlir/include/mlir/Dialect/Tensor/TransformOps/TensorTransformOps.td b/mlir/include/mlir/Dialect/Tensor/TransformOps/TensorTransformOps.td
index 81bab1b0c82f7a..05ee86076eca98 100644
--- a/mlir/include/mlir/Dialect/Tensor/TransformOps/TensorTransformOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/TransformOps/TensorTransformOps.td
@@ -92,7 +92,7 @@ def ApplyMergeConsecutiveInsertExtractSlicePatternsOp : Op<Transform_Dialect,
     [DeclareOpInterfaceMethods<PatternDescriptorOpInterface>]> {
   let description = [{
     Indicates that consecutive tensor.extract_slice/tensor.insert_slice ops
-    should be merged into a single op. These patterns are not canonicalizations
+    should be merged into a single op. These patterns are not canonicalizations
     because the bufferization is sensitive to IR structure.
   }];
 
diff --git a/mlir/include/mlir/IR/AttrTypeBase.td b/mlir/include/mlir/IR/AttrTypeBase.td
index d176b36068f7a5..46278b526f7229 100644
--- a/mlir/include/mlir/IR/AttrTypeBase.td
+++ b/mlir/include/mlir/IR/AttrTypeBase.td
@@ -164,7 +164,7 @@ class AttrOrTypeDef<string valueType, string name, list<Trait> defTraits,
   // Name of storage class to generate or use.
   string storageClass = name # valueType # "Storage";
 
-  // Namespace (withing dialect c++ namespace) in which the storage class
+  // Namespace (within dialect c++ namespace) in which the storage class
   // resides.
   string storageNamespace = "detail";
 
diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h
index e2472eea8a3714..7d8ec111826940 100644
--- a/mlir/include/mlir/IR/OpImplementation.h
+++ b/mlir/include/mlir/IR/OpImplementation.h
@@ -796,7 +796,7 @@ class AsmParser {
   /// least one element will be parsed.
   ///
   /// contextMessage is an optional message appended to "expected '('" sorts of
-  /// diagnostics when parsing the delimeters.
+  /// diagnostics when parsing the delimiters.
   virtual ParseResult
   parseCommaSeparatedList(Delimiter delimiter,
                           function_ref<ParseResult()> parseElementFn,
diff --git a/mlir/include/mlir/IR/OperationSupport.h b/mlir/include/mlir/IR/OperationSupport.h
index 1b93f3d3d04fe8..7dd610413fd3f2 100644
--- a/mlir/include/mlir/IR/OperationSupport.h
+++ b/mlir/include/mlir/IR/OperationSupport.h
@@ -257,7 +257,7 @@ class OperationName {
   ///     success.  The caller will remove the operation and use those results
   ///     instead.
   ///
-  /// This allows expression of some simple in-place canonicalizations (e.g.
+  /// This allows expression of some simple in-place canonicalizations (e.g.
   /// "x+0 -> x", "min(x,y,x,z) -> min(x,y,z)", "x+y-x -> y", etc), as well as
   /// generalized constant folding.
   LogicalResult foldHook(Operation *op, ArrayRef<Attribute> operands,
diff --git a/mlir/include/mlir/IR/Threading.h b/mlir/include/mlir/IR/Threading.h
index 3ceab6b3e883a5..0f88f0863d82d4 100644
--- a/mlir/include/mlir/IR/Threading.h
+++ b/mlir/include/mlir/IR/Threading.h
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file defines various utilies for multithreaded processing within MLIR.
+// This file defines various utilities for multithreaded processing within MLIR.
 // These utilities automatically handle many of the necessary threading
 // conditions, such as properly ordering diagnostics, observing if threading is
 // disabled, etc. These utilities should be used over other threading utilities
diff --git a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
index 848d2dee4a6309..73c9e8cd196b20 100644
--- a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
+++ b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
@@ -74,7 +74,7 @@ getDefaultIndexBitwidth(Type type, const DataLayout &dataLayout,
 
 /// Default handler for endianness request. Dispatches to the
 /// DataLayoutInterface if specified, otherwise returns the default.
-Attribute getDefaultEndianness(DataLayoutEntryInterface entry);
+Attribute getDefaultEndianness(DataLayoutEntryInterface entry);
 
 /// Default handler for alloca memory space request. Dispatches to the
 /// DataLayoutInterface if specified, otherwise returns the default.
@@ -227,7 +227,7 @@ class DataLayout {
   std::optional<uint64_t> getTypeIndexBitwidth(Type t) const;
 
   /// Returns the specified endianness.
-  Attribute getEndianness() const;
+  Attribute getEndianness() const;
 
   /// Returns the memory space used for AllocaOps.
   Attribute getAllocaMemorySpace() const;
diff --git a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td
index d6e955be4291a3..12010488769c6d 100644
--- a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td
+++ b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td
@@ -132,7 +132,7 @@ def DataLayoutSpecInterface : AttrInterface<"DataLayoutSpecInterface", [DLTIQuer
     InterfaceMethod<
       /*description=*/"Returns the endianness identifier.",
       /*retTy=*/"::mlir::StringAttr",
-      /*methodName=*/"getEndiannessIdentifier",
+      /*methodName=*/"getEndiannessIdentifier",
       /*args=*/(ins "::mlir::MLIRContext *":$context)
     >,
     InterfaceMethod<
@@ -462,11 +462,11 @@ def DataLayoutOpInterface : OpInterface<"DataLayoutOpInterface"> {
                       "using the relevant entries. The data layout object "
                       "can be used for recursive queries.",
       /*retTy=*/"::mlir::Attribute",
-      /*methodName=*/"getEndianness",
+      /*methodName=*/"getEndianness",
       /*args=*/(ins "::mlir::DataLayoutEntryInterface":$entry),
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
-        return ::mlir::detail::getDefaultEndianness(entry);
+        return ::mlir::detail::getDefaultEndianness(entry);
       }]
     >,
     StaticInterfaceMethod<
diff --git a/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h b/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h
index 3988a8826498a9..ece40efcbaf75b 100644
--- a/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h
+++ b/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h
@@ -27,7 +27,7 @@ namespace intrange {
 using InferRangeFn =
     std::function<ConstantIntRanges(ArrayRef<ConstantIntRanges>)>;
 
-/// Function that performs inferrence on an array of `IntegerValueRange`.
+/// Function that performs inference on an array of `IntegerValueRange`.
 using InferIntegerValueRangeFn =
     std::function<IntegerValueRange(ArrayRef<IntegerValueRange>)>;
 
diff --git a/mlir/include/mlir/Tools/PDLL/AST/Nodes.h b/mlir/include/mlir/Tools/PDLL/AST/Nodes.h
index aed2562e4d30dd..45ca499965c908 100644
--- a/mlir/include/mlir/Tools/PDLL/AST/Nodes.h
+++ b/mlir/include/mlir/Tools/PDLL/AST/Nodes.h
@@ -107,7 +107,7 @@ class DeclScope {
 /// nodes.
 class Node {
 public:
-  /// This CRTP class provides several utilies when defining new AST nodes.
+  /// This CRTP class provides several utilities when defining new AST nodes.
   template <typename T, typename BaseT>
   class NodeBase : public BaseT {
   public:
@@ -1312,7 +1312,7 @@ class Module final : public Node::NodeBase<Module, Node>,
 };
 
 //===----------------------------------------------------------------------===//
-// Defered Method Definitions
+// Deferred Method Definitions
 //===----------------------------------------------------------------------===//
 
 inline bool Decl::classof(const Node *node) {
diff --git a/mlir/include/mlir/Tools/PDLL/AST/Types.h b/mlir/include/mlir/Tools/PDLL/AST/Types.h
index 89c8e193ddc32b..da34527dea57bb 100644
--- a/mlir/include/mlir/Tools/PDLL/AST/Types.h
+++ b/mlir/include/mlir/Tools/PDLL/AST/Types.h
@@ -146,7 +146,7 @@ class AttributeType : public Type::TypeBase<detail::AttributeTypeStorage> {
 //===----------------------------------------------------------------------===//
 
 /// This class represents a PDLL type that corresponds to a constraint. This
-/// type has no MLIR C++ API correspondance.
+/// type has no MLIR C++ API correspondence.
 class ConstraintType : public Type::TypeBase<detail::ConstraintTypeStorage> {
 public:
   using Base::Base;
@@ -231,7 +231,7 @@ class ValueRangeType : public RangeType {
 //===----------------------------------------------------------------------===//
 
 /// This class represents a PDLL type that corresponds to a rewrite reference.
-/// This type has no MLIR C++ API correspondance.
+/// This type has no MLIR C++ API correspondence.
 class RewriteType : public Type::TypeBase<detail::RewriteTypeStorage> {
 public:
   using Base::Base;
diff --git a/mlir/include/mlir/Tools/PDLL/ODS/Context.h b/mlir/include/mlir/Tools/PDLL/ODS/Context.h
index 8a57bb791e6390..e0ec324bb6d4ef 100644
--- a/mlir/include/mlir/Tools/PDLL/ODS/Context.h
+++ b/mlir/include/mlir/Tools/PDLL/ODS/Context.h
@@ -63,7 +63,7 @@ class Context {
   /// operation already existed).
   std::pair<Operation *, bool>
   insertOperation(StringRef name, StringRef summary, StringRef desc,
-                  StringRef nativeClassName, bool supportsResultTypeInferrence,
+                  StringRef nativeClassName, bool supportsResultTypeInference,
                   SMLoc loc);
 
   /// Lookup an operation registered with the given name, or null if no
diff --git a/mlir/include/mlir/Tools/PDLL/ODS/Dialect.h b/mlir/include/mlir/Tools/PDLL/ODS/Dialect.h
index c5c60977dd2f14..84b25c2fe023c4 100644
--- a/mlir/include/mlir/Tools/PDLL/ODS/Dialect.h
+++ b/mlir/include/mlir/Tools/PDLL/ODS/Dialect.h
@@ -35,7 +35,7 @@ class Dialect {
   /// operation already existed).
   std::pair<Operation *, bool>
   insertOperation(StringRef name, StringRef summary, StringRef desc,
-                  StringRef nativeClassName, bool supportsResultTypeInferrence,
+                  StringRef nativeClassName, bool supportsResultTypeInference,
                   SMLoc loc);
 
   /// Lookup an operation registered with the given name, or null if no
diff --git a/mlir/include/mlir/Tools/PDLL/ODS/Operation.h b/mlir/include/mlir/Tools/PDLL/ODS/Operation.h
index e49db88ef9c93f..0408c3b9444b89 100644
--- a/mlir/include/mlir/Tools/PDLL/ODS/Operation.h
+++ b/mlir/include/mlir/Tools/PDLL/ODS/Operation.h
@@ -167,12 +167,12 @@ class Operation {
   /// Returns the results of this operation.
   ArrayRef<OperandOrResult> getResults() const { return results; }
 
-  /// Return if the operation is known to support result type inferrence.
-  bool hasResultTypeInferrence() const { return supportsTypeInferrence; }
+  /// Return if the operation is known to support result type inference.
+  bool hasResultTypeInference() const { return supportsTypeInference; }
 
 private:
   Operation(StringRef name, StringRef summary, StringRef desc,
-            StringRef nativeClassName, bool supportsTypeInferrence, SMLoc loc);
+            StringRef nativeClassName, bool supportsTypeInference, SMLoc loc);
 
   /// The name of the operation.
   std::string name;
@@ -184,8 +184,8 @@ class Operation {
   /// The native class name of the operation, used when generating native code.
   std::string nativeClassName;
 
-  /// Flag indicating if the operation is known to support type inferrence.
-  bool supportsTypeInferrence;
+  /// Flag indicating if the operation is known to support type inference.
+  bool supportsTypeInference;
 
   /// The source location of this operation.
   SMRange location;
diff --git a/mlir/include/mlir/Tools/ParseUtilities.h b/mlir/include/mlir/Tools/ParseUtilities.h
index f366f6826c9a1c..18582bc6381cad 100644
--- a/mlir/include/mlir/Tools/ParseUtilities.h
+++ b/mlir/include/mlir/Tools/ParseUtilities.h
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file containts common utilities for implementing the file-parsing
+// This file contains common utilities for implementing the file-parsing
 // behaviour for MLIR tools.
 //
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Tools/lsp-server-support/Protocol.h b/mlir/include/mlir/Tools/lsp-server-support/Protocol.h
index 5d2eb01a523a75..a69c91c1d63da4 100644
--- a/mlir/include/mlir/Tools/lsp-server-support/Protocol.h
+++ b/mlir/include/mlir/Tools/lsp-server-support/Protocol.h
@@ -957,7 +957,7 @@ struct ParameterInformation {
   /// The label of this parameter. Ignored when labelOffsets is set.
   std::string labelString;
 
-  /// Inclusive start and exclusive end offsets withing the containing signature
+  /// Inclusive start and exclusive end offsets within the containing signature
   /// label.
   std::optional<std::pair<unsigned, unsigned>> labelOffsets;
 
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 65e279e046e886..08283e3b15752a 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -218,14 +218,14 @@ class TypeConverter {
   ///
   /// The conversion functions take a non-null Type or subclass of Type and a
   /// non-null Attribute (or subclass of Attribute), and returns a
-  /// `AttributeConversionResult`. This result can either contan an `Attribute`,
+  /// `AttributeConversionResult`. This result can either contain an `Attribute`,
   /// which may be `nullptr`, representing the conversion's success,
   /// `AttributeConversionResult::na()` (the default empty value), indicating
   /// that the conversion function did not apply and that further conversion
   /// functions should be checked, or `AttributeConversionResult::abort()`
   /// indicating that the conversion process should be aborted.
   ///
-  /// Registered conversion functions are callled in the reverse of the order in
+  /// Registered conversion functions are called in the reverse of the order in
   /// which they were registered.
   template <
       typename FnT,
@@ -673,7 +673,7 @@ class ConversionPatternRewriter final : public PatternRewriter {
   /// If no block argument types are changing, the original block will be
   /// left in place and returned.
   ///
-  /// A signature converison must be provided. (Type converters can construct
+  /// A signature conversion must be provided. (Type converters can construct
   /// a signature conversion with `convertBlockSignature`.)
   ///
   /// Optionally, a type converter can be provided to build materializations.
diff --git a/mlir/include/mlir/Transforms/Inliner.h b/mlir/include/mlir/Transforms/Inliner.h
index ec77319d6ac885..812c5e46030ec5 100644
--- a/mlir/include/mlir/Transforms/Inliner.h
+++ b/mlir/include/mlir/Transforms/Inliner.h
@@ -65,7 +65,7 @@ class InlinerConfig {
 /// This is an implementation of the inliner
 /// that operates bottom up over the Strongly Connected Components(SCCs)
 /// of the CallGraph. This enables a more incremental propagation
-/// of inlining decisions from the leafs to the roots of the callgraph.
+/// of inlining decisions from the leaves to the roots of the callgraph.
 class Inliner {
 public:
   /// This struct represents a resolved call to a given callgraph node. Given
diff --git a/mlir/include/mlir/Transforms/OneToNTypeConversion.h b/mlir/include/mlir/Transforms/OneToNTypeConversion.h
index 4c689ba219e884..39706c25d936b4 100644
--- a/mlir/include/mlir/Transforms/OneToNTypeConversion.h
+++ b/mlir/include/mlir/Transforms/OneToNTypeConversion.h
@@ -15,7 +15,7 @@
 // unrealized casts by user-provided materializations. For this to work, the
 // main function requires a special `TypeConverter`, a special
 // `PatternRewriter`, and special RewritePattern`s, which extend their
-// respective base classes for 1:N type converions.
+// respective base classes for 1:N type conversions.
 //
 // Note that this is much more simple-minded than the "real" dialect conversion,
 // which checks for legality before applying patterns and does probably many
@@ -179,7 +179,7 @@ class OneToNPatternRewriter : public PatternRewriter {
 };
 
 /// Base class for patterns with 1:N type conversions. Derived classes have to
-/// overwrite the `matchAndRewrite` overlaod that provides additional
+/// overwrite the `matchAndRewrite` overload that provides additional
 /// information for 1:N type conversions.
 class OneToNConversionPattern : public RewritePatternWithConverter {
 public:
diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td
index 000d9f697618e6..65f75a0ec90154 100644
--- a/mlir/include/mlir/Transforms/Passes.td
+++ b/mlir/include/mlir/Transforms/Passes.td
@@ -19,7 +19,7 @@ include "mlir/Rewrite/PassUtil.td"
 def Canonicalizer : Pass<"canonicalize"> {
   let summary = "Canonicalize operations";
   let description = [{
-    This pass performs various types of canonicalizations over a set of
+    This pass performs various types of canonicalizations over a set of
     operations by iteratively applying the canonicalization patterns of all
     loaded dialects until either a fixpoint is reached or the maximum number of
     iterations/rewrites is exhausted. Canonicalization is best-effort and does
@@ -106,7 +106,7 @@ def RemoveDeadValues : Pass<"remove-dead-values"> {
     (A) Removes function arguments that are not live,
     (B) Removes function return values that are not live across all callers of
     the function,
-    (C) Removes unneccesary operands, results, region arguments, and region
+    (C) Removes unnecessary operands, results, region arguments, and region
     terminator operands of region branch ops, and,
     (D) Removes simple and region branch ops that have all non-live results and
     don't affect memory in any way,
@@ -412,7 +412,7 @@ def SROA : Pass<"sroa"> {
   let summary = "Scalar Replacement of Aggregates";
   let description = [{
     Scalar Replacement of Aggregates. Replaces allocations of aggregates into
-    independant allocations of its elements.
+    independent allocations of its elements.
 
     Allocators must implement `DestructurableAllocationOpInterface` to provide
     the list of memory slots for which destructuring should be attempted.
diff --git a/mlir/include/mlir/Transforms/RegionUtils.h b/mlir/include/mlir/Transforms/RegionUtils.h
index 5c57dd5b7532a4..f09272b5596d71 100644
--- a/mlir/include/mlir/Transforms/RegionUtils.h
+++ b/mlir/include/mlir/Transforms/RegionUtils.h
@@ -60,7 +60,7 @@ void getUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
 /// - `cloneOperationIntoRegion` is a callback that allows caller to specify
 ///   if the operation defining an `OpOperand` needs to be cloned into the
 ///   region. Then the operands of this operation become part of the captured
-///   values set (unless the operations that define the operands themeselves
+///   values set (unless the operations that define the operands themselves
 ///   are to be cloned). The cloned operations are added to the entry block
 ///   of the region.
 /// Return the set of captured values for the operation.
diff --git a/mlir/lib/Bindings/Python/IRModule.h b/mlir/lib/Bindings/Python/IRModule.h
index 172898cfda0c52..3cf65e00b2eeb7 100644
--- a/mlir/lib/Bindings/Python/IRModule.h
+++ b/mlir/lib/Bindings/Python/IRModule.h
@@ -228,7 +228,7 @@ class PyMlirContext {
   void clearOperationsInside(PyOperationBase &op);
   void clearOperationsInside(MlirOperation op);
 
-  /// Clears the operaiton _and_ all operations inside using
+  /// Clears the operation _and_ all operations inside using
   /// `clearOperation(MlirOperation)`.
   void clearOperationAndInside(PyOperationBase &op);
 
diff --git a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
index b7fd454c60902f..055a51fa1d5b88 100644
--- a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
@@ -305,7 +305,7 @@ static Value createMinMaxF(OpBuilder &builder, Location loc, Value lhs,
   if (auto vecType = dyn_cast<VectorType>(lhs.getType()))
     i1Type = VectorType::get(vecType.getShape(), i1Type);
   Value cmp = builder.create<LLVM::FCmpOp>(
-      loc, i1Type, isMin ? LLVM::FCmpPredicate::olt : LLVM::FCmpPredicate::ogt,
+      loc, i1Type, isMin ? LLVM::FCmpPredicate::olt : LLVM::FCmpPredicate::ogt,
       lhs, rhs);
   Value sel = builder.create<LLVM::SelectOp>(loc, cmp, lhs, rhs);
   Value isNan = builder.create<LLVM::FCmpOp>(
diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
index b00cd0dee3ae80..d8d038365b1f1a 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
@@ -887,7 +887,7 @@ void PatternLowering::generateOperationResultTypeRewriter(
   Block *rewriterBlock = op->getBlock();
 
   // Try to handle resolution for each of the result types individually. This is
-  // preferred over type inferrence because it will allow for us to use existing
+  // preferred over type inference because it will allow for us to use existing
   // types directly, as opposed to trying to rebuild the type list.
   OperandRange resultTypeValues = op.getTypeValues();
   auto tryResolveResultTypes = [&] {
diff --git a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
index 6d250237e0a334..67c31c4cc29a17 100644
--- a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
+++ b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -316,7 +316,7 @@ static omp::DeclareReductionOp declareReduction(PatternRewriter &builder,
           reduction, {arith::CmpFPredicate::OLT, arith::CmpFPredicate::OLE},
           {arith::CmpFPredicate::OGT, arith::CmpFPredicate::OGE}, isMin) ||
       matchSelectReduction<LLVM::FCmpOp, LLVM::SelectOp>(
-          reduction, {LLVM::FCmpPredicate::olt, LLVM::FCmpPredicate::ole},
+          reduction, {LLVM::FCmpPredicate::olt, LLVM::FCmpPredicate::ole},
           {LLVM::FCmpPredicate::ogt, LLVM::FCmpPredicate::oge}, isMin)) {
     return createDecl(builder, symbolTable, reduce, reductionIndex,
                       minMaxValueForFloat(type, !isMin));
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index ca786316324198..78ecba187e12d1 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1573,7 +1573,7 @@ void mlir::populateSPIRVToLLVMConversionPatterns(
       FComparePattern<spirv::FOrdGreaterThanOp, LLVM::FCmpPredicate::ogt>,
       FComparePattern<spirv::FOrdGreaterThanEqualOp, LLVM::FCmpPredicate::oge>,
       FComparePattern<spirv::FOrdLessThanEqualOp, LLVM::FCmpPredicate::ole>,
-      FComparePattern<spirv::FOrdLessThanOp, LLVM::FCmpPredicate::olt>,
+      FComparePattern<spirv::FOrdLessThanOp, LLVM::FCmpPredicate::olt>,
       FComparePattern<spirv::FOrdNotEqualOp, LLVM::FCmpPredicate::one>,
       FComparePattern<spirv::FUnordEqualOp, LLVM::FCmpPredicate::ueq>,
       FComparePattern<spirv::FUnordGreaterThanOp, LLVM::FCmpPredicate::ugt>,
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 11b6b7cf5fd5a7..32c5047b49a41d 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -637,7 +637,7 @@ static int64_t getLargestKnownDivisor(AffineExpr e, ArrayRef<Value> operands) {
   // We simply exploit information from loop IVs.
   // We don't need to use mlir::getLargestKnownDivisorOfValue since the other
   // desired simplifications are expected to be part of other
-  // canonicalizations. Also, mlir::getLargestKnownDivisorOfValue is part of the
+  // canonicalizations. Also, mlir::getLargestKnownDivisorOfValue is part of the
   // LoopAnalysis library.
   Value operand = operands[dimExpr.getPosition()];
   int64_t operandDivisor = 1;
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 898467d573362b..22210550d84f39 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -531,7 +531,7 @@ void mlir::affine::normalizeAffineParallel(AffineParallelOp op) {
                               /*symbolCount=*/lbMap.getNumSymbols(), expr);
 
     // Use an 'affine.apply' op that will be simplified later in subsequent
-    // canonicalizations.
+    // canonicalizations.
     OperandRange lbOperands = op.getLowerBoundsOperands();
     OperandRange dimOperands = lbOperands.take_front(nDims);
     OperandRange symbolOperands = lbOperands.drop_front(nDims);
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
index 4968c4fc463d04..d074ebb39be282 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
@@ -507,7 +507,7 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop
 };
 
 //===----------------------------------------------------------------------===//
-// ArmSME-specific fixup canonicalizations/folds
+// ArmSME-specific fixup canonicalizations/folds
 //===----------------------------------------------------------------------===//
 
 /// Folds an extract from a 3D `vector.create_mask` (which is a vector of
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 04a8ff30ee946b..2465e8927cfa60 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -1076,7 +1076,7 @@ struct EraseAlwaysFalseDealloc : public OpRewritePattern<DeallocOp> {
 /// memref if the operand is not already guaranteed to be the result of a memref
 /// allocation operation. This canonicalization pattern removes this extraction
 /// operation if the operand is now produced by an allocation operation (e.g.,
-/// due to other canonicalizations simplifying the IR).
+/// due to other canonicalizations simplifying the IR).
 ///
 /// Example:
 /// ```mlir
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
index 8fffdbf664c3f4..dee9e384f22539 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
@@ -29,7 +29,7 @@ using namespace mlir::bufferization;
 // BufferPlacementAllocs
 //===----------------------------------------------------------------------===//
 
-/// Get the start operation to place the given alloc value withing the
+/// Get the start operation to place the given alloc value within the
 // specified placement block.
 Operation *BufferPlacementAllocs::getStartOperation(Value allocValue,
                                                     Block *placementBlock,
diff --git a/mlir/lib/Dialect/DLTI/DLTI.cpp b/mlir/lib/Dialect/DLTI/DLTI.cpp
index 85ec9fc93248a1..c6a13249fd52d7 100644
--- a/mlir/lib/Dialect/DLTI/DLTI.cpp
+++ b/mlir/lib/Dialect/DLTI/DLTI.cpp
@@ -254,8 +254,8 @@ DataLayoutSpecAttr::combineWith(ArrayRef<DataLayoutSpecInterface> specs) const {
 }
 
 StringAttr
-DataLayoutSpecAttr::getEndiannessIdentifier(MLIRContext *context) const {
-  return Builder(context).getStringAttr(DLTIDialect::kDataLayoutEndiannessKey);
+DataLayoutSpecAttr::getEndiannessIdentifier(MLIRContext *context) const {
+  return Builder(context).getStringAttr(DLTIDialect::kDataLayoutEndiannessKey);
 }
 
 StringAttr
@@ -490,9 +490,9 @@ dlti::query(Operation *op, ArrayRef<DataLayoutEntryKey> keys, bool emitError) {
 }
 
 constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutAttrName;
-constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutEndiannessKey;
-constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutEndiannessBig;
-constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutEndiannessLittle;
+constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutEndiannessKey;
+constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutEndiannessBig;
+constexpr const StringLiteral mlir::DLTIDialect::kDataLayoutEndiannessLittle;
 
 namespace {
 class TargetDataLayoutInterface : public DataLayoutDialectInterface {
@@ -502,16 +502,16 @@ class TargetDataLayoutInterface : public DataLayoutDialectInterface {
   LogicalResult verifyEntry(DataLayoutEntryInterface entry,
                             Location loc) const final {
     StringRef entryName = entry.getKey().get<StringAttr>().strref();
-    if (entryName == DLTIDialect::kDataLayoutEndiannessKey) {
+    if (entryName == DLTIDialect::kDataLayoutEndiannessKey) {
       auto value = llvm::dyn_cast<StringAttr>(entry.getValue());
       if (value &&
-          (value.getValue() == DLTIDialect::kDataLayoutEndiannessBig ||
-           value.getValue() == DLTIDialect::kDataLayoutEndiannessLittle))
+          (value.getValue() == DLTIDialect::kDataLayoutEndiannessBig ||
+           value.getValue() == DLTIDialect::kDataLayoutEndiannessLittle))
         return success();
       return emitError(loc) << "'" << entryName
                             << "' data layout entry is expected to be either '"
-                            << DLTIDialect::kDataLayoutEndiannessBig << "' or '"
-                            << DLTIDialect::kDataLayoutEndiannessLittle << "'";
+                            << DLTIDialect::kDataLayoutEndiannessBig << "' or '"
+                            << DLTIDialect::kDataLayoutEndiannessLittle << "'";
     }
     if (entryName == DLTIDialect::kDataLayoutAllocaMemorySpaceKey ||
         entryName == DLTIDialect::kDataLayoutProgramMemorySpaceKey ||
diff --git a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
index a75598afe8c72d..aa162d512f0ed9 100644
--- a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
@@ -81,7 +81,7 @@ struct GpuAllReduceRewriter {
     Value invocationIdx = create<arith::AddIOp>(int32Type, tmp3, tidX);
     Value workgroupSize = create<arith::MulIOp>(int32Type, tmp4, dimZ);
 
-    // Compute lane id (invocation id withing the subgroup).
+    // Compute lane id (invocation id within the subgroup).
     Value subgroupMask =
         create<arith::ConstantIntOp>(kSubgroupSize - 1, int32Type);
     Value laneId = create<arith::AndIOp>(invocationIdx, subgroupMask);
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
index 5dc506c14ef96f..2a8259ed8a61f0 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
@@ -174,7 +174,7 @@ static bool areConversionCompatible(const DataLayout &layout, Type targetType,
 
 /// Checks if `dataLayout` describes a little endian layout.
 static bool isBigEndian(const DataLayout &dataLayout) {
-  auto endiannessStr = dyn_cast_or_null<StringAttr>(dataLayout.getEndianness());
+  auto endiannessStr = dyn_cast_or_null<StringAttr>(dataLayout.getEndianness());
   return endiannessStr && endiannessStr == "big";
 }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index 90ee0fb3bf0b6b..c9beab9c501aa1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -330,7 +330,7 @@ static Value collapseValue(
 /// Compute the modified metadata for an operands of operation
 /// whose unit dims are being dropped. Return the new indexing map
 /// to use, the shape of the operand in the replacement op
-/// and the `reassocation` to use to go from original operand shape
+/// and the `reassociation` to use to go from original operand shape
 /// to modified operand shape.
 struct UnitExtentReplacementInfo {
   AffineMap indexMap;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index c818675993c2c3..59a8ac737a3f49 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -584,7 +584,7 @@ class ExpansionInfo {
 public:
   // Computes the mapping from original dimensions of the op to the dimensions
   // of the expanded op given the `indexingMap` of the fused operand/result of
-  // the generic op, the `reassocationMaps` of the reshape op and the shape of
+  // the generic op, the `reassociationMaps` of the reshape op and the shape of
   // the expanded op.
   LogicalResult compute(LinalgOp linalgOp, OpOperand *fusableOpOperand,
                         ArrayRef<AffineMap> reassociationMaps,
diff --git a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
index 16ab45ea8bee63..4dfc0c0009888d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
@@ -374,7 +374,7 @@ struct RemoveUnusedCycleInGenericOp : public OpRewritePattern<GenericOp> {
 /// Assuming that all %a and %b have the same index map:
 /// * All uses of %in0 and %in2 are replaced with %out1
 /// * All uses of %in1 are replaced with %in3
-/// This pattern can enable additional canonicalizations: In the above example,
+/// This pattern can enable additional canonicalizations: In the above example,
 /// %in0, %in1 and %in3 have no uses anymore and their corresponding operands
 /// can be folded away. This pattern does not modify uses of output block args.
 struct FoldDuplicateInputBbArgs : public OpRewritePattern<GenericOp> {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp b/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
index 84bde1bc0b8464..15e3aeb31702a1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // This file implements conversions between named ops that can be seens as
-// canonicalizations of named ops.
+// canonicalizations of named ops.
 //
 //===----------------------------------------------------------------------===//
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 63dcda78d0f2be..39c00ea63958b8 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1283,8 +1283,8 @@ vectorizeOneOp(RewriterBase &rewriter, VectorizationState &state,
 /// permutation_map of the vector.transfer_read operations. The eager
 /// broadcasting makes it trivial to detrmine where broadcast, transposes and
 /// reductions should occur, without any bookkeeping. The tradeoff is that, in
-/// the absence of good canonicalizations, the amount of work increases.
-/// This is not deemed a problem as we expect canonicalizations and foldings to
+/// the absence of good canonicalizations, the amount of work increases.
+/// This is not deemed a problem as we expect canonicalizations and foldings to
 /// aggressively clean up the useless work.
 static LogicalResult
 vectorizeAsLinalgGeneric(RewriterBase &rewriter, VectorizationState &state,
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 9c021d3613f1c8..98c83cce00cf6a 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -2107,7 +2107,7 @@ LogicalResult ExpandShapeOp::reifyResultShapes(
 /// result and operand. Layout maps are verified separately.
 ///
 /// If `allowMultipleDynamicDimsPerGroup`, multiple dynamic dimensions are
-/// allowed in a reassocation group.
+/// allowed in a reassociation group.
 static LogicalResult
 verifyCollapsedShape(Operation *op, ArrayRef<int64_t> collapsedShape,
                      ArrayRef<int64_t> expandedShape,
diff --git a/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp b/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp
index 57f0141c95dc56..770c4a0981fdf6 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp
@@ -121,7 +121,7 @@ struct EmulateWideIntPass final
         [&typeConverter](Operation *op) { return typeConverter.isLegal(op); });
 
     RewritePatternSet patterns(ctx);
-    // Add common pattenrs to support contants, functions, etc.
+    // Add common patterns to support constants, functions, etc.
     arith::populateArithWideIntEmulationPatterns(typeConverter, patterns);
 
     memref::populateMemRefWideIntEmulationPatterns(typeConverter, patterns);
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index a794a121d6267b..2e388c65a072d6 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -110,7 +110,7 @@ SmallVector<scf::ForOp> mlir::replaceLoopNestWithNewYields(
 /// Assumes the FuncOp result types is the type of the yielded operands of the
 /// single block. This constraint makes it easy to determine the result.
 /// This method also clones the `arith::ConstantIndexOp` at the start of
-/// `outlinedFuncBody` to alloc simple canonicalizations. If `callOp` is
+/// `outlinedFuncBody` to alloc simple canonicalizations. If `callOp` is
 /// provided, it will be set to point to the operation that calls the outlined
 /// function.
 // TODO: support more than single-block regions.
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
index 07cf26926a1dfb..7a1e52deeb4d24 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
@@ -581,7 +581,7 @@ void UnifyAliasedResourcePass::runOnOperation() {
 
   if (getTargetEnvFn) {
     // This pass is only needed for targeting WebGPU, Metal, or layering
-    // Vulkan on Metal via MoltenVK, where we need to translate SPIR-V into
+    // Vulkan on Metal via MoltenVK, where we need to translate SPIR-V into
     // WGSL or MSL. The translation has limitations.
     spirv::TargetEnvAttr targetEnv = getTargetEnvFn(moduleOp);
     spirv::ClientAPI clientAPI = targetEnv.getClientAPI();
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index 7ff435a033985c..df6e3e391368d3 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -18,7 +18,7 @@ using namespace mlir::tensor;
 
 /// Compute a map that for a given dimension of the expanded type gives the
 /// dimension in the collapsed type it maps to. Essentially its the inverse of
-/// the `reassocation` maps.
+/// the `reassociation` maps.
 static llvm::DenseMap<int64_t, int64_t>
 getExpandedDimToCollapsedDimMap(ArrayRef<AffineMap> reassociation) {
   llvm::DenseMap<int64_t, int64_t> expandedDimToCollapsedDim;
@@ -134,14 +134,14 @@ static SmallVector<OpFoldResult, 4> getExpandedOutputShapeFromInputShape(
 static SmallVector<OpFoldResult, 4>
 getReshapeOutputShapeFromInputShape(OpBuilder &builder, Location loc, Value src,
                                     ArrayRef<int64_t> dstStaticShape,
-                                    ArrayRef<AffineMap> reassocation) {
+                                    ArrayRef<AffineMap> reassociation) {
   return dstStaticShape.size() >
                  static_cast<size_t>(
                      llvm::cast<ShapedType>(src.getType()).getRank())
              ? getExpandedOutputShapeFromInputShape(
-                   builder, loc, src, dstStaticShape, reassocation)
+                   builder, loc, src, dstStaticShape, reassociation)
              : getCollapsedOutputShapeFromInputShape(
-                   builder, loc, src, dstStaticShape, reassocation);
+                   builder, loc, src, dstStaticShape, reassociation);
 }
 
 template <typename OpTy>
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 47f540e092e990..dddc5f83dcb4e6 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -3895,7 +3895,7 @@ namespace {
 /// Subset of PackOp/UnPackOp fields used to compute the result of applying
 /// various permutations to the op.
 // TODO: Add linalg.transpose + pack/unpack folding patterns that just reuse
-// these. These may or may not become true foldings / canonicalizations
+// these. These may or may not become true foldings / canonicalizations
 // depending on how aggressive we want to be in automatically folding
 // transposes.
 struct PackOrUnPackTransposeResult {
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index e46b6a4a6bb693..5bc48711737422 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -926,7 +926,7 @@ MemRefType mlir::canonicalizeStridedLayout(MemRefType t) {
 AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
                                                 ArrayRef<AffineExpr> exprs,
                                                 MLIRContext *context) {
-  // Size 0 corner case is useful for canonicalizations.
+  // Size 0 corner case is useful for canonicalizations.
   if (sizes.empty())
     return getAffineConstantExpr(0, context);
 
diff --git a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
index 2634245a4b7b1e..4399da7d8d8041 100644
--- a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
+++ b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
@@ -239,7 +239,7 @@ std::optional<uint64_t> mlir::detail::getDefaultIndexBitwidth(
 
 // Returns the endianness if specified in the given entry. If the entry is empty
 // the default endianness represented by an empty attribute is returned.
-Attribute mlir::detail::getDefaultEndianness(DataLayoutEntryInterface entry) {
+Attribute mlir::detail::getDefaultEndianness(DataLayoutEntryInterface entry) {
   if (entry == DataLayoutEntryInterface())
     return Attribute();
 
@@ -580,19 +580,19 @@ std::optional<uint64_t> mlir::DataLayout::getTypeIndexBitwidth(Type t) const {
   });
 }
 
-mlir::Attribute mlir::DataLayout::getEndianness() const {
+mlir::Attribute mlir::DataLayout::getEndianness() const {
   checkValid();
   if (endianness)
     return *endianness;
   DataLayoutEntryInterface entry;
   if (originalLayout)
     entry = originalLayout.getSpecForIdentifier(
-        originalLayout.getEndiannessIdentifier(originalLayout.getContext()));
+        originalLayout.getEndiannessIdentifier(originalLayout.getContext()));
 
   if (auto iface = dyn_cast_or_null<DataLayoutOpInterface>(scope))
-    endianness = iface.getEndianness(entry);
+    endianness = iface.getEndianness(entry);
   else
-    endianness = detail::getDefaultEndianness(entry);
+    endianness = detail::getDefaultEndianness(entry);
   return *endianness;
 }
 
diff --git a/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp b/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp
index 35001757f214e1..b4a9f45130cb55 100644
--- a/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp
+++ b/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp
@@ -149,9 +149,9 @@ DataLayoutImporter::tryToEmplacePointerAlignmentEntry(LLVMPointerType type,
 }
 
 LogicalResult
-DataLayoutImporter::tryToEmplaceEndiannessEntry(StringRef endianness,
+DataLayoutImporter::tryToEmplaceEndiannessEntry(StringRef endianness,
                                                 StringRef token) {
-  auto key = StringAttr::get(context, DLTIDialect::kDataLayoutEndiannessKey);
+  auto key = StringAttr::get(context, DLTIDialect::kDataLayoutEndiannessKey);
   if (keyEntries.count(key))
     return success();
 
@@ -236,14 +236,14 @@ void DataLayoutImporter::translateDataLayout(
 
     // Parse the endianness.
     if (*prefix == "e") {
-      if (failed(tryToEmplaceEndiannessEntry(
-              DLTIDialect::kDataLayoutEndiannessLittle, token)))
+      if (failed(tryToEmplaceEndiannessEntry(
+              DLTIDialect::kDataLayoutEndiannessLittle, token)))
         return;
       continue;
     }
     if (*prefix == "E") {
-      if (failed(tryToEmplaceEndiannessEntry(
-              DLTIDialect::kDataLayoutEndiannessBig, token)))
+      if (failed(tryToEmplaceEndiannessEntry(
+              DLTIDialect::kDataLayoutEndiannessBig, token)))
         return;
       continue;
     }
diff --git a/mlir/lib/Target/LLVMIR/DataLayoutImporter.h b/mlir/lib/Target/LLVMIR/DataLayoutImporter.h
index 59b60acd24be20..c7dd233162f6e5 100644
--- a/mlir/lib/Target/LLVMIR/DataLayoutImporter.h
+++ b/mlir/lib/Target/LLVMIR/DataLayoutImporter.h
@@ -93,7 +93,7 @@ class DataLayoutImporter {
                                                   StringRef token);
 
   /// Adds an endianness entry if there is none yet.
-  LogicalResult tryToEmplaceEndiannessEntry(StringRef endianness,
+  LogicalResult tryToEmplaceEndiannessEntry(StringRef endianness,
                                             StringRef token);
 
   /// Adds an alloca address space entry if there is none yet.
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index fcb329eb7a92c1..c18b5872f810bb 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -190,10 +190,10 @@ translateDataLayout(DataLayoutSpecInterface attribute,
     auto key = llvm::dyn_cast_if_present<StringAttr>(entry.getKey());
     if (!key)
       continue;
-    if (key.getValue() == DLTIDialect::kDataLayoutEndiannessKey) {
+    if (key.getValue() == DLTIDialect::kDataLayoutEndiannessKey) {
       auto value = cast<StringAttr>(entry.getValue());
       bool isLittleEndian =
-          value.getValue() == DLTIDialect::kDataLayoutEndiannessLittle;
+          value.getValue() == DLTIDialect::kDataLayoutEndiannessLittle;
       layoutStream << "-" << (isLittleEndian ? "e" : "E");
       layoutStream.flush();
       continue;
diff --git a/mlir/lib/Tools/PDLL/ODS/Context.cpp b/mlir/lib/Tools/PDLL/ODS/Context.cpp
index 61a9df92c80479..3ec2df076908f0 100644
--- a/mlir/lib/Tools/PDLL/ODS/Context.cpp
+++ b/mlir/lib/Tools/PDLL/ODS/Context.cpp
@@ -63,11 +63,11 @@ const Dialect *Context::lookupDialect(StringRef name) const {
 std::pair<Operation *, bool>
 Context::insertOperation(StringRef name, StringRef summary, StringRef desc,
                          StringRef nativeClassName,
-                         bool supportsResultTypeInferrence, SMLoc loc) {
+                         bool supportsResultTypeInference, SMLoc loc) {
   std::pair<StringRef, StringRef> dialectAndName = name.split('.');
   return insertDialect(dialectAndName.first)
       .insertOperation(name, summary, desc, nativeClassName,
-                       supportsResultTypeInferrence, loc);
+                       supportsResultTypeInference, loc);
 }
 
 const Operation *Context::lookupOperation(StringRef name) const {
diff --git a/mlir/lib/Tools/PDLL/ODS/Dialect.cpp b/mlir/lib/Tools/PDLL/ODS/Dialect.cpp
index b4654a6ad5b2eb..6046a4950027d8 100644
--- a/mlir/lib/Tools/PDLL/ODS/Dialect.cpp
+++ b/mlir/lib/Tools/PDLL/ODS/Dialect.cpp
@@ -24,13 +24,13 @@ Dialect::~Dialect() = default;
 std::pair<Operation *, bool>
 Dialect::insertOperation(StringRef name, StringRef summary, StringRef desc,
                          StringRef nativeClassName,
-                         bool supportsResultTypeInferrence, llvm::SMLoc loc) {
+                         bool supportsResultTypeInference, llvm::SMLoc loc) {
   std::unique_ptr<Operation> &operation = operations[name];
   if (operation)
     return std::make_pair(&*operation, /*wasInserted*/ false);
 
   operation.reset(new Operation(name, summary, desc, nativeClassName,
-                                supportsResultTypeInferrence, loc));
+                                supportsResultTypeInference, loc));
   return std::make_pair(&*operation, /*wasInserted*/ true);
 }
 
diff --git a/mlir/lib/Tools/PDLL/ODS/Operation.cpp b/mlir/lib/Tools/PDLL/ODS/Operation.cpp
index 7e708be1ae4d11..adcfbec2e3d174 100644
--- a/mlir/lib/Tools/PDLL/ODS/Operation.cpp
+++ b/mlir/lib/Tools/PDLL/ODS/Operation.cpp
@@ -18,9 +18,9 @@ using namespace mlir::pdll::ods;
 //===----------------------------------------------------------------------===//
 
 Operation::Operation(StringRef name, StringRef summary, StringRef desc,
-                     StringRef nativeClassName, bool supportsTypeInferrence,
+                     StringRef nativeClassName, bool supportsTypeInference,
                      llvm::SMLoc loc)
     : name(name.str()), summary(summary.str()), description(desc.str()),
       nativeClassName(nativeClassName.str()),
-      supportsTypeInferrence(supportsTypeInferrence),
+      supportsTypeInference(supportsTypeInference),
       location(loc, llvm::SMLoc::getFromPointer(loc.getPointer() + 1)) {}
diff --git a/mlir/lib/Tools/PDLL/Parser/Parser.cpp b/mlir/lib/Tools/PDLL/Parser/Parser.cpp
index 2f842df48826d5..8b54dbe4c71485 100644
--- a/mlir/lib/Tools/PDLL/Parser/Parser.cpp
+++ b/mlir/lib/Tools/PDLL/Parser/Parser.cpp
@@ -433,7 +433,7 @@ class Parser {
                                          std::optional<StringRef> name,
                                          const ods::Operation *odsOp,
                                          SmallVectorImpl<ast::Expr *> &results);
-  void checkOperationResultTypeInferrence(SMRange loc, StringRef name,
+  void checkOperationResultTypeInference(SMRange loc, StringRef name,
                                           const ods::Operation *odsOp);
   LogicalResult validateOperationOperandsOrResults(
       StringRef groupName, SMRange loc, std::optional<SMRange> odsOpLoc,
@@ -890,14 +890,14 @@ void Parser::processTdIncludeRecords(const llvm::RecordKeeper &tdRecords,
   for (const llvm::Record *def : tdRecords.getAllDerivedDefinitions("Op")) {
     tblgen::Operator op(def);
 
-    // Check to see if this operation is known to support type inferrence.
-    bool supportsResultTypeInferrence =
+    // Check to see if this operation is known to support type inference.
+    bool supportsResultTypeInference =
         op.getTrait("::mlir::InferTypeOpInterface::Trait");
 
     auto [odsOp, inserted] = odsContext.insertOperation(
         op.getOperationName(), processDoc(op.getSummary()),
         processAndFormatDoc(op.getDescription()), op.getQualCppClassName(),
-        supportsResultTypeInferrence, op.getLoc().front());
+        supportsResultTypeInference, op.getLoc().front());
 
     // Ignore operations that have already been added.
     if (!inserted)
@@ -2112,7 +2112,7 @@ Parser::parseOperationExpr(OpResultTypeContext inputResultTypeContext) {
       return failure();
 
     // If result types are provided, initially assume that the operation does
-    // not rely on type inferrence. We don't assert that it isn't, because we
+    // not rely on type inference. We don't assert that it isn't, because we
     // may be inferring the value of some type/type range variables, but given
     // that these variables may be defined in calls we can't always discern when
     // this is the case.
@@ -2415,7 +2415,7 @@ FailureOr<ast::ReplaceStmt *> Parser::parseReplaceStmt() {
       return failure();
   } else {
     // Handle replacement with an operation uniquely, as the replacement
-    // operation supports type inferrence from the root operation.
+    // operation supports type inference from the root operation.
     FailureOr<ast::Expr *> replExpr;
     if (curToken.is(Token::kw_op))
       replExpr = parseOperationExpr(OpResultTypeContext::Replacement);
@@ -2853,19 +2853,19 @@ FailureOr<ast::OperationExpr *> Parser::createOperationExpr(
 
   assert(
       (resultTypeContext == OpResultTypeContext::Explicit || results.empty()) &&
-      "unexpected inferrence when results were explicitly specified");
+      "unexpected inference when results were explicitly specified");
 
-  // If we aren't relying on type inferrence, or explicit results were provided,
+  // If we aren't relying on type inference, or explicit results were provided,
   // validate them.
   if (resultTypeContext == OpResultTypeContext::Explicit) {
     if (failed(validateOperationResults(loc, opNameRef, odsOp, results)))
       return failure();
 
-    // Validate the use of interface based type inferrence for this operation.
+    // Validate the use of interface based type inference for this operation.
   } else if (resultTypeContext == OpResultTypeContext::Interface) {
     assert(opNameRef &&
            "expected valid operation name when inferring operation results");
-    checkOperationResultTypeInferrence(loc, *opNameRef, odsOp);
+    checkOperationResultTypeInference(loc, *opNameRef, odsOp);
   }
 
   return ast::OperationExpr::create(ctx, loc, odsOp, name, operands, results,
@@ -2891,14 +2891,14 @@ Parser::validateOperationResults(SMRange loc, std::optional<StringRef> name,
       results, odsOp ? odsOp->getResults() : std::nullopt, typeTy, typeRangeTy);
 }
 
-void Parser::checkOperationResultTypeInferrence(SMRange loc, StringRef opName,
+void Parser::checkOperationResultTypeInference(SMRange loc, StringRef opName,
                                                 const ods::Operation *odsOp) {
-  // If the operation might not have inferrence support, emit a warning to the
+  // If the operation might not have inference support, emit a warning to the
   // user. We don't emit an error because the interface might be added to the
   // operation at runtime. It's rare, but it could still happen. We emit a
   // warning here instead.
 
-  // Handle inferrence warnings for unknown operations.
+  // Handle inference warnings for unknown operations.
   if (!odsOp) {
     ctx.getDiagEngine().emitWarning(
         loc, llvm::formatv(
@@ -2910,15 +2910,15 @@ void Parser::checkOperationResultTypeInferrence(SMRange loc, StringRef opName,
     return;
   }
 
-  // Handle inferrence warnings for known operations that expected at least one
+  // Handle inference warnings for known operations that expected at least one
   // result, but don't have inference support. An elided results list can mean
   // "zero-results", and we don't want to warn when that is the expected
   // behavior.
-  bool requiresInferrence =
+  bool requiresInference =
       llvm::any_of(odsOp->getResults(), [](const ods::OperandOrResult &result) {
         return !result.isVariableLength();
       });
-  if (requiresInferrence && !odsOp->hasResultTypeInferrence()) {
+  if (requiresInference && !odsOp->hasResultTypeInference()) {
     ast::InFlightDiagnostic diag = ctx.getDiagEngine().emitWarning(
         loc,
         llvm::formatv("operation result types are marked to be inferred, but "
diff --git a/mlir/lib/Transforms/InlinerPass.cpp b/mlir/lib/Transforms/InlinerPass.cpp
index 04b2f9e4191877..c1b050dbd44b2e 100644
--- a/mlir/lib/Transforms/InlinerPass.cpp
+++ b/mlir/lib/Transforms/InlinerPass.cpp
@@ -8,7 +8,7 @@
 //
 // This file implements a basic inlining algorithm that operates bottom up over
 // the Strongly Connect Components(SCCs) of the CallGraph. This enables a more
-// incremental propagation of inlining decisions from the leafs to the roots of
+// incremental propagation of inlining decisions from the leaves to the roots of
 // the callgraph.
 //
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Transforms/RemoveDeadValues.cpp b/mlir/lib/Transforms/RemoveDeadValues.cpp
index 055256903a1522..cee528e9bfb628 100644
--- a/mlir/lib/Transforms/RemoveDeadValues.cpp
+++ b/mlir/lib/Transforms/RemoveDeadValues.cpp
@@ -16,7 +16,7 @@
 // (A) Removes function arguments that are not live,
 // (B) Removes function return values that are not live across all callers of
 // the function,
-// (C) Removes unneccesary operands, results, region arguments, and region
+// (C) Removes unnecessary operands, results, region arguments, and region
 // terminator operands of region branch ops, and,
 // (D) Removes simple and region branch ops that have all non-live results and
 // don't affect memory in any way,
@@ -282,7 +282,7 @@ static void cleanFuncOp(FunctionOpInterface funcOp, Operation *module,
 ///   (2') Erasing it
 /// if it has no memory effects and none of its results are live, AND
 ///   (1) Erasing its unnecessary operands (operands that are forwarded to
-///   unneccesary results and arguments),
+///   unnecessary results and arguments),
 ///   (2) Cleaning each of its regions,
 ///   (3) Dropping the uses of its unnecessary results (results that are
 ///   forwarded from unnecessary operands and terminator operands), AND
@@ -290,10 +290,10 @@ static void cleanFuncOp(FunctionOpInterface funcOp, Operation *module,
 /// otherwise.
 /// Note that here, cleaning a region means:
 ///   (2.a) Dropping the uses of its unnecessary arguments (arguments that are
-///   forwarded from unneccesary operands and terminator operands),
+///   forwarded from unnecessary operands and terminator operands),
 ///   (2.b) Erasing these arguments, AND
 ///   (2.c) Erasing its unnecessary terminator operands (terminator operands
-///   that are forwarded to unneccesary results and arguments).
+///   that are forwarded to unnecessary results and arguments).
 /// It is important to note that values in this op flow from operands and
 /// terminator operands (successor operands) to arguments and results (successor
 /// inputs).
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index e0d0acd122e26b..05e8e46e634e1e 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -898,7 +898,7 @@ mlir::applyPatternsAndFoldGreedily(Region &region,
                                    const FrozenRewritePatternSet &patterns,
                                    GreedyRewriteConfig config, bool *changed) {
   // The top-level operation must be known to be isolated from above to
-  // prevent performing canonicalizations on operations defined at or above
+  // prevent performing canonicalizations on operations defined at or above
   // the region containing 'op'.
   assert(region.getParentOp()->hasTrait<OpTrait::IsIsolatedFromAbove>() &&
          "patterns can only be applied to operations IsolatedFromAbove");
diff --git a/mlir/lib/Transforms/Utils/Inliner.cpp b/mlir/lib/Transforms/Utils/Inliner.cpp
index 8acfc96d2b611b..e7636a591f5630 100644
--- a/mlir/lib/Transforms/Utils/Inliner.cpp
+++ b/mlir/lib/Transforms/Utils/Inliner.cpp
@@ -9,7 +9,7 @@
 // This file implements Inliner that uses a basic inlining
 // algorithm that operates bottom up over the Strongly Connect Components(SCCs)
 // of the CallGraph. This enables a more incremental propagation of inlining
-// decisions from the leafs to the roots of the callgraph.
+// decisions from the leaves to the roots of the callgraph.
 //
 //===----------------------------------------------------------------------===//
 
diff --git a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
index afd1198ede0f76..4cbd978f3ef3c0 100644
--- a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
+++ b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
@@ -318,17 +318,17 @@ func.func @arith_cmpf_oge(%arg0: f32, %arg1: f32) -> i1 {
 
 // -----
 
-func.func @arith_cmpf_olt(%arg0: f32, %arg1: f32) -> i1 {
-  // CHECK-LABEL: arith_cmpf_olt
+func.func @arith_cmpf_olt(%arg0: f32, %arg1: f32) -> i1 {
+  // CHECK-LABEL: arith_cmpf_olt
   // CHECK-SAME: ([[Arg0:[^ ]*]]: f32, [[Arg1:[^ ]*]]: f32)
   // CHECK-DAG: [[LT:[^ ]*]] = emitc.cmp lt, [[Arg0]], [[Arg1]] : (f32, f32) -> i1
   // CHECK-DAG: [[NotNaNArg0:[^ ]*]] = emitc.cmp eq, [[Arg0]], [[Arg0]] : (f32, f32) -> i1
   // CHECK-DAG: [[NotNaNArg1:[^ ]*]] = emitc.cmp eq, [[Arg1]], [[Arg1]] : (f32, f32) -> i1
   // CHECK-DAG: [[Ordered:[^ ]*]] = emitc.logical_and [[NotNaNArg0]], [[NotNaNArg1]] : i1, i1
   // CHECK-DAG: [[OLT:[^ ]*]] = emitc.logical_and [[Ordered]], [[LT]] : i1, i1
-  %olt = arith.cmpf olt, %arg0, %arg1 : f32
+  %olt = arith.cmpf olt, %arg0, %arg1 : f32
   // CHECK: return [[OLT]]
-  return %olt: i1
+  return %olt: i1
 }
 
 // -----
diff --git a/mlir/test/Conversion/ArithToLLVM/arith-to-llvm.mlir b/mlir/test/Conversion/ArithToLLVM/arith-to-llvm.mlir
index d3bdbe89a54876..f8db3870249731 100644
--- a/mlir/test/Conversion/ArithToLLVM/arith-to-llvm.mlir
+++ b/mlir/test/Conversion/ArithToLLVM/arith-to-llvm.mlir
@@ -334,7 +334,7 @@ func.func @fcmp(f32, f32) -> () {
   // CHECK:      llvm.fcmp "oeq" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.fcmp "ogt" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.fcmp "oge" %arg0, %arg1 : f32
-  // CHECK-NEXT: llvm.fcmp "olt" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "olt" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.fcmp "ole" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.fcmp "one" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.fcmp "ord" %arg0, %arg1 : f32
@@ -350,7 +350,7 @@ func.func @fcmp(f32, f32) -> () {
   %1 = arith.cmpf oeq, %arg0, %arg1 : f32
   %2 = arith.cmpf ogt, %arg0, %arg1 : f32
   %3 = arith.cmpf oge, %arg0, %arg1 : f32
-  %4 = arith.cmpf olt, %arg0, %arg1 : f32
+  %4 = arith.cmpf olt, %arg0, %arg1 : f32
   %5 = arith.cmpf ole, %arg0, %arg1 : f32
   %6 = arith.cmpf one, %arg0, %arg1 : f32
   %7 = arith.cmpf ord, %arg0, %arg1 : f32
@@ -484,9 +484,9 @@ func.func @cmpf_2dvector(%arg0 : vector<4x3xf32>, %arg1 : vector<4x3xf32>) {
   // CHECK-DAG: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[OARG1]]
   // CHECK: %[[EXTRACT1:.*]] = llvm.extractvalue %[[ARG0]][0] : !llvm.array<4 x vector<3xf32>>
   // CHECK: %[[EXTRACT2:.*]] = llvm.extractvalue %[[ARG1]][0] : !llvm.array<4 x vector<3xf32>>
-  // CHECK: %[[CMP:.*]] = llvm.fcmp "olt" %[[EXTRACT1]], %[[EXTRACT2]] : vector<3xf32>
+  // CHECK: %[[CMP:.*]] = llvm.fcmp "olt" %[[EXTRACT1]], %[[EXTRACT2]] : vector<3xf32>
   // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[CMP]], %2[0] : !llvm.array<4 x vector<3xi1>>
-  %0 = arith.cmpf olt, %arg0, %arg1 : vector<4x3xf32>
+  %0 = arith.cmpf olt, %arg0, %arg1 : vector<4x3xf32>
   func.return
 }
 
diff --git a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
index 1abe0fd2ec468c..64aeaf37b77fd4 100644
--- a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
+++ b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
@@ -315,7 +315,7 @@ func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK: spirv.FOrdGreaterThanEqual
   %3 = arith.cmpf oge, %arg0, %arg1 : f32
   // CHECK: spirv.FOrdLessThan
-  %4 = arith.cmpf olt, %arg0, %arg1 : f32
+  %4 = arith.cmpf olt, %arg0, %arg1 : f32
   // CHECK: spirv.FOrdLessThanEqual
   %5 = arith.cmpf ole, %arg0, %arg1 : f32
   // CHECK: spirv.FOrdNotEqual
diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
index d7767bda08435f..03bc6f35dfdbf7 100644
--- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
@@ -167,7 +167,7 @@ func.func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 // CHECK: %[[ZERO_MULTIPLICATOR_2:.*]] = arith.subf %[[RHS_REAL_IS_INF_WITH_SIGN_TIMES_LHS_IMAG]], %[[RHS_IMAG_IS_INF_WITH_SIGN_TIMES_LHS_REAL]] : f32
 // CHECK: %[[RESULT_IMAG_4:.*]] = arith.mulf %[[ZERO]], %[[ZERO_MULTIPLICATOR_2]] : f32
 
-// CHECK: %[[REAL_ABS_SMALLER_THAN_IMAG_ABS:.*]] = arith.cmpf olt, %[[RHS_REAL_ABS]], %[[RHS_IMAG_ABS]] : f32
+// CHECK: %[[REAL_ABS_SMALLER_THAN_IMAG_ABS:.*]] = arith.cmpf olt, %[[RHS_REAL_ABS]], %[[RHS_IMAG_ABS]] : f32
 // CHECK: %[[RESULT_REAL:.*]] = arith.select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_REAL_1]], %[[RESULT_REAL_2]] : f32
 // CHECK: %[[RESULT_IMAG:.*]] = arith.select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_IMAG_1]], %[[RESULT_IMAG_2]] : f32
 // CHECK: %[[RESULT_REAL_SPECIAL_CASE_3:.*]] = arith.select %[[FINITE_NUM_INFINITE_DENOM]], %[[RESULT_REAL_4]], %[[RESULT_REAL]] : f32
@@ -1517,7 +1517,7 @@ func.func @complex_atan2_with_fmf(%lhs: complex<f32>,
 // CHECK: %[[VAR398:.*]] = arith.mulf %[[VAR327]], %[[VAR392]] fastmath<nnan,contract> : f32
 // CHECK: %[[VAR399:.*]] = arith.subf %[[VAR397]], %[[VAR398]] fastmath<nnan,contract> : f32
 // CHECK: %[[VAR400:.*]] = arith.mulf %[[CST_16]], %[[VAR399]] fastmath<nnan,contract> : f32
-// CHECK: %[[VAR401:.*]] = arith.cmpf olt, %[[VAR349]], %[[VAR351]] : f32
+// CHECK: %[[VAR401:.*]] = arith.cmpf olt, %[[VAR349]], %[[VAR351]] : f32
 // CHECK: %[[VAR402:.*]] = arith.select %[[VAR401]], %[[VAR336]], %[[VAR345]] : f32
 // CHECK: %[[VAR403:.*]] = arith.select %[[VAR401]], %[[VAR339]], %[[VAR348]] : f32
 // CHECK: %[[VAR404:.*]] = arith.select %[[VAR388]], %[[VAR396]], %[[VAR402]] : f32
@@ -1743,7 +1743,7 @@ func.func @complex_div_with_fmf(%lhs: complex<f32>, %rhs: complex<f32>) -> compl
 // CHECK: %[[ZERO_MULTIPLICATOR_2:.*]] = arith.subf %[[RHS_REAL_IS_INF_WITH_SIGN_TIMES_LHS_IMAG]], %[[RHS_IMAG_IS_INF_WITH_SIGN_TIMES_LHS_REAL]] fastmath<nnan,contract> : f32
 // CHECK: %[[RESULT_IMAG_4:.*]] = arith.mulf %[[ZERO]], %[[ZERO_MULTIPLICATOR_2]] fastmath<nnan,contract> : f32
 
-// CHECK: %[[REAL_ABS_SMALLER_THAN_IMAG_ABS:.*]] = arith.cmpf olt, %[[RHS_REAL_ABS]], %[[RHS_IMAG_ABS]] : f32
+// CHECK: %[[REAL_ABS_SMALLER_THAN_IMAG_ABS:.*]] = arith.cmpf olt, %[[RHS_REAL_ABS]], %[[RHS_IMAG_ABS]] : f32
 // CHECK: %[[RESULT_REAL:.*]] = arith.select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_REAL_1]], %[[RESULT_REAL_2]] : f32
 // CHECK: %[[RESULT_IMAG:.*]] = arith.select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_IMAG_1]], %[[RESULT_IMAG_2]] : f32
 // CHECK: %[[RESULT_REAL_SPECIAL_CASE_3:.*]] = arith.select %[[FINITE_NUM_INFINITE_DENOM]], %[[RESULT_REAL_4]], %[[RESULT_REAL]] : f32
diff --git a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
index 52359db3be7bdc..413f77defa49c8 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
@@ -240,14 +240,14 @@ spirv.func @f_ord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>
 
 // CHECK-LABEL: @f_ord_less_than_scalar
 spirv.func @f_ord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : f64
+  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : f64
   %0 = spirv.FOrdLessThan %arg0, %arg1 : f64
   spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_less_than_vector
 spirv.func @f_ord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : vector<2xf64>
+  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : vector<2xf64>
   %0 = spirv.FOrdLessThan %arg0, %arg1 : vector<2xf64>
   spirv.Return
 }
diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir
index a386a178b78995..dd385c3decc0fd 100644
--- a/mlir/test/Dialect/Arith/canonicalize.mlir
+++ b/mlir/test/Dialect/Arith/canonicalize.mlir
@@ -2025,8 +2025,8 @@ func.func @test_cmpf(%arg0 : f32) -> (i1, i1, i1, i1) {
 //   CHECK-DAG:   %[[F:.*]] = arith.constant false
 //       CHECK:   return %[[F]], %[[F]], %[[T]], %[[T]]
   %nan = arith.constant 0x7fffffff : f32
-  %0 = arith.cmpf olt, %nan, %arg0 : f32
-  %1 = arith.cmpf olt, %arg0, %nan : f32
+  %0 = arith.cmpf olt, %nan, %arg0 : f32
+  %1 = arith.cmpf olt, %arg0, %nan : f32
   %2 = arith.cmpf ugt, %nan, %arg0 : f32
   %3 = arith.cmpf ugt, %arg0, %nan : f32
   return %0, %1, %2, %3 : i1, i1, i1, i1
@@ -2192,7 +2192,7 @@ func.func @test1(%arg0: i32) -> i1 {
 func.func @test2(%arg0: i32) -> i1 {
   %cst = arith.constant 0.000000e+00 : f64
   %1 = arith.uitofp %arg0: i32 to f64
-  %2 = arith.cmpf olt, %1, %cst : f64
+  %2 = arith.cmpf olt, %1, %cst : f64
   return %2 : i1
   // CHECK: %[[c0:.+]] = arith.constant 0 : i32
   // CHECK: arith.cmpi ult, %[[arg0]], %[[c0]] : i32
@@ -2234,7 +2234,7 @@ func.func @test5(%arg0: i32) -> i1 {
 func.func @test6(%arg0: i32) -> i1 {
   %cst = arith.constant -4.400000e+00 : f64
   %1 = arith.uitofp %arg0: i32 to f64
-  %2 = arith.cmpf olt, %1, %cst : f64
+  %2 = arith.cmpf olt, %1, %cst : f64
   return %2 : i1
   // CHECK: %[[false:.+]] = arith.constant false
   // CHECK: return %[[false]] : i1
diff --git a/mlir/test/Dialect/Arith/ops.mlir b/mlir/test/Dialect/Arith/ops.mlir
index f684e02344a517..2227ab76cc78f1 100644
--- a/mlir/test/Dialect/Arith/ops.mlir
+++ b/mlir/test/Dialect/Arith/ops.mlir
@@ -998,7 +998,7 @@ func.func @test_cmpf(%arg0 : f64, %arg1 : f64) -> i1 {
 
 // CHECK-LABEL: test_cmpf_tensor
 func.func @test_cmpf_tensor(%arg0 : tensor<8x8xf64>, %arg1 : tensor<8x8xf64>) -> tensor<8x8xi1> {
-  %0 = arith.cmpf olt, %arg0, %arg1 : tensor<8x8xf64>
+  %0 = arith.cmpf olt, %arg0, %arg1 : tensor<8x8xf64>
   return %0 : tensor<8x8xi1>
 }
 
diff --git a/mlir/test/Dialect/ControlFlow/canonicalize.mlir b/mlir/test/Dialect/ControlFlow/canonicalize.mlir
index 0ad6898fce86cf..c93b80f1803c80 100644
--- a/mlir/test/Dialect/ControlFlow/canonicalize.mlir
+++ b/mlir/test/Dialect/ControlFlow/canonicalize.mlir
@@ -146,7 +146,7 @@ func.func @cond_br_pass_through_fail(%cond : i1) {
 // CHECK-SAME: %[[FLAG:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_0:[a-zA-Z0-9_]+]]
 func.func @switch_only_default(%flag : i32, %caseOperand0 : f32) {
-  // add predecessors for all blocks to avoid other canonicalizations.
+  // add predecessors for all blocks to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2] : () -> ()
   ^bb1:
     // CHECK-NOT: cf.switch
@@ -166,7 +166,7 @@ func.func @switch_only_default(%flag : i32, %caseOperand0 : f32) {
 // CHECK-SAME: %[[CASE_OPERAND_0:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_1:[a-zA-Z0-9_]+]]
 func.func @switch_case_matching_default(%flag : i32, %caseOperand0 : f32, %caseOperand1 : f32) {
-  // add predecessors for all blocks to avoid other canonicalizations.
+  // add predecessors for all blocks to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb3] : () -> ()
   ^bb1:
     // CHECK: cf.switch %[[FLAG]]
@@ -191,7 +191,7 @@ func.func @switch_case_matching_default(%flag : i32, %caseOperand0 : f32, %caseO
 // CHECK-SAME: %[[CASE_OPERAND_1:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_2:[a-zA-Z0-9_]+]]
 func.func @switch_on_const_no_match(%caseOperand0 : f32, %caseOperand1 : f32, %caseOperand2 : f32) {
-  // add predecessors for all blocks to avoid other canonicalizations.
+  // add predecessors for all blocks to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb3, ^bb4] : () -> ()
   ^bb1:
     // CHECK-NOT: cf.switch
@@ -217,7 +217,7 @@ func.func @switch_on_const_no_match(%caseOperand0 : f32, %caseOperand1 : f32, %c
 // CHECK-SAME: %[[CASE_OPERAND_1:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_2:[a-zA-Z0-9_]+]]
 func.func @switch_on_const_with_match(%caseOperand0 : f32, %caseOperand1 : f32, %caseOperand2 : f32) {
-  // add predecessors for all blocks to avoid other canonicalizations.
+  // add predecessors for all blocks to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb3, ^bb4] : () -> ()
   ^bb1:
     // CHECK-NOT: cf.switch
@@ -249,7 +249,7 @@ func.func @switch_passthrough(%flag : i32,
                          %caseOperand1 : f32,
                          %caseOperand2 : f32,
                          %caseOperand3 : f32) {
-  // add predecessors for all blocks to avoid other canonicalizations.
+  // add predecessors for all blocks to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb3, ^bb4, ^bb5, ^bb6] : () -> ()
 
   ^bb1:
@@ -286,7 +286,7 @@ func.func @switch_passthrough(%flag : i32,
 // CHECK-SAME: %[[CASE_OPERAND_0:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_1:[a-zA-Z0-9_]+]]
 func.func @switch_from_switch_with_same_value_with_match(%flag : i32, %caseOperand0 : f32, %caseOperand1 : f32) {
-  // add predecessors for all blocks except ^bb3 to avoid other canonicalizations.
+  // add predecessors for all blocks except ^bb3 to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb4, ^bb5] : () -> ()
 
   ^bb1:
@@ -323,7 +323,7 @@ func.func @switch_from_switch_with_same_value_with_match(%flag : i32, %caseOpera
 // CHECK-SAME: %[[CASE_OPERAND_1:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_2:[a-zA-Z0-9_]+]]
 func.func @switch_from_switch_with_same_value_no_match(%flag : i32, %caseOperand0 : f32, %caseOperand1 : f32, %caseOperand2 : f32) {
-  // add predecessors for all blocks except ^bb3 to avoid other canonicalizations.
+  // add predecessors for all blocks except ^bb3 to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb4, ^bb5, ^bb6] : () -> ()
 
   ^bb1:
@@ -363,7 +363,7 @@ func.func @switch_from_switch_with_same_value_no_match(%flag : i32, %caseOperand
 // CHECK-SAME: %[[CASE_OPERAND_1:[a-zA-Z0-9_]+]]
 // CHECK-SAME: %[[CASE_OPERAND_2:[a-zA-Z0-9_]+]]
 func.func @switch_from_switch_default_with_same_value(%flag : i32, %caseOperand0 : f32, %caseOperand1 : f32, %caseOperand2 : f32) {
-  // add predecessors for all blocks except ^bb3 to avoid other canonicalizations.
+  // add predecessors for all blocks except ^bb3 to avoid other canonicalizations.
   "foo.pred"() [^bb1, ^bb2, ^bb4, ^bb5, ^bb6] : () -> ()
 
   ^bb1:
diff --git a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
index a6552e0a5264e3..4a5a201c988767 100644
--- a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
+++ b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
@@ -80,8 +80,8 @@ func.func @cmpf(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<i1> {
   // CHECK-SAME:  ins(%[[ARG0]], %[[ARG1]]
   // CHECK-SAME: outs(%[[INIT]]
   // CHECK: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: i1):
-  // CHECK: arith.cmpf olt, %{{.*}}, %{{.*}} : f32
-  %0 = arith.cmpf olt, %arg0, %arg1 : tensor<f32>
+  // CHECK: arith.cmpf olt, %{{.*}}, %{{.*}} : f32
+  %0 = arith.cmpf olt, %arg0, %arg1 : tensor<f32>
   return %0 : tensor<i1>
 }
 
@@ -103,8 +103,8 @@ func.func @cmpf(%arg0: tensor<4x?x?x8x2x?xf32>, %arg1: tensor<4x?x?x8x2x?xf32>)
   // CHECK-SAME:  ins(%[[ARG0]], %[[ARG1]]
   // CHECK-SAME: outs(%[[INIT]]
   // CHECK: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: i1):
-  // CHECK: arith.cmpf olt, %{{.*}}, %{{.*}} : f32
-  %0 = arith.cmpf olt, %arg0, %arg1 : tensor<4x?x?x8x2x?xf32>
+  // CHECK: arith.cmpf olt, %{{.*}}, %{{.*}} : f32
+  %0 = arith.cmpf olt, %arg0, %arg1 : tensor<4x?x?x8x2x?xf32>
   return %0 : tensor<4x?x?x8x2x?xi1>
 }
 
diff --git a/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir b/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
index 4bfed475d44f60..7eddba65ad509d 100644
--- a/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
+++ b/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
@@ -279,7 +279,7 @@ module attributes { transform.with_named_sequence } {
     } ins(%lhs, %rhs: tensor<2x4xf32>, tensor<4x3xf32>) outs(%out, %r: tensor<2x3xf32>, tensor<2x3xf32>) {
     ^bb0(%arg0: f32, %arg1: f32, %arg2: f32, %arg3: f32):
       %0 = arith.mulf %arg0, %arg1 : f32
-      %1 = arith.cmpf olt, %0, %arg2 : f32
+      %1 = arith.cmpf olt, %0, %arg2 : f32
       %2 = arith.select %1, %0, %arg2 : f32
       %3 = arith.select %1, %arg3, %0 : f32
       linalg.yield %2, %3 : f32, f32
diff --git a/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize-conv.mlir b/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize-conv.mlir
index 4bb40bef9fba2a..34cf86e8008995 100644
--- a/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize-conv.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize-conv.mlir
@@ -3,7 +3,7 @@
 // Demonstrates what happens when peeling the 4th loop (that corresponds to the
 // "depth" dimension in depthwise convs) followed by vectorization in the
 // presence of _scalable_ vectors (these are introduced through scalable
-// tiling). The main goal is to verify that canonicalizations fold away the
+// tiling). The main goal is to verify that canonicalizations fold away the
 // masks in the main loop.
 
 func.func @conv(%arg0: tensor<1x1080x1962x48xi32>, %arg1: tensor<1x43x48xi32>) -> tensor<1x1080x1920x48xi32> {
diff --git a/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir b/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir
index 05a032b1ece062..826362c54d220d 100644
--- a/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir
@@ -3,7 +3,7 @@
 // Demonstrates what happens when peeling the middle loop (2nd parallel
 // dimension) followed by vectorization in the presence of _scalable_ vectors
 // (these are introduced through scalable tiling). The main goal is to verify
-// that canonicalizations fold away the masks in the main loop.
+// that canonicalizations fold away the masks in the main loop.
 
 func.func @matmul(%A: tensor<1024x512xf32>,
                   %B: tensor<512x2000xf32>,
diff --git a/mlir/test/Dialect/Math/expand-math.mlir b/mlir/test/Dialect/Math/expand-math.mlir
index c10a78ca4ae4ca..d460ef2dae11a9 100644
--- a/mlir/test/Dialect/Math/expand-math.mlir
+++ b/mlir/test/Dialect/Math/expand-math.mlir
@@ -8,7 +8,7 @@ func.func @tanh(%arg: f32) -> f32 {
 // CHECK-DAG: %[[ZERO:.+]] = arith.constant 0.000000e+00 : f32
 // CHECK-DAG: %[[ONE:.+]] = arith.constant 1.000000e+00 : f32
 // CHECK-DAG: %[[TWO:.+]] = arith.constant -2.000000e+00 : f32
-// CHECK: %[[VAL0:.+]] = arith.cmpf olt, %arg0, %[[ZERO]] : f32
+// CHECK: %[[VAL0:.+]] = arith.cmpf olt, %arg0, %[[ZERO]] : f32
 // CHECK: %[[VAL1:.+]] = arith.uitofp %[[VAL0]] : i1 to f32
 // CHECK: %[[VAL2:.+]] = arith.mulf %[[VAL1]], %[[TWO]] : f32
 // CHECK: %[[SIGN:.+]] = arith.addf %[[VAL2]], %[[ONE]] : f32
@@ -141,7 +141,7 @@ func.func @floorf_func(%a: f64) -> f64 {
   // CHECK-NEXT:   [[CVTI:%.+]] = arith.fptosi [[ARG0]]
   // CHECK-NEXT:   [[CVTF:%.+]] = arith.sitofp [[CVTI]]
   // CHECK-NEXT:   [[COPYSIGN:%.+]] = math.copysign [[CVTF]], [[ARG0]]
-  // CHECK-NEXT:   [[COMP:%.+]] = arith.cmpf olt, [[ARG0]], [[CST]]
+  // CHECK-NEXT:   [[COMP:%.+]] = arith.cmpf olt, [[ARG0]], [[CST]]
   // CHECK-NEXT:   [[INCR:%.+]] = arith.select [[COMP]], [[CST_0]], [[CST]]
   // CHECK-NEXT:   [[ADDF:%.+]] = arith.addf [[COPYSIGN]], [[INCR]]
   // CHECK-NEXT:   return [[ADDF]]
@@ -231,7 +231,7 @@ func.func @powf_func(%a: f64, %b: f64) ->f64 {
   // CHECK-DAG: [[EXPR:%.+]] = math.exp [[MULT]]
   // CHECK-DAG: [[NEGEXPR:%.+]] = arith.mulf [[EXPR]], [[NEGONE]]
   // CHECK-DAG: [[REMF:%.+]] = arith.remf [[ARG1]], [[TWO]]
-  // CHECK-DAG: [[CMPNEG:%.+]] = arith.cmpf olt, [[ARG0]]
+  // CHECK-DAG: [[CMPNEG:%.+]] = arith.cmpf olt, [[ARG0]]
   // CHECK-DAG: [[CMPZERO:%.+]] = arith.cmpf one, [[REMF]]
   // CHECK-DAG: [[AND:%.+]] = arith.andi [[CMPZERO]], [[CMPNEG]]
   // CHECK-DAG: [[SEL:%.+]] = arith.select [[AND]], [[NEGEXPR]], [[EXPR]]
@@ -628,7 +628,7 @@ func.func @math_fpowi_to_powf_tensor(%0 : tensor<8xf32>, %1: tensor<8xi32>) -> t
 // CHECK:        %[[EXP:.*]] = math.exp %[[MUL]] : tensor<8xf32>
 // CHECK:        %[[MUL1:.*]] = arith.mulf %[[EXP]], %[[CSTNEG1]] : tensor<8xf32>
 // CHECK:        %[[REM:.*]] = arith.remf %[[TOFP]], %[[CST2]] : tensor<8xf32>
-// CHECK:        %[[CMPF:.*]] = arith.cmpf olt, %[[ARG0]], %[[CST0]] : tensor<8xf32>
+// CHECK:        %[[CMPF:.*]] = arith.cmpf olt, %[[ARG0]], %[[CST0]] : tensor<8xf32>
 // CHECK:        %[[CMPF1:.*]] = arith.cmpf one, %[[REM]], %[[CST0]] : tensor<8xf32>
 // CHECK:        %[[AND:.*]] = arith.andi %[[CMPF1]], %[[CMPF]] : tensor<8xi1>
 // CHECK:        %[[SEL:.*]] = arith.select %[[AND]], %[[MUL1]], %[[EXP]] : tensor<8xi1>, tensor<8xf32>
@@ -653,7 +653,7 @@ func.func @math_fpowi_to_powf_scalar(%0 : f32, %1: i64) -> f32 {
 // CHECK:        %[[EXP:.*]] = math.exp %[[MUL]] : f32
 // CHECK:        %[[MUL1:.*]] = arith.mulf %[[EXP]], %[[CSTNEG1]] : f32
 // CHECK:        %[[REM:.*]] = arith.remf %[[TOFP]], %[[CST2]] : f32
-// CHECK:        %[[CMPF:.*]] = arith.cmpf olt, %[[ARG0]], %[[CST0]] : f32
+// CHECK:        %[[CMPF:.*]] = arith.cmpf olt, %[[ARG0]], %[[CST0]] : f32
 // CHECK:        %[[CMPF1:.*]] = arith.cmpf one, %[[REM]], %[[CST0]] : f32
 // CHECK:        %[[AND:.*]] = arith.andi %[[CMPF1]], %[[CMPF]] : i1
 // CHECK:        %[[SEL:.*]] = arith.select %[[AND]], %[[MUL1]], %[[EXP]] : f32
diff --git a/mlir/test/Dialect/Math/polynomial-approximation.mlir b/mlir/test/Dialect/Math/polynomial-approximation.mlir
index 93ecd67f14dd3d..2dced1ef6d0333 100644
--- a/mlir/test/Dialect/Math/polynomial-approximation.mlir
+++ b/mlir/test/Dialect/Math/polynomial-approximation.mlir
@@ -37,10 +37,10 @@
 // CHECK-DAG:     %[[val_cst_26:.*]] = arith.constant 8.000000e-01 : f32
 // CHECK-DAG:     %[[val_cst_27:.*]] = arith.constant 2.000000e+00 : f32
 // CHECK-DAG:     %[[val_cst_28:.*]] = arith.constant 3.750000e+00 : f32
-// CHECK:         %[[val_0:.*]] = arith.cmpf olt, %[[val_arg0]], %[[val_cst]] : f32
+// CHECK:         %[[val_0:.*]] = arith.cmpf olt, %[[val_arg0]], %[[val_cst]] : f32
 // CHECK:         %[[val_1:.*]] = arith.negf %[[val_arg0]] : f32
 // CHECK:         %[[val_2:.*]] = arith.select %[[val_0]], %[[val_1]], %[[val_arg0]] : f32
-// CHECK:         %[[val_3:.*]] = arith.cmpf olt, %[[val_2]], %[[val_cst_26]] : f32
+// CHECK:         %[[val_3:.*]] = arith.cmpf olt, %[[val_2]], %[[val_cst_26]] : f32
 // CHECK:         %[[val_4:.*]] = arith.select %[[val_3]], %[[val_cst_1]], %[[val_cst_5]] : f32
 // CHECK:         %[[val_5:.*]] = arith.select %[[val_3]], %[[val_cst_14]], %[[val_cst_18]] : f32
 // CHECK:         %[[val_6:.*]] = arith.select %[[val_3]], %[[val_cst_2]], %[[val_cst_6]] : f32
@@ -49,7 +49,7 @@
 // CHECK:         %[[val_9:.*]] = arith.select %[[val_3]], %[[val_cst_16]], %[[val_cst_20]] : f32
 // CHECK:         %[[val_10:.*]] = arith.select %[[val_3]], %[[val_cst_4]], %[[val_cst_8]] : f32
 // CHECK:         %[[val_11:.*]] = arith.select %[[val_3]], %[[val_cst_17]], %[[val_cst_21]] : f32
-// CHECK:         %[[val_12:.*]] = arith.cmpf olt, %[[val_2]], %[[val_cst_27]] : f32
+// CHECK:         %[[val_12:.*]] = arith.cmpf olt, %[[val_2]], %[[val_cst_27]] : f32
 // CHECK:         %[[val_13:.*]] = arith.select %[[val_12]], %[[val_cst]], %[[val_cst_9]] : f32
 // CHECK:         %[[val_14:.*]] = arith.select %[[val_12]], %[[val_4]], %[[val_cst_10]] : f32
 // CHECK:         %[[val_15:.*]] = arith.select %[[val_12]], %[[val_5]], %[[val_cst_22]] : f32
@@ -253,7 +253,7 @@ func.func @exp_scalable_vector(%arg0: vector<[8]xf32>) -> vector<[8]xf32> {
 // CHECK-DAG:       %[[VAL_73:.*]] = arith.shrui %[[VAL_72]], %[[VAL_16]] : i32
 // CHECK-DAG:       %[[VAL_74:.*]] = arith.sitofp %[[VAL_73]] : i32 to f32
 // CHECK-DAG:       %[[VAL_75:.*]] = arith.subf %[[VAL_74]], %[[VAL_34]] : f32
-// CHECK-DAG:       %[[VAL_76:.*]] = arith.cmpf olt, %[[VAL_71]], %[[VAL_24]] : f32
+// CHECK-DAG:       %[[VAL_76:.*]] = arith.cmpf olt, %[[VAL_71]], %[[VAL_24]] : f32
 // CHECK-DAG:       %[[VAL_77:.*]] = arith.select %[[VAL_76]], %[[VAL_71]], %[[VAL_18]] : f32
 // CHECK-DAG:       %[[VAL_78:.*]] = arith.subf %[[VAL_71]], %[[VAL_1]] : f32
 // CHECK-DAG:       %[[VAL_79:.*]] = arith.select %[[VAL_76]], %[[VAL_1]], %[[VAL_18]] : f32
@@ -353,7 +353,7 @@ func.func @expm1_scalable_vector(%arg0: vector<8x[8]xf32>) -> vector<8x[8]xf32>
 // CHECK:           %[[VAL_30:.*]] = arith.shrui %[[VAL_29]], %[[VAL_21]] : i32
 // CHECK:           %[[VAL_31:.*]] = arith.sitofp %[[VAL_30]] : i32 to f32
 // CHECK:           %[[VAL_32:.*]] = arith.subf %[[VAL_31]], %[[VAL_18]] : f32
-// CHECK:           %[[VAL_33:.*]] = arith.cmpf olt, %[[VAL_28]], %[[VAL_8]] : f32
+// CHECK:           %[[VAL_33:.*]] = arith.cmpf olt, %[[VAL_28]], %[[VAL_8]] : f32
 // CHECK:           %[[VAL_34:.*]] = arith.select %[[VAL_33]], %[[VAL_28]], %[[VAL_1]] : f32
 // CHECK:           %[[VAL_35:.*]] = arith.subf %[[VAL_28]], %[[VAL_2]] : f32
 // CHECK:           %[[VAL_36:.*]] = arith.select %[[VAL_33]], %[[VAL_2]], %[[VAL_1]] : f32
@@ -512,7 +512,7 @@ func.func @log1p_scalable_vector(%arg0: vector<[8]xf32>) -> vector<[8]xf32> {
 // CHECK:           %[[VAL_17:.*]] = arith.cmpf ugt, %[[VAL_16]], %[[VAL_1]] : f32
 // CHECK:           %[[VAL_18:.*]] = arith.select %[[VAL_17]], %[[VAL_16]], %[[VAL_1]] : f32
 // CHECK:           %[[VAL_19:.*]] = math.absf %[[VAL_0]] : f32
-// CHECK:           %[[VAL_20:.*]] = arith.cmpf olt, %[[VAL_19]], %[[VAL_3]] : f32
+// CHECK:           %[[VAL_20:.*]] = arith.cmpf olt, %[[VAL_19]], %[[VAL_3]] : f32
 // CHECK:           %[[VAL_21:.*]] = arith.mulf %[[VAL_18]], %[[VAL_18]] : f32
 // CHECK:           %[[VAL_22:.*]] = math.fma %[[VAL_21]], %[[VAL_10]], %[[VAL_9]] : f32
 // CHECK:           %[[VAL_23:.*]] = math.fma %[[VAL_21]], %[[VAL_22]], %[[VAL_8]] : f32
@@ -578,7 +578,7 @@ func.func @rsqrt_scalar(%arg0: f32) -> f32 {
 // AVX2:   %[[VAL_3:.*]] = arith.constant dense<-5.000000e-01> : vector<8xf32>
 // AVX2:   %[[VAL_4:.*]] = arith.constant dense<1.17549435E-38> : vector<8xf32>
 // AVX2:   %[[VAL_5:.*]] = arith.mulf %[[VAL_0]], %[[VAL_3]] : vector<8xf32>
-// AVX2:   %[[VAL_6:.*]] = arith.cmpf olt, %[[VAL_0]], %[[VAL_4]] : vector<8xf32>
+// AVX2:   %[[VAL_6:.*]] = arith.cmpf olt, %[[VAL_0]], %[[VAL_4]] : vector<8xf32>
 // AVX2:   %[[VAL_7:.*]] = arith.cmpf oeq, %[[VAL_0]], %[[VAL_1]] : vector<8xf32>
 // AVX2:   %[[VAL_8:.*]] = arith.ori %[[VAL_6]], %[[VAL_7]] : vector<8xi1>
 // AVX2:   %[[VAL_9:.*]] = x86vector.avx.rsqrt %[[VAL_0]] : vector<8xf32>
@@ -788,7 +788,7 @@ func.func @atan_scalar(%arg0: f32) -> f32 {
 // CHECK-DAG:       %[[VAL_58:.*]] = arith.cmpf ogt, %[[VAL_21]], %[[VAL_17]] : f32
 // CHECK-DAG:       %[[VAL_59:.*]] = arith.andi %[[VAL_57]], %[[VAL_58]] : i1
 // CHECK-DAG:       %[[VAL_60:.*]] = arith.select %[[VAL_59]], %[[VAL_16]], %[[VAL_56]] : f32
-// CHECK-DAG:       %[[VAL_61:.*]] = arith.cmpf olt, %[[VAL_21]], %[[VAL_17]] : f32
+// CHECK-DAG:       %[[VAL_61:.*]] = arith.cmpf olt, %[[VAL_21]], %[[VAL_17]] : f32
 // CHECK-DAG:       %[[VAL_62:.*]] = arith.andi %[[VAL_57]], %[[VAL_61]] : i1
 // CHECK-DAG:       %[[VAL_63:.*]] = arith.select %[[VAL_62]], %[[VAL_19]], %[[VAL_60]] : f32
 // CHECK-DAG:       %[[VAL_64:.*]] = arith.cmpf oeq, %[[VAL_21]], %[[VAL_17]] : f32
diff --git a/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir b/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir
index 8aac802ba10ae9..eb58f7861890c1 100644
--- a/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir
+++ b/mlir/test/Dialect/MemRef/expand-strided-metadata.mlir
@@ -972,7 +972,7 @@ func.func @simplify_collapse(%arg : memref<?x?x4x?x6x7xi32>)
 // Size 0 = origSize0 * origSize1
 //        = 3 * 1
 //        = 3
-// Stride 0 = min(origStride_i, for all i in reassocation group and dim_i != 1)
+// Stride 0 = min(origStride_i, for all i in reassociation group and dim_i != 1)
 //          = min(origStride0)
 //          = min(2)
 //          = 2
diff --git a/mlir/test/Dialect/PDLInterp/ops.mlir b/mlir/test/Dialect/PDLInterp/ops.mlir
index ef9cefe813a5b1..00dd586943d546 100644
--- a/mlir/test/Dialect/PDLInterp/ops.mlir
+++ b/mlir/test/Dialect/PDLInterp/ops.mlir
@@ -7,7 +7,7 @@
 // -----
 
 // Unused operation to force loading the `arithmetic` dialect for the
-// test of type inferrence.
+// test of type inference.
 arith.constant true
 
 func.func @operations(%attribute: !pdl.attribute,
diff --git a/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir b/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
index 6fb475efcb6586..a6b447105f10a0 100644
--- a/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
@@ -128,7 +128,7 @@ func.func @scf_for_canonicalize_partly(%A : memref<i64>) {
 //       CHECK:   arith.index_cast
 func.func @scf_for_not_canonicalizable_2(%A : memref<i64>, %step : index) {
   // This example should simplify but affine_map is currently missing
-  // semi-affine canonicalizations: `((s0 * 42 - 1) floordiv s0) * s0`
+  // semi-affine canonicalizations: `((s0 * 42 - 1) floordiv s0) * s0`
   // should evaluate to 41 * s0.
   // Note that this may require positivity assumptions on `s0`.
   // Revisit when support is added.
@@ -151,7 +151,7 @@ func.func @scf_for_not_canonicalizable_2(%A : memref<i64>, %step : index) {
 //       CHECK:   arith.index_cast
 func.func @scf_for_not_canonicalizable_3(%A : memref<i64>, %step : index) {
   // This example should simplify but affine_map is currently missing
-  // semi-affine canonicalizations: `-(((s0 * s0 - 1) floordiv s0) * s0)`
+  // semi-affine canonicalizations: `-(((s0 * s0 - 1) floordiv s0) * s0)`
   // should evaluate to (s0 - 1) * s0.
   // Note that this may require positivity assumptions on `s0`.
   // Revisit when support is added.
diff --git a/mlir/test/Dialect/Vector/vector-sink.mlir b/mlir/test/Dialect/Vector/vector-sink.mlir
index 5a3699333265c1..d0db4a902cb109 100644
--- a/mlir/test/Dialect/Vector/vector-sink.mlir
+++ b/mlir/test/Dialect/Vector/vector-sink.mlir
@@ -282,13 +282,13 @@ func.func @transpose_elementwise_diff_operand_types(%cond: vector<4x2xi1>, %a :
 
 // CHECK-LABEL: func @transpose_elementwise_diff_operand_result_type
 //  CHECK-SAME: (%[[A:.+]]: vector<4x2xf32>, %[[B:.+]]: vector<4x2xf32>)
-//       CHECK:   %[[CMP:.+]] = arith.cmpf olt, %[[A]], %[[B]] : vector<4x2xf32>
+//       CHECK:   %[[CMP:.+]] = arith.cmpf olt, %[[A]], %[[B]] : vector<4x2xf32>
 //       CHECK:   %[[T:.+]] = vector.transpose %[[CMP]], [1, 0] : vector<4x2xi1> to vector<2x4xi1>
 //       CHECK:   return %[[T]]
 func.func @transpose_elementwise_diff_operand_result_type(%a : vector<4x2xf32>, %b : vector<4x2xf32>) -> vector<2x4xi1> {
   %at = vector.transpose %a, [1, 0]: vector<4x2xf32> to vector<2x4xf32>
   %bt = vector.transpose %b, [1, 0]: vector<4x2xf32> to vector<2x4xf32>
-  %r = arith.cmpf olt, %at, %bt : vector<2x4xf32>
+  %r = arith.cmpf olt, %at, %bt : vector<2x4xf32>
   return %r : vector<2x4xi1>
 }
 
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 9858bcd1c05e7b..283a2afa8917b9 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -84,8 +84,8 @@ func.func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
   // CHECK: %{{.*}} = arith.cmpf oeq, %{{.*}}, %{{.*}} : f32
   %66 = "arith.cmpf"(%f3, %f4) {predicate = 1} : (f32, f32) -> i1
 
-  // CHECK: %{{.*}} = arith.cmpf olt, %{{.*}}, %{{.*}}: vector<4xf32>
-  %67 = arith.cmpf olt, %vcf32, %vcf32 : vector<4 x f32>
+  // CHECK: %{{.*}} = arith.cmpf olt, %{{.*}}, %{{.*}}: vector<4xf32>
+  %67 = arith.cmpf olt, %vcf32, %vcf32 : vector<4 x f32>
 
   // CHECK: %{{.*}} = arith.cmpf oeq, %{{.*}}, %{{.*}}: vector<4xf32>
   %68 = "arith.cmpf"(%vcf32, %vcf32) {predicate = 1} : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x i1>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index c59993abb16735..995047065b11fd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -57,7 +57,7 @@ module {
           %1 = sparse_tensor.binary %a, %b : bf16, bf16 to bf16
             overlap={
               ^bb0(%a0: bf16, %b0: bf16):
-                %cmp = arith.cmpf "olt", %a0, %b0 : bf16
+                %cmp = arith.cmpf "olt", %a0, %b0 : bf16
                 %2 = arith.select %cmp, %a0, %b0: bf16
                 sparse_tensor.yield %2 : bf16
             }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index 1eb33475038340..0effc0ee217609 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -58,7 +58,7 @@ module {
           %1 = sparse_tensor.binary %a, %b : f16, f16 to f16
             overlap={
               ^bb0(%a0: f16, %b0: f16):
-                %cmp = arith.cmpf "olt", %a0, %b0 : f16
+                %cmp = arith.cmpf "olt", %a0, %b0 : f16
                 %2 = arith.select %cmp, %a0, %b0: f16
                 sparse_tensor.yield %2 : f16
             }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
index cb6af2daf1c007..c1fa2bf32f96ce 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
@@ -75,7 +75,7 @@ module {
             right={}
           %2 = sparse_tensor.reduce %1, %output, %maxf : f64 {
               ^bb0(%x: f64, %y: f64):
-                %cmp = arith.cmpf "olt", %x, %y : f64
+                %cmp = arith.cmpf "olt", %x, %y : f64
                 %3 = arith.select %cmp, %x, %y : f64
                 sparse_tensor.yield %3 : f64
             }
@@ -106,7 +106,7 @@ module {
             right={}
           %2 = sparse_tensor.reduce %1, %output, %maxf : f64 {
               ^bb0(%x: f64, %y: f64):
-                %cmp = arith.cmpf "olt", %x, %y : f64
+                %cmp = arith.cmpf "olt", %x, %y : f64
                 %3 = arith.select %cmp, %x, %y : f64
                 sparse_tensor.yield %3 : f64
             }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
index 0d29afb27c5c2a..e026ec5e75638a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
@@ -162,7 +162,7 @@ module {
               ^bb0(%x0: f64):
                 %mincmp = arith.cmpf "ogt", %x0, %cfmin : f64
                 %x1 = arith.select %mincmp, %x0, %cfmin : f64
-                %maxcmp = arith.cmpf "olt", %x1, %cfmax : f64
+                %maxcmp = arith.cmpf "olt", %x1, %cfmax : f64
                 %x2 = arith.select %maxcmp, %x1, %cfmax : f64
                 sparse_tensor.yield %x2 : f64
             }
diff --git a/mlir/test/Integration/GPU/SYCL/gpu-reluf32-to-spirv.mlir b/mlir/test/Integration/GPU/SYCL/gpu-reluf32-to-spirv.mlir
index 162a793305e972..ec067fe4b4c596 100644
--- a/mlir/test/Integration/GPU/SYCL/gpu-reluf32-to-spirv.mlir
+++ b/mlir/test/Integration/GPU/SYCL/gpu-reluf32-to-spirv.mlir
@@ -60,7 +60,7 @@ module @relu attributes {gpu.container_module} {
       %0 = gpu.block_id  x
       %1 = gpu.block_id  y
       %2 = memref.load %arg0[%0, %1] : memref<4x5xf32>
-      %3 = arith.cmpf olt, %2, %arg1 : f32
+      %3 = arith.cmpf olt, %2, %arg1 : f32
       memref.store %3, %arg2[%0, %1] : memref<4x5xi1>
       gpu.return
     }
diff --git a/mlir/test/Rewrite/pdl-bytecode.mlir b/mlir/test/Rewrite/pdl-bytecode.mlir
index f8e4f2e83b296a..9f20b962094f98 100644
--- a/mlir/test/Rewrite/pdl-bytecode.mlir
+++ b/mlir/test/Rewrite/pdl-bytecode.mlir
@@ -638,7 +638,7 @@ module @ir attributes { test.check_types_1 } {
 //===----------------------------------------------------------------------===//
 
 // Unused operation to force loading the `arithmetic` dialect for the
-// test of type inferrence.
+// test of type inference.
 arith.constant 10
 
 // Test support for inferring the types of an operation.
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
index f75c79ea633804..a9ddde2c28bada 100644
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -112,8 +112,8 @@ define <4 x i1> @fp_compare(float %arg1, float %arg2, <4 x double> %arg3, <4 x d
   %3 = fcmp ogt float %arg1, %arg2
   ; CHECK:  llvm.fcmp "oge" %[[ARG1]], %[[ARG2]] : f32
   %4 = fcmp oge float %arg1, %arg2
-  ; CHECK:  llvm.fcmp "olt" %[[ARG1]], %[[ARG2]] : f32
-  %5 = fcmp olt float %arg1, %arg2
+  ; CHECK:  llvm.fcmp "olt" %[[ARG1]], %[[ARG2]] : f32
+  %5 = fcmp olt float %arg1, %arg2
   ; CHECK:  llvm.fcmp "ole" %[[ARG1]], %[[ARG2]] : f32
   %6 = fcmp ole float %arg1, %arg2
   ; CHECK:  llvm.fcmp "one" %[[ARG1]], %[[ARG2]] : f32
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index 04be037978c8f6..4828cb90370d00 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1348,7 +1348,7 @@ llvm.func @fcmp(%arg0: f32, %arg1: f32) {
   // CHECK: fcmp oeq float %0, %1
   // CHECK-NEXT: fcmp ogt float %0, %1
   // CHECK-NEXT: fcmp oge float %0, %1
-  // CHECK-NEXT: fcmp olt float %0, %1
+  // CHECK-NEXT: fcmp olt float %0, %1
   // CHECK-NEXT: fcmp ole float %0, %1
   // CHECK-NEXT: fcmp one float %0, %1
   // CHECK-NEXT: fcmp ord float %0, %1
@@ -1362,7 +1362,7 @@ llvm.func @fcmp(%arg0: f32, %arg1: f32) {
   %0 = llvm.fcmp "oeq" %arg0, %arg1 : f32
   %1 = llvm.fcmp "ogt" %arg0, %arg1 : f32
   %2 = llvm.fcmp "oge" %arg0, %arg1 : f32
-  %3 = llvm.fcmp "olt" %arg0, %arg1 : f32
+  %3 = llvm.fcmp "olt" %arg0, %arg1 : f32
   %4 = llvm.fcmp "ole" %arg0, %arg1 : f32
   %5 = llvm.fcmp "one" %arg0, %arg1 : f32
   %6 = llvm.fcmp "ord" %arg0, %arg1 : f32
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index 981757aed9b1d6..3c8faae0039275 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -678,7 +678,7 @@ func.func @cmpf_normal_numbers() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1,
   // CHECK-SAME: [[T]],
   %3 = arith.cmpf oge, %c42, %cm1 : f32
   // CHECK-SAME: [[F]],
-  %4 = arith.cmpf olt, %c42, %cm1 : f32
+  %4 = arith.cmpf olt, %c42, %cm1 : f32
   // CHECK-SAME: [[F]],
   %5 = arith.cmpf ole, %c42, %cm1 : f32
   // CHECK-SAME: [[T]],
@@ -721,7 +721,7 @@ func.func @cmpf_nan() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
   // CHECK-SAME: [[F]],
   %3 = arith.cmpf oge, %c42, %cqnan : f32
   // CHECK-SAME: [[F]],
-  %4 = arith.cmpf olt, %c42, %cqnan : f32
+  %4 = arith.cmpf olt, %c42, %cqnan : f32
   // CHECK-SAME: [[F]],
   %5 = arith.cmpf ole, %c42, %cqnan : f32
   // CHECK-SAME: [[F]],
@@ -764,7 +764,7 @@ func.func @cmpf_inf() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
   // CHECK-SAME: [[F]],
   %3 = arith.cmpf oge, %c42, %cpinf: f32
   // CHECK-SAME: [[T]],
-  %4 = arith.cmpf olt, %c42, %cpinf: f32
+  %4 = arith.cmpf olt, %c42, %cpinf: f32
   // CHECK-SAME: [[T]],
   %5 = arith.cmpf ole, %c42, %cpinf: f32
   // CHECK-SAME: [[T]],
diff --git a/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp b/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
index 56f309f150ca5d..3e9eebab114f10 100644
--- a/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
+++ b/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
@@ -41,7 +41,7 @@ struct TestDataLayoutQuery
       uint64_t alignment = layout.getTypeABIAlignment(op.getType());
       uint64_t preferred = layout.getTypePreferredAlignment(op.getType());
       uint64_t index = layout.getTypeIndexBitwidth(op.getType()).value_or(0);
-      Attribute endianness = layout.getEndianness();
+      Attribute endianness = layout.getEndianness();
       Attribute allocaMemorySpace = layout.getAllocaMemorySpace();
       Attribute programMemorySpace = layout.getProgramMemorySpace();
       Attribute globalMemorySpace = layout.getGlobalMemorySpace();
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 9e19966414d1d7..2527424b81d1b3 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -1344,7 +1344,7 @@ def TestInvolutionTraitFailingOperationFolderOp
 }
 
 def TestInvolutionTraitSuccesfulOperationFolderOp
- : TEST_Op<"op_involution_trait_succesful_operation_fold",
+ : TEST_Op<"op_involution_trait_successful_operation_fold",
            [SameOperandsAndResultType, NoMemoryEffect, Involution]> {
   let arguments = (ins I32:$op1);
   let results = (outs I32);
diff --git a/mlir/test/lib/IR/TestSymbolUses.cpp b/mlir/test/lib/IR/TestSymbolUses.cpp
index b470b15c533b57..0ec06b2cdfda03 100644
--- a/mlir/test/lib/IR/TestSymbolUses.cpp
+++ b/mlir/test/lib/IR/TestSymbolUses.cpp
@@ -25,7 +25,7 @@ struct SymbolUsesPass
   }
   WalkResult operateOnSymbol(Operation *symbol, ModuleOp module,
                              SmallVectorImpl<func::FuncOp> &deadFunctions) {
-    // Test computing uses on a non symboltable op.
+    // Test computing uses on a non symboltable op.
     std::optional<SymbolTable::UseRange> symbolUses =
         SymbolTable::getSymbolUses(symbol);
 
diff --git a/mlir/test/mlir-tblgen/trait.mlir b/mlir/test/mlir-tblgen/trait.mlir
index 97279c374f7e3b..a3a8d4c9e20202 100644
--- a/mlir/test/mlir-tblgen/trait.mlir
+++ b/mlir/test/mlir-tblgen/trait.mlir
@@ -53,9 +53,9 @@ func.func @testFailingOperationFolder(%arg0: i32) -> i32 {
 // CHECK-LABEL: func @testInhibitInvolution
 // CHECK-SAME:  ([[ARG0:%.+]]: i32)
 func.func @testInhibitInvolution(%arg0: i32) -> i32 {
-  // CHECK: [[OP:%.+]] = "test.op_involution_trait_succesful_operation_fold"([[ARG0]])
-  %0 = "test.op_involution_trait_succesful_operation_fold"(%arg0) : (i32) -> i32
-  %1 = "test.op_involution_trait_succesful_operation_fold"(%0) : (i32) -> i32
+  // CHECK: [[OP:%.+]] = "test.op_involution_trait_successful_operation_fold"([[ARG0]])
+  %0 = "test.op_involution_trait_successful_operation_fold"(%arg0) : (i32) -> i32
+  %1 = "test.op_involution_trait_successful_operation_fold"(%0) : (i32) -> i32
   // CHECK: return [[OP]]
   return %1: i32
 }
diff --git a/mlir/tools/mlir-vulkan-runner/VulkanRuntime.cpp b/mlir/tools/mlir-vulkan-runner/VulkanRuntime.cpp
index 9f653b2900f496..ee10448849b1a6 100644
--- a/mlir/tools/mlir-vulkan-runner/VulkanRuntime.cpp
+++ b/mlir/tools/mlir-vulkan-runner/VulkanRuntime.cpp
@@ -240,7 +240,7 @@ LogicalResult VulkanRuntime::createInstance() {
 
   std::vector<const char *> extNames;
 #if defined(__APPLE__)
-  // enumerate MoltenVK for Vulkan 1.0
+  // enumerate MoltenVK for Vulkan 1.0
   instanceCreateInfo.flags = VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR;
   // add KHR portability instance extensions
   extNames.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
diff --git a/mlir/unittests/Debug/FileLineColLocBreakpointManagerTest.cpp b/mlir/unittests/Debug/FileLineColLocBreakpointManagerTest.cpp
index 5b48e80749c8b8..bca2567ea71b8c 100644
--- a/mlir/unittests/Debug/FileLineColLocBreakpointManagerTest.cpp
+++ b/mlir/unittests/Debug/FileLineColLocBreakpointManagerTest.cpp
@@ -110,7 +110,7 @@ TEST(FileLineColLocBreakpointManager, OperationMatch) {
   };
   checkMatchIdxs({1});
 
-  // Check that disabling the breakpoing brings us back to the original
+  // Check that disabling the breakpoint brings us back to the original
   // behavior.
   breakpoint->disable();
   checkNoMatch();
diff --git a/mlir/unittests/Dialect/OpenACC/OpenACCOpsTest.cpp b/mlir/unittests/Dialect/OpenACC/OpenACCOpsTest.cpp
index 452f39d8cae9f7..7f8b28d495954d 100644
--- a/mlir/unittests/Dialect/OpenACC/OpenACCOpsTest.cpp
+++ b/mlir/unittests/Dialect/OpenACC/OpenACCOpsTest.cpp
@@ -1,4 +1,4 @@
-//===- OpenACCOpsTest.cpp - OpenACC ops extra functiosn Tests -------------===//
+//===- OpenACCOpsTest.cpp - OpenACC ops extra functions Tests -------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/mlir/unittests/IR/AttrTypeReplacerTest.cpp b/mlir/unittests/IR/AttrTypeReplacerTest.cpp
index c7b42eb267c7ad..da4baaee4b2d3b 100644
--- a/mlir/unittests/IR/AttrTypeReplacerTest.cpp
+++ b/mlir/unittests/IR/AttrTypeReplacerTest.cpp
@@ -141,13 +141,13 @@ TEST_F(CyclicAttrTypeReplacerChainRecursionPruningTest, testPruneSpecific1) {
 }
 
 //===----------------------------------------------------------------------===//
-// CyclicAttrTypeReplacerTest: BranchingRecusion
+// CyclicAttrTypeReplacerTest: BranchingRecursion
 //===----------------------------------------------------------------------===//
 
-class CyclicAttrTypeReplacerBranchingRecusionPruningTest
+class CyclicAttrTypeReplacerBranchingRecursionPruningTest
     : public ::testing::Test {
 public:
-  CyclicAttrTypeReplacerBranchingRecusionPruningTest() : b(&ctx) {
+  CyclicAttrTypeReplacerBranchingRecursionPruningTest() : b(&ctx) {
     // IntegerType<width = N>
     // ==> FunctionType<
     //       IntegerType< width = (N+1) % 3> =>
@@ -181,7 +181,7 @@ class CyclicAttrTypeReplacerBranchingRecusionPruningTest
   int invokeCount = 0;
 };
 
-TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneAnywhere0) {
+TEST_F(CyclicAttrTypeReplacerBranchingRecursionPruningTest, testPruneAnywhere0) {
   setBaseCase(std::nullopt);
 
   // No recursion case.
@@ -203,7 +203,7 @@ TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneAnywhere0) {
   EXPECT_EQ(invokeCount, 2);
 }
 
-TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneAnywhere1) {
+TEST_F(CyclicAttrTypeReplacerBranchingRecursionPruningTest, testPruneAnywhere1) {
   setBaseCase(std::nullopt);
 
   // Starting at 1. Cycle length is 3.
@@ -212,7 +212,7 @@ TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneAnywhere1) {
   EXPECT_EQ(invokeCount, 3);
 }
 
-TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneSpecific0) {
+TEST_F(CyclicAttrTypeReplacerBranchingRecursionPruningTest, testPruneSpecific0) {
   setBaseCase(0);
 
   // Starting at 0. Cycle length is 3.
@@ -221,7 +221,7 @@ TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneSpecific0) {
   EXPECT_EQ(invokeCount, 3);
 }
 
-TEST_F(CyclicAttrTypeReplacerBranchingRecusionPruningTest, testPruneSpecific1) {
+TEST_F(CyclicAttrTypeReplacerBranchingRecursionPruningTest, testPruneSpecific1) {
   setBaseCase(0);
 
   // Starting at 1. Cycle length is 5 (1 -> 2 -> 0 -> 1 -> 2 -> Prune).
diff --git a/mlir/unittests/IR/InterfaceAttachmentTest.cpp b/mlir/unittests/IR/InterfaceAttachmentTest.cpp
index b6066dd5685dc6..6fd9bce601a994 100644
--- a/mlir/unittests/IR/InterfaceAttachmentTest.cpp
+++ b/mlir/unittests/IR/InterfaceAttachmentTest.cpp
@@ -39,7 +39,7 @@ struct Model
   static unsigned staticGetSomeValuePlusArg(unsigned arg) { return 42 + arg; }
 };
 
-/// External interface model for the float type. Provides non-deafult and
+/// External interface model for the float type. Provides non-default and
 /// overrides default methods.
 struct OverridingModel
     : public TestExternalTypeInterface::ExternalModel<OverridingModel,
@@ -277,7 +277,7 @@ struct TestExternalOpModel
   }
 };
 
-/// External interface model for the func operation. Provides non-deafult and
+/// External interface model for the func operation. Provides non-default and
 /// overrides default methods.
 struct TestExternalOpOverridingModel
     : public TestExternalOpInterface::FallbackModel<
@@ -306,7 +306,7 @@ TEST(InterfaceAttachment, Operation) {
       builder.create<ModuleOp>(UnknownLoc::get(&context));
   ASSERT_FALSE(isa<TestExternalOpInterface>(moduleOp->getOperation()));
 
-  // We can attach an external interface and now the operaiton has it.
+  // We can attach an external interface and now the operation has it.
   ModuleOp::attachInterface<TestExternalOpModel>(context);
   auto iface = dyn_cast<TestExternalOpInterface>(moduleOp->getOperation());
   ASSERT_TRUE(iface != nullptr);
diff --git a/mlir/unittests/IR/OpPropertiesTest.cpp b/mlir/unittests/IR/OpPropertiesTest.cpp
index 365775d541ec3d..aa5f075e860ffb 100644
--- a/mlir/unittests/IR/OpPropertiesTest.cpp
+++ b/mlir/unittests/IR/OpPropertiesTest.cpp
@@ -16,7 +16,7 @@
 using namespace mlir;
 
 namespace {
-/// Simple structure definining a struct to define "properties" for a given
+/// Simple structure defining a struct to define "properties" for a given
 /// operation. Default values are honored when creating an operation.
 struct TestProperties {
   int a = -1;
diff --git a/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp b/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
index b667785c16f162..eb576617ba9211 100644
--- a/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
+++ b/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
@@ -22,7 +22,7 @@ using namespace mlir;
 
 namespace {
 constexpr static llvm::StringLiteral kAttrName = "dltest.layout";
-constexpr static llvm::StringLiteral kEndiannesKeyName = "dltest.endianness";
+constexpr static llvm::StringLiteral kEndiannessKeyName = "dltest.endianness";
 constexpr static llvm::StringLiteral kAllocaKeyName =
     "dltest.alloca_memory_space";
 constexpr static llvm::StringLiteral kProgramKeyName =
@@ -77,8 +77,8 @@ struct CustomDataLayoutSpec
   }
   DataLayoutEntryListRef getEntries() const { return getImpl()->entries; }
   LogicalResult verifySpec(Location loc) { return success(); }
-  StringAttr getEndiannessIdentifier(MLIRContext *context) const {
-    return Builder(context).getStringAttr(kEndiannesKeyName);
+  StringAttr getEndiannessIdentifier(MLIRContext *context) const {
+    return Builder(context).getStringAttr(kEndiannessKeyName);
   }
   StringAttr getAllocaMemorySpaceIdentifier(MLIRContext *context) const {
     return Builder(context).getStringAttr(kAllocaKeyName);
@@ -189,7 +189,7 @@ struct SingleQueryType
     return 4;
   }
 
-  Attribute getEndianness(DataLayoutEntryInterface entry) {
+  Attribute getEndianness(DataLayoutEntryInterface entry) {
     static bool executed = false;
     if (executed)
       llvm::report_fatal_error("repeated call");
@@ -463,7 +463,7 @@ module {}
   EXPECT_EQ(layout.getTypePreferredAlignment(IntegerType::get(&ctx, 42)), 8u);
   EXPECT_EQ(layout.getTypePreferredAlignment(Float16Type::get(&ctx)), 2u);
 
-  EXPECT_EQ(layout.getEndianness(), Attribute());
+  EXPECT_EQ(layout.getEndianness(), Attribute());
   EXPECT_EQ(layout.getAllocaMemorySpace(), Attribute());
   EXPECT_EQ(layout.getProgramMemorySpace(), Attribute());
   EXPECT_EQ(layout.getGlobalMemorySpace(), Attribute());
@@ -495,7 +495,7 @@ TEST(DataLayout, NullSpec) {
   EXPECT_EQ(layout.getTypeIndexBitwidth(Float16Type::get(&ctx)), std::nullopt);
   EXPECT_EQ(layout.getTypeIndexBitwidth(IndexType::get(&ctx)), 64u);
 
-  EXPECT_EQ(layout.getEndianness(), Attribute());
+  EXPECT_EQ(layout.getEndianness(), Attribute());
   EXPECT_EQ(layout.getAllocaMemorySpace(), Attribute());
   EXPECT_EQ(layout.getProgramMemorySpace(), Attribute());
   EXPECT_EQ(layout.getGlobalMemorySpace(), Attribute());
@@ -535,7 +535,7 @@ TEST(DataLayout, EmptySpec) {
   EXPECT_EQ(layout.getTypeIndexBitwidth(Float16Type::get(&ctx)), std::nullopt);
   EXPECT_EQ(layout.getTypeIndexBitwidth(IndexType::get(&ctx)), 64u);
 
-  EXPECT_EQ(layout.getEndianness(), Attribute());
+  EXPECT_EQ(layout.getEndianness(), Attribute());
   EXPECT_EQ(layout.getAllocaMemorySpace(), Attribute());
   EXPECT_EQ(layout.getProgramMemorySpace(), Attribute());
   EXPECT_EQ(layout.getGlobalMemorySpace(), Attribute());
@@ -593,7 +593,7 @@ TEST(DataLayout, SpecWithEntries) {
   EXPECT_EQ(layout.getTypePreferredAlignment(IntegerType::get(&ctx, 32)), 64u);
   EXPECT_EQ(layout.getTypePreferredAlignment(Float32Type::get(&ctx)), 64u);
 
-  EXPECT_EQ(layout.getEndianness(), Builder(&ctx).getStringAttr("little"));
+  EXPECT_EQ(layout.getEndianness(), Builder(&ctx).getStringAttr("little"));
   EXPECT_EQ(layout.getAllocaMemorySpace(), Builder(&ctx).getI32IntegerAttr(5));
   EXPECT_EQ(layout.getProgramMemorySpace(), Builder(&ctx).getI32IntegerAttr(3));
   EXPECT_EQ(layout.getGlobalMemorySpace(), Builder(&ctx).getI32IntegerAttr(2));
@@ -645,7 +645,7 @@ TEST(DataLayout, Caching) {
   // The second call should hit the cache. If it does not, the function in
   // SingleQueryType will be called and will abort the process.
   sum += layout.getTypeSize(SingleQueryType::get(&ctx));
-  // Make sure the complier doesn't optimize away the query code.
+  // Make sure the compiler doesn't optimize away the query code.
   EXPECT_EQ(sum, 2u);
 
   // A fresh data layout has a new cache, so the call to it should be dispatched
diff --git a/mlir/utils/emacs/mlir-lsp-client.el b/mlir/utils/emacs/mlir-lsp-client.el
index 4397a55e7206ac..8e8f1f2d0c56f2 100644
--- a/mlir/utils/emacs/mlir-lsp-client.el
+++ b/mlir/utils/emacs/mlir-lsp-client.el
@@ -1,4 +1,4 @@
-;;; mlir-lsp-clinet.el --- LSP clinet for the MLIR.
+;;; mlir-lsp-client.el --- LSP client for the MLIR.
 
 ;; Copyright (C) 2022 The MLIR Authors.
 ;;
@@ -18,7 +18,7 @@
 
 ;;; Commentary:
 
-;; LSP clinet to use with `mlir-mode' that uses `mlir-lsp-server' or any
+;; LSP client to use with `mlir-mode' that uses `mlir-lsp-server' or any
 ;; user made compatible server.
 
 ;;; Code:
diff --git a/mlir/utils/generate-test-checks.py b/mlir/utils/generate-test-checks.py
index 8faa425beace1d..fe8cda67efcc86 100755
--- a/mlir/utils/generate-test-checks.py
+++ b/mlir/utils/generate-test-checks.py
@@ -258,8 +258,8 @@ def main():
     parser.add_argument(
         "--source",
         type=str,
-        help="Print each CHECK chunk before each delimeter line in the source"
-        "file, respectively. The delimeter lines are identified by "
+        help="Print each CHECK chunk before each delimiter line in the source"
+        "file, respectively. The delimiter lines are identified by "
         "--source_delim_regex.",
     )
     parser.add_argument("--source_delim_regex", type=str, default="func @")
diff --git a/mlir/utils/spirv/gen_spirv_dialect.py b/mlir/utils/spirv/gen_spirv_dialect.py
index 78c1022428d8a1..b57f17d7e5574d 100755
--- a/mlir/utils/spirv/gen_spirv_dialect.py
+++ b/mlir/utils/spirv/gen_spirv_dialect.py
@@ -109,22 +109,22 @@ def split_list_into_sublists(items):
     Arguments:
       - items: a list of strings
     """
-    chuncks = []
+    chunks = []
     chunk = []
     chunk_len = 0
 
     for item in items:
         chunk_len += len(item) + 2
         if chunk_len > 80:
-            chuncks.append(chunk)
+            chunks.append(chunk)
             chunk = []
             chunk_len = len(item) + 2
         chunk.append(item)
 
     if len(chunk) != 0:
-        chuncks.append(chunk)
+        chunks.append(chunk)
 
-    return chuncks
+    return chunks
 
 
 def uniquify_enum_cases(lst):
diff --git a/mlir/utils/tree-sitter-mlir/dialect/arith.js b/mlir/utils/tree-sitter-mlir/dialect/arith.js
index f77e2a758edfd8..479e64b8e5b333 100644
--- a/mlir/utils/tree-sitter-mlir/dialect/arith.js
+++ b/mlir/utils/tree-sitter-mlir/dialect/arith.js
@@ -101,7 +101,7 @@ module.exports = {
                     // $predicate `,` $lhs `,` $rhs attr-dict `:` type($lhs)
                     seq(choice('arith.cmpi', 'arith.cmpf'),
                         field('predicate',
-                              choice('eq', 'ne', 'oeq', 'olt', 'ole', 'ogt',
+                              choice('eq', 'ne', 'oeq', 'olt', 'ole', 'ogt',
                                      'oge', 'slt', 'sle', 'sgt', 'sge', 'ult',
                                      'ule', 'ugt', 'uge', $.string_literal)),
                         ',', field('lhs', $.value_use), ',',
diff --git a/mlir/utils/tree-sitter-mlir/queries/highlights.scm b/mlir/utils/tree-sitter-mlir/queries/highlights.scm
index 97aba2b266eca8..348b35e91ce3e5 100644
--- a/mlir/utils/tree-sitter-mlir/queries/highlights.scm
+++ b/mlir/utils/tree-sitter-mlir/queries/highlights.scm
@@ -24,7 +24,7 @@
   "eq"
   "ne"
   "oeq"
-  "olt"
+  "old"
   "ole"
   "ogt"
   "oge"
@@ -331,7 +331,7 @@
 [
   ":"
   ","
-] @punctuation.delimeter
+] @punctuation.delimiter
 
 [
   "="
diff --git a/mlir/utils/tree-sitter-mlir/test/corpus/op.txt b/mlir/utils/tree-sitter-mlir/test/corpus/op.txt
index d2914e24e65f95..8dafa63a011c1d 100644
--- a/mlir/utils/tree-sitter-mlir/test/corpus/op.txt
+++ b/mlir/utils/tree-sitter-mlir/test/corpus/op.txt
@@ -351,7 +351,7 @@ func.func @gather_like(
                           (float_type))))))))))))))
 
 ================================================================================
-tensor.collapse_shape and tensor.expand_shape without reassocation
+tensor.collapse_shape and tensor.expand_shape without reassociation
 ================================================================================
 func.func @tensor_reshape_zero_dim(%arg0 : tensor<1x1xf32>, %arg1 : tensor<f32>)
     -> (tensor<f32>, tensor<1x1xf32>) {
@@ -444,7 +444,7 @@ func.func @tensor_reshape_zero_dim(%arg0 : tensor<1x1xf32>, %arg1 : tensor<f32>)
                           (float_type))))))))))))))
 
 ================================================================================
-tensor.collapse_shape with reassocation
+tensor.collapse_shape with reassociation
 ================================================================================
 func.func @legal_collapsing_reshape_dynamic_tensor
   (%arg0: tensor<?x?x?x4x?xf32>) -> tensor<?x?x?xf32>
diff --git a/mlir/utils/tree-sitter-mlir/test/highlight/controlflow.mlir b/mlir/utils/tree-sitter-mlir/test/highlight/controlflow.mlir
index d3193838c3a738..4039faad6b623b 100644
--- a/mlir/utils/tree-sitter-mlir/test/highlight/controlflow.mlir
+++ b/mlir/utils/tree-sitter-mlir/test/highlight/controlflow.mlir
@@ -3,7 +3,7 @@ func.func @simple(i64, i1) -> i64 {
 //        ^ function
 //               ^ punctuation.bracket
 //                ^ type.builtin
-//                   ^ punctuation.delimeter
+//                   ^ punctuation.delimiter
 //                     ^ type.builtin
 //                       ^ punctuation.bracket
 //                         ^ operator
diff --git a/mlir/utils/tree-sitter-mlir/test/highlight/func.mlir b/mlir/utils/tree-sitter-mlir/test/highlight/func.mlir
index cc6f872cf7410c..c059c9a67812c2 100644
--- a/mlir/utils/tree-sitter-mlir/test/highlight/func.mlir
+++ b/mlir/utils/tree-sitter-mlir/test/highlight/func.mlir
@@ -3,9 +3,9 @@ func.func @test_addi(%arg0 : i64, %arg1 : i64) -> i64 {
 //        ^ function
 //                  ^ punctuation.bracket
 //                   ^ variable.parameter
-//                         ^ punctuation.delimeter
+//                         ^ punctuation.delimiter
 //                           ^ type.builtin
-//                              ^ punctuation.delimeter
+//                              ^ punctuation.delimiter
 //                                ^ variable.parameter
 //                                        ^ type.builtin
 //                                           ^ punctuation.bracket
diff --git a/mlir/utils/vscode/cpp-grammar.json b/mlir/utils/vscode/cpp-grammar.json
index 36063c6a5210bd..1f1135b1d51352 100644
--- a/mlir/utils/vscode/cpp-grammar.json
+++ b/mlir/utils/vscode/cpp-grammar.json
@@ -14,7 +14,7 @@
           "name": "punctuation.definition.string.begin.cpp"
         },
         "1": {
-          "name": "mlir.delimeter.raw.string.cpp"
+          "name": "mlir.delimiter.raw.string.cpp"
         }
       },
       "end": "\\)(?i:mlir)\"",
@@ -23,7 +23,7 @@
           "name": "punctuation.definition.string.end.cpp"
         },
         "1": {
-          "name": "mlir.delimeter.raw.string.cpp"
+          "name": "mlir.delimiter.raw.string.cpp"
         }
       },
       "name": "mlir.raw.string.cpp",


