[Mlir-commits] [mlir] 91d2ecf - [NFC] Fix some typos in libc and mlir comments (#133374)

llvmlistbot at llvm.org
Fri Mar 28 12:52:40 PDT 2025


Author: Qinkun Bao
Date: 2025-03-28T15:52:37-04:00
New Revision: 91d2ecf0d563b03d75380375e8ac26a291bed9d7

URL: https://github.com/llvm/llvm-project/commit/91d2ecf0d563b03d75380375e8ac26a291bed9d7
DIFF: https://github.com/llvm/llvm-project/commit/91d2ecf0d563b03d75380375e8ac26a291bed9d7.diff

LOG: [NFC] Fix some typos in libc and mlir comments (#133374)

Added: 
    

Modified: 
    libc/src/__support/CPP/atomic.h
    mlir/include/mlir/Analysis/Presburger/Simplex.h
    mlir/include/mlir/AsmParser/AsmParser.h
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
    mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
    mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
    mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
    mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
    mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
    mlir/test/mlir-tblgen/op-properties-predicates.td

Removed: 
    


################################################################################
diff --git a/libc/src/__support/CPP/atomic.h b/libc/src/__support/CPP/atomic.h
index f21755293102e..2f00b3ed32811 100644
--- a/libc/src/__support/CPP/atomic.h
+++ b/libc/src/__support/CPP/atomic.h
@@ -97,7 +97,7 @@ template <typename T> struct Atomic {
 
   LIBC_INLINE constexpr Atomic() = default;
 
-  // Intializes the value without using atomic operations.
+  // Initializes the value without using atomic operations.
   LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}
 
   LIBC_INLINE Atomic(const Atomic &) = delete;
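
The constexpr constructor above may skip atomic operations because no other
thread can observe an object that is still being constructed. A minimal
sketch of the same idea in standard C++, with std::atomic standing in for
the libc-internal cpp::Atomic:

  #include <atomic>

  // Construction is a plain store: the object is not yet visible to any
  // other thread, so no synchronization is needed.
  std::atomic<int> counter(0);

  // Concurrent accesses after construction must use atomic operations.
  void bump() { counter.fetch_add(1, std::memory_order_relaxed); }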

diff --git a/mlir/include/mlir/Analysis/Presburger/Simplex.h b/mlir/include/mlir/Analysis/Presburger/Simplex.h
index 4c40c4cdcb655..d89bbe37a6b3e 100644
--- a/mlir/include/mlir/Analysis/Presburger/Simplex.h
+++ b/mlir/include/mlir/Analysis/Presburger/Simplex.h
@@ -344,7 +344,7 @@ class SimplexBase {
   SmallVector<UndoLogEntry, 8> undoLog;
 
   /// Holds a vector of bases. The ith saved basis is the basis that should be
-  /// restored when processing the ith occurrance of UndoLogEntry::RestoreBasis
+  /// restored when processing the ith occurrence of UndoLogEntry::RestoreBasis
   /// in undoLog. This is used by getSnapshotBasis.
   SmallVector<SmallVector<int, 8>, 8> savedBases;
 
@@ -367,7 +367,7 @@ class SimplexBase {
 ///
 /// This does not directly support negative-valued variables, so it uses the big
 /// M parameter trick to make all the variables non-negative. Basically we
-/// introduce an artifical variable M that is considered to have a value of
+/// introduce an artificial variable M that is considered to have a value of
 /// +infinity and instead of the variables x, y, z, we internally use variables
 /// M + x, M + y, M + z, which are now guaranteed to be non-negative. See the
 /// documentation for SimplexBase for more details. M is also considered to be
@@ -561,7 +561,7 @@ struct SymbolicLexOpt {
 /// negative for all values in the symbol domain, the row needs to be pivoted
 /// irrespective of the precise value of the symbols. To answer queries like
 /// "Is this symbolic sample always negative in the symbol domain?", we maintain
-/// a `LexSimplex domainSimplex` correponding to the symbol domain.
+/// a `LexSimplex domainSimplex` corresponding to the symbol domain.
 ///
 /// In other cases, it may be that the symbolic sample is violated at some
 /// values in the symbol domain and not violated at others. In this case,
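
A worked illustration of the big M trick described above (numbers chosen
for illustration, not taken from the source): to admit a possibly negative
variable x under a constraint such as x >= -3, the simplex internally
substitutes the non-negative variable u = M + x, with M treated as
+infinity:

  original variable:  x          (may be negative)
  internal variable:  u = M + x  (guaranteed non-negative)
  constraint x >= -3  becomes    u >= M - 3
  sample recovery:    x = u - M

Since M compares greater than every concrete value, the rewritten system
stays entirely in the non-negative regime the simplex method requires.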

diff --git a/mlir/include/mlir/AsmParser/AsmParser.h b/mlir/include/mlir/AsmParser/AsmParser.h
index 3c1bff1fbc7f1..33daf7ca26f49 100644
--- a/mlir/include/mlir/AsmParser/AsmParser.h
+++ b/mlir/include/mlir/AsmParser/AsmParser.h
@@ -47,7 +47,7 @@ parseAsmSourceFile(const llvm::SourceMgr &sourceMgr, Block *block,
 /// not, an error diagnostic is emitted to the context and a null value is
 /// returned.
 /// If `numRead` is provided, it is set to the number of consumed characters on
-/// succesful parse. Otherwise, parsing fails if the entire string is not
+/// successful parse. Otherwise, parsing fails if the entire string is not
 /// consumed.
 /// Some internal copying can be skipped if the source string is known to be
 /// null terminated.
@@ -58,7 +58,7 @@ Attribute parseAttribute(llvm::StringRef attrStr, MLIRContext *context,
 /// This parses a single MLIR type to an MLIR context if it was valid. If not,
 /// an error diagnostic is emitted to the context.
 /// If `numRead` is provided, it is set to the number of consumed characters on
-/// succesful parse. Otherwise, parsing fails if the entire string is not
+/// successful parse. Otherwise, parsing fails if the entire string is not
 /// consumed.
 /// Some internal copying can be skipped if the source string is known to be
 /// null terminated.
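
A hedged usage sketch of the numRead contract documented above; the
trailing parameters (the default Type and the null-termination flag) are
assumed from this header revision:

  #include "mlir/AsmParser/AsmParser.h"
  #include "mlir/IR/MLIRContext.h"

  void demo(mlir::MLIRContext &ctx) {
    // Without numRead, parsing fails unless the entire string is consumed.
    mlir::Attribute a = mlir::parseAttribute("42 : i32", &ctx);

    // With numRead, trailing text is tolerated; on success, numRead holds
    // the number of characters the parser consumed.
    size_t numRead = 0;
    mlir::Attribute b =
        mlir::parseAttribute("42 : i32 extra", &ctx, /*type=*/{}, &numRead);
  }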

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
index f9816584b4655..1af9dc6cf2061 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
@@ -41,7 +41,7 @@ module {
 
     //
     // Note: position for loose_compressed level can vary in the end,
-    // therefore we loosly check it with {{.*}}.
+    // therefore we loosely check it with {{.*}}.
     //
     // CHECK:   ---- Sparse Tensor ----
     // CHECK-NEXT: nse = 17
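
For context on the {{.*}} spelling mentioned in the note above: FileCheck
treats {{...}} as an inline regular expression, so a directive of the form
(schematic, not the test's exact line)

  // CHECK: pos : ( {{.*}} )

accepts arbitrary text in that position, which is how the test tolerates
the varying trailing positions of the loose_compressed level.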

diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
index f38b9ddfaa10e..690c4d2636a44 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
@@ -96,21 +96,21 @@ func.func @main() {
   %f0 = arith.constant 0.0e+00 : f16
   %c32 = arith.constant 32 : index
 
-  // Intialize the lhs matrix with a linspace function.
+  // Initialize the lhs matrix with a linspace function.
   scf.for %r = %c0 to %M step %c1 {
     scf.for %c = %c0 to %K step %c1 {
       %idx = func.call @compute_linspace_val(%r, %c, %K) : (index, index, index) -> f16
       memref.store %idx, %lhs[%r, %c] : !lhs_memref_type
     }
   }
-  // Intialize the rhs matrix with a linspace function.
+  // Initialize the rhs matrix with a linspace function.
   scf.for %r = %c0 to %K step %c1 {
     scf.for %c = %c0 to %N step %c1 {
       %idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f16
       memref.store %idx, %rhs[%r, %c] : !rhs_memref_type
     }
   }
-  // Intialize the rhs matrix with a linspace function.
+  // Initialize the rhs matrix with a linspace function.
   scf.for %r = %c0 to %M step %c1 {
     scf.for %c = %c0 to %N step %c1 {
       %idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f16
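
The loops above fill each matrix one element at a time through
compute_linspace_val, whose body is not part of this diff. A hypothetical
C++ analogue of such a linspace-style fill (an assumption for illustration;
the test's actual formula may differ):

  // Hypothetical: the value grows linearly with the flattened (row, col)
  // index, scaled by the row length -- the usual "linspace" pattern.
  float computeLinspaceVal(int r, int c, int rowLength) {
    return static_cast<float>(r * rowLength + c) / rowLength;
  }

  void initMatrix(float *m, int rows, int cols) {
    for (int r = 0; r < rows; ++r)
      for (int c = 0; c < cols; ++c)
        m[r * cols + c] = computeLinspaceVal(r, c, cols);
  }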

diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
index 1eae22f0b85d0..2eef2ff8f3564 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
@@ -47,21 +47,21 @@ func.func @main() {
   %f0 = arith.constant 0.0e+00 : f32
   %c32 = arith.constant 32 : index
 
-  // Intialize the lhs matrix with a linspace function.
+  // Initialize the lhs matrix with a linspace function.
   scf.for %r = %c0 to %M step %c1 {
     scf.for %c = %c0 to %K step %c1 {
       %idx = func.call @compute_linspace_val(%r, %c, %K) : (index, index, index) -> f32
       memref.store %idx, %lhs[%r, %c] : !lhs_memref_type
     }
   }
-  // Intialize the rhs matrix with a linspace function.
+  // Initialize the rhs matrix with a linspace function.
   scf.for %r = %c0 to %K step %c1 {
     scf.for %c = %c0 to %N step %c1 {
       %idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f32
       memref.store %idx, %rhs[%r, %c] : !rhs_memref_type
     }
   }
-  // Intialize the rhs matrix with a linspace function.
+  // Initialize the rhs matrix with a linspace function.
   scf.for %r = %c0 to %M step %c1 {
     scf.for %c = %c0 to %N step %c1 {
       %idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f32

diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
index 01144b3495866..2d91aa8f12b5e 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
@@ -20,7 +20,7 @@ func.func @main() {
   %c32 = arith.constant 32 : index
   %c1 = arith.constant 1 : index
 
-  // Intialize the Input matrix with the column index in each row.
+  // Initialize the Input matrix with the column index in each row.
   scf.for %arg0 = %c0 to %c16 step %c1 {
     scf.for %arg1 = %c0 to %c16 step %c1 {
       %2 = arith.index_cast %arg1 : index to i16
@@ -28,7 +28,7 @@ func.func @main() {
       memref.store %3, %0[%arg0, %arg1] : memref<16x16xf16>
     }
   }
-  // Intialize the accumulator matrix with zeros.
+  // Initialize the accumulator matrix with zeros.
   scf.for %arg0 = %c0 to %c16 step %c1 {
     scf.for %arg1 = %c0 to %c16 step %c1 {
       memref.store %f0, %22[%arg0, %arg1] : memref<16x16xf16>

diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
index 3301e68e5d123..18b1c1a1b0b45 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
@@ -20,13 +20,13 @@ func.func @main() {
   %c32 = arith.constant 32 : index
   %c1 = arith.constant 1 : index
 
-  // Intialize the Input matrix with ones.
+  // Initialize the Input matrix with ones.
   scf.for %arg0 = %c0 to %c16 step %c1 {
     scf.for %arg1 = %c0 to %c16 step %c1 {
       memref.store %f1, %h0[%arg0, %arg1] : memref<16x16xf16>
     }
   }
-  // Intialize the accumulator matrix with zeros.
+  // Initialize the accumulator matrix with zeros.
   scf.for %arg0 = %c0 to %c16 step %c1 {
     scf.for %arg1 = %c0 to %c16 step %c1 {
       memref.store %f0, %h_out[%arg0, %arg1] : memref<16x16xf32>

diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
index 4db9aa056e757..78895cda72c5a 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
@@ -18,13 +18,13 @@ func.func @main() {
   %c32 = arith.constant 32 : index
   %c1 = arith.constant 1 : index
 
-  // Intialize the Input matrix with ones.
+  // Initialize the Input matrix with ones.
   scf.for %arg0 = %c0 to %c16 step %c1 {
     scf.for %arg1 = %c0 to %c16 step %c1 {
       memref.store %f1, %0[%arg0, %arg1] : memref<16x16xf16>
     }
   }
-  // Intialize the accumulator matrix with zeros.
+  // Initialize the accumulator matrix with zeros.
   scf.for %arg0 = %c0 to %c16 step %c1 {
     scf.for %arg1 = %c0 to %c16 step %c1 {
       memref.store %f0, %22[%arg0, %arg1] : memref<16x16xf32>

diff --git a/mlir/test/mlir-tblgen/op-properties-predicates.td b/mlir/test/mlir-tblgen/op-properties-predicates.td
index de59f5166d7e1..9834edd0cbb57 100644
--- a/mlir/test/mlir-tblgen/op-properties-predicates.td
+++ b/mlir/test/mlir-tblgen/op-properties-predicates.td
@@ -36,7 +36,7 @@ def OpWithPredicates : NS_Op<"op_with_predicates"> {
 }
 
 // CHECK-LABEL: OpWithPredicates::verifyInvariantsImpl()
-// Note: for test readibility, we capture [[maybe_unused]] into the variable maybe_unused
+// Note: for test readability, we capture [[maybe_unused]] into the variable maybe_unused
 // CHECK: [[maybe_unused:\[\[maybe_unused\]\]]] int64_t tblgen_scalar = this->getScalar();
 // CHECK: if (!((tblgen_scalar >= 0)))
 // CHECK-NEXT:  return emitOpError("property 'scalar' failed to satisfy constraint: non-negative int64_t");
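
For readers unfamiliar with the capture spelling in the CHECK line above:
FileCheck's [[name:regex]] syntax matches regex and binds the matched text
to the variable name, which later lines can reuse as [[name]]. Here,

  [[maybe_unused:\[\[maybe_unused\]\]]]

matches the literal C++ attribute [[maybe_unused]] (each bracket escaped)
and binds it to a FileCheck variable also called maybe_unused, purely to
keep the line readable.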


        

