[llvm] e5cd9bd - [NFC][TableGen] Delete extra spaces in comments (#147004)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 4 07:06:44 PDT 2025


Author: Rahul Joshi
Date: 2025-07-04T07:06:41-07:00
New Revision: e5cd9bdfea90def36df4f48186a4434306e50a00

URL: https://github.com/llvm/llvm-project/commit/e5cd9bdfea90def36df4f48186a4434306e50a00
DIFF: https://github.com/llvm/llvm-project/commit/e5cd9bdfea90def36df4f48186a4434306e50a00.diff

LOG: [NFC][TableGen] Delete extra spaces in comments (#147004)

Added: 
    

Modified: 
    llvm/include/llvm/TableGen/Record.h
    llvm/include/llvm/TableGen/SetTheory.h
    llvm/include/llvm/TableGen/StringMatcher.h
    llvm/lib/TableGen/DetailedRecordsBackend.cpp
    llvm/lib/TableGen/Main.cpp
    llvm/lib/TableGen/Record.cpp
    llvm/lib/TableGen/TGLexer.cpp
    llvm/lib/TableGen/TGLexer.h
    llvm/lib/TableGen/TGParser.cpp
    llvm/lib/TableGen/TGParser.h

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h
index 5849344bcb0b5..a2b86eb8e7cad 100644
--- a/llvm/include/llvm/TableGen/Record.h
+++ b/llvm/include/llvm/TableGen/Record.h
@@ -868,7 +868,7 @@ class UnOpInit final : public OpInit, public FoldingSetNode {
   UnaryOp getOpcode() const { return (UnaryOp)Opc; }
   const Init *getOperand() const { return LHS; }
 
-  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
   const Init *Fold(const Record *CurRec, bool IsFinal = false) const;
 
@@ -940,7 +940,7 @@ class BinOpInit final : public OpInit, public FoldingSetNode {
   std::optional<bool> CompareInit(unsigned Opc, const Init *LHS,
                                   const Init *RHS) const;
 
-  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
   const Init *Fold(const Record *CurRec) const;
 
@@ -990,7 +990,7 @@ class TernOpInit final : public OpInit, public FoldingSetNode {
   const Init *getMHS() const { return MHS; }
   const Init *getRHS() const { return RHS; }
 
-  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
   const Init *Fold(const Record *CurRec) const;
 
@@ -1096,7 +1096,7 @@ class FoldOpInit final : public TypedInit, public FoldingSetNode {
 
   void Profile(FoldingSetNodeID &ID) const;
 
-  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
   const Init *Fold(const Record *CurRec) const;
 
@@ -1129,7 +1129,7 @@ class IsAOpInit final : public TypedInit, public FoldingSetNode {
 
   void Profile(FoldingSetNodeID &ID) const;
 
-  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
   const Init *Fold() const;
 
@@ -1163,7 +1163,7 @@ class ExistsOpInit final : public TypedInit, public FoldingSetNode {
 
   void Profile(FoldingSetNodeID &ID) const;
 
-  // Fold - If possible, fold this to a simpler init.  Return this if not
+  // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
   const Init *Fold(const Record *CurRec, bool IsFinal = false) const;
 
@@ -1412,8 +1412,8 @@ class FieldInit final : public TypedInit {
   }
 };
 
-/// (v a, b) - Represent a DAG tree value.  DAG inits are required
-/// to have at least one value then a (possibly empty) list of arguments.  Each
+/// (v a, b) - Represent a DAG tree value. DAG inits are required
+/// to have at least one value then a (possibly empty) list of arguments. Each
 /// argument can have a name associated with it.
 class DagInit final
     : public TypedInit,

diff --git a/llvm/include/llvm/TableGen/SetTheory.h b/llvm/include/llvm/TableGen/SetTheory.h
index 771dcff2f214c..53c78e4572927 100644
--- a/llvm/include/llvm/TableGen/SetTheory.h
+++ b/llvm/include/llvm/TableGen/SetTheory.h
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // This file implements the SetTheory class that computes ordered sets of
-// Records from DAG expressions.  Operators for standard set operations are
+// Records from DAG expressions. Operators for standard set operations are
 // predefined, and it is possible to add special purpose set operators as well.
 //
 // The user may define named sets as Records of predefined classes. Set
@@ -112,7 +112,7 @@ class SetTheory {
   void addExpander(StringRef ClassName, std::unique_ptr<Expander>);
 
   /// addFieldExpander - Add an expander for ClassName that simply evaluates
-  /// FieldName in the Record to get the set elements.  That is all that is
+  /// FieldName in the Record to get the set elements. That is all that is
   /// needed for a class like:
   ///
   ///   class Set<dag d> {
@@ -134,7 +134,7 @@ class SetTheory {
       evaluate(*begin++, Elts, Loc);
   }
 
-  /// expand - Expand a record into a set of elements if possible.  Return a
+  /// expand - Expand a record into a set of elements if possible. Return a
   /// pointer to the expanded elements, or NULL if Set cannot be expanded
   /// further.
   const RecVec *expand(const Record *Set);

diff --git a/llvm/include/llvm/TableGen/StringMatcher.h b/llvm/include/llvm/TableGen/StringMatcher.h
index 49769883a98b4..70554de1a6f17 100644
--- a/llvm/include/llvm/TableGen/StringMatcher.h
+++ b/llvm/include/llvm/TableGen/StringMatcher.h
@@ -26,7 +26,7 @@ class raw_ostream;
 /// simple switch tree to classify the input string.
 ///
 /// If a match is found, the code in Matches[i].second is executed; control must
-/// not exit this code fragment.  If nothing matches, execution falls through.
+/// not exit this code fragment. If nothing matches, execution falls through.
 class StringMatcher {
 public:
   using StringPair = std::pair<std::string, std::string>;

diff --git a/llvm/lib/TableGen/DetailedRecordsBackend.cpp b/llvm/lib/TableGen/DetailedRecordsBackend.cpp
index cf697599e53a2..1ed64356b7c62 100644
--- a/llvm/lib/TableGen/DetailedRecordsBackend.cpp
+++ b/llvm/lib/TableGen/DetailedRecordsBackend.cpp
@@ -1,4 +1,4 @@
-//===- DetailedRecordBackend.cpp - Detailed Records Report      -*- C++ -*-===//
+//===- DetailedRecordBackend.cpp - Detailed Records Report ------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This Tablegen backend prints a report that includes all the global 
+// This Tablegen backend prints a report that includes all the global
 // variables, classes, and records in complete detail. It includes more
 // detail than the default TableGen printer backend.
 //

diff --git a/llvm/lib/TableGen/Main.cpp b/llvm/lib/TableGen/Main.cpp
index ea716215e0679..f545706d6fe30 100644
--- a/llvm/lib/TableGen/Main.cpp
+++ b/llvm/lib/TableGen/Main.cpp
@@ -8,7 +8,7 @@
 //
 // TableGen is a tool which can be used to build up a description of something,
 // then invoke one or more "tablegen backends" to emit information about the
-// description in some predefined format.  In practice, this is used by the LLVM
+// description in some predefined format. In practice, this is used by the LLVM
 // code generators to automate generation of a code generator through a
 // high-level description of the target.
 //
@@ -156,7 +156,7 @@ int llvm::TableGenMain(const char *argv0,
     return 1;
 
   // Always write the depfile, even if the main output hasn't changed.
-  // If it's missing, Ninja considers the output dirty.  If this was below
+  // If it's missing, Ninja considers the output dirty. If this was below
   // the early exit below and someone deleted the .inc.d file but not the .inc
   // file, tablegen would never write the depfile.
   if (!DependFilename.empty()) {

diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index 7f2ed77a74099..1f3e5dc68f1d6 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -500,7 +500,7 @@ const Init *BitsInit::convertInitializerTo(const RecTy *Ty) const {
   }
 
   if (auto *BRT = dyn_cast<BitsRecTy>(Ty)) {
-    // If the number of bits is right, return it.  Otherwise we need to expand
+    // If the number of bits is right, return it. Otherwise we need to expand
     // or truncate.
     if (getNumBits() != BRT->getNumBits()) return nullptr;
     return this;
@@ -944,7 +944,7 @@ const Init *UnOpInit::Fold(const Record *CurRec, bool IsFinal) const {
   case TAIL:
     if (const auto *LHSl = dyn_cast<ListInit>(LHS)) {
       assert(!LHSl->empty() && "Empty list in tail");
-      // Note the slice(1).  We can't just pass the result of getElements()
+      // Note the slice(1). We can't just pass the result of getElements()
       // directly.
       return ListInit::get(LHSl->getElements().slice(1),
                            LHSl->getElementType());
@@ -2921,16 +2921,16 @@ void Record::setName(const Init *NewName) {
   Name = NewName;
   checkName();
   // DO NOT resolve record values to the name at this point because
-  // there might be default values for arguments of this def.  Those
+  // there might be default values for arguments of this def. Those
   // arguments might not have been resolved yet so we don't want to
   // prematurely assume values for those arguments were not passed to
   // this def.
   //
   // Nonetheless, it may be that some of this Record's values
-  // reference the record name.  Indeed, the reason for having the
-  // record name be an Init is to provide this flexibility.  The extra
+  // reference the record name. Indeed, the reason for having the
+  // record name be an Init is to provide this flexibility. The extra
   // resolve steps after completely instantiating defs takes care of
-  // this.  See TGParser::ParseDef and TGParser::ParseDefm.
+  // this. See TGParser::ParseDef and TGParser::ParseDefm.
 }
 
 void Record::resolveReferences(Resolver &R, const RecordVal *SkipVal) {

diff --git a/llvm/lib/TableGen/TGLexer.cpp b/llvm/lib/TableGen/TGLexer.cpp
index 46487cba9453e..c8e020d791e09 100644
--- a/llvm/lib/TableGen/TGLexer.cpp
+++ b/llvm/lib/TableGen/TGLexer.cpp
@@ -102,7 +102,7 @@ SMRange TGLexer::getLocRange() const {
 }
 
 /// ReturnError - Set the error to the specified string at the specified
-/// location.  This is defined to always return tgtok::Error.
+/// location. This is defined to always return tgtok::Error.
 tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
   PrintError(Loc, Msg);
   return tgtok::Error;
@@ -116,7 +116,7 @@ bool TGLexer::processEOF() {
   SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
   if (ParentIncludeLoc != SMLoc()) {
     // If prepExitInclude() detects a problem with the preprocessing
-    // control stack, it will return false.  Pretend that we reached
+    // control stack, it will return false. Pretend that we reached
     // the final EOF and stop lexing more tokens by returning false
     // to LexToken().
     if (!prepExitInclude(false))
@@ -147,7 +147,7 @@ int TGLexer::getNextChar() {
 
   case 0: {
     // A NUL character in the stream is either the end of the current buffer or
-    // a spurious NUL in the file.  Disambiguate that here.
+    // a spurious NUL in the file. Disambiguate that here.
     if (CurPtr - 1 == CurBuf.end()) {
       --CurPtr; // Arrange for another call to return EOF again.
       return EOF;
@@ -160,7 +160,7 @@ int TGLexer::getNextChar() {
   case '\n':
   case '\r':
     // Handle the newline character by ignoring it and incrementing the line
-    // count.  However, be careful about 'dos style' files with \n\r in them.
+    // count. However, be careful about 'dos style' files with \n\r in them.
     // Only treat a \n\r or \r\n as a single line.
     if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
         *CurPtr != CurChar)
@@ -259,7 +259,7 @@ tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
     int NextChar = 0;
     if (isDigit(CurChar)) {
       // Allow identifiers to start with a number if it is followed by
-      // an identifier.  This can happen with paste operations like
+      // an identifier. This can happen with paste operations like
       // foo#8i.
       int i = 0;
       do {
@@ -422,7 +422,7 @@ tgtok::TokKind TGLexer::LexIdentifier() {
   return Kind;
 }
 
-/// LexInclude - We just read the "include" token.  Get the string token that
+/// LexInclude - We just read the "include" token. Get the string token that
 /// comes next and enter the include.
 bool TGLexer::LexInclude() {
   // The token after the include must be a string.
@@ -461,7 +461,7 @@ void TGLexer::SkipBCPLComment() {
   CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
 }
 
-/// SkipCComment - This skips C-style /**/ comments.  The only difference from C
+/// SkipCComment - This skips C-style /**/ comments. The only difference from C
 /// is that we allow nesting.
 bool TGLexer::SkipCComment() {
   ++CurPtr;  // skip the star.
@@ -554,8 +554,8 @@ tgtok::TokKind TGLexer::LexNumber() {
   return Base == 2 ? tgtok::BinaryIntVal : tgtok::IntVal;
 }
 
-/// LexBracket - We just read '['.  If this is a code block, return it,
-/// otherwise return the bracket.  Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
+/// LexBracket - We just read '['. If this is a code block, return it,
+/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
 tgtok::TokKind TGLexer::LexBracket() {
   if (CurPtr[0] != '{')
     return tgtok::l_square;
@@ -687,7 +687,7 @@ tgtok::TokKind TGLexer::prepIsDirective() const {
         NextChar == '\n' ||
         // It looks like TableGen does not support '\r' as the actual
         // carriage return, e.g. getNextChar() treats a single '\r'
-        // as '\n'.  So we do the same here.
+        // as '\n'. So we do the same here.
         NextChar == '\r')
       return Kind;
 
@@ -700,7 +700,7 @@ tgtok::TokKind TGLexer::prepIsDirective() const {
     //     #define/**/ AND #define//
     //
     // These cases will be reported as incorrect after calling
-    // prepLexMacroName().  We could have supported C-style comments
+    // prepLexMacroName(). We could have supported C-style comments
     // after #ifdef/#define, but this would complicate the code
     // for little benefit.
     if (NextChar == '/') {
@@ -733,7 +733,7 @@ void TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
 
 tgtok::TokKind TGLexer::lexPreprocessor(tgtok::TokKind Kind,
                                         bool ReturnNextLiveToken) {
-  // We must be looking at a preprocessing directive.  Eat it!
+  // We must be looking at a preprocessing directive. Eat it!
   prepEatPreprocessorDirective(Kind);
 
   if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
@@ -879,7 +879,7 @@ bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
     tgtok::TokKind Kind = prepIsDirective();
 
     // If we did not find a preprocessing directive or it is #define,
-    // then just skip to the next line.  We do not have to do anything
+    // then just skip to the next line. We do not have to do anything
     // for #define in the line-skipping mode.
     if (Kind == tgtok::Error || Kind == tgtok::Define)
       continue;
@@ -909,7 +909,7 @@ bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
   } while (CurPtr != CurBuf.end());
 
   // We have reached the end of the file, but never left the lines-skipping
-  // mode.  This means there is no matching #endif.
+  // mode. This means there is no matching #endif.
   prepReportPreprocessorStackError();
   return false;
 }
@@ -939,8 +939,8 @@ bool TGLexer::prepSkipLineBegin() {
         // Skip C-style comment.
         // Note that we do not care about skipping the C++-style comments.
         // If the line contains "//", it may not contain any processable
-        // preprocessing directive.  Just return CurPtr pointing to
-        // the first '/' in this case.  We also do not care about
+        // preprocessing directive. Just return CurPtr pointing to
+        // the first '/' in this case. We also do not care about
         // incorrect symbols after the first '/' - we are in lines-skipping
         // mode, so incorrect code is allowed to some extent.
 
@@ -968,7 +968,7 @@ bool TGLexer::prepSkipLineBegin() {
     ++CurPtr;
   }
 
-  // We have reached the end of the file.  Return to the lines skipping
+  // We have reached the end of the file. Return to the lines skipping
   // code, and allow it to handle the EOF as needed.
   return true;
 }
@@ -994,7 +994,7 @@ bool TGLexer::prepSkipDirectiveEnd() {
         SkipBCPLComment();
       } else if (NextChar == '*') {
         // When we are skipping C-style comment at the end of a preprocessing
-        // directive, we can skip several lines.  If any meaningful TD token
+        // directive, we can skip several lines. If any meaningful TD token
         // follows the end of the C-style comment on the same line, it will
         // be considered as an invalid usage of TD token.
         // For example, we want to forbid usages like this one:
@@ -1003,7 +1003,7 @@ bool TGLexer::prepSkipDirectiveEnd() {
         //     #define MACRO /* This macro is used
         //                      to ... */ class Class {}
         // One can argue that this should be allowed, but it does not seem
-        // to be worth of the complication.  Moreover, this matches
+        // to be worth of the complication. Moreover, this matches
         // the C preprocessor behavior.
 
         // Set TokStart to the beginning of the comment to enable proper

diff --git a/llvm/lib/TableGen/TGLexer.h b/llvm/lib/TableGen/TGLexer.h
index a5b10b37e9886..ed7d8f3baae59 100644
--- a/llvm/lib/TableGen/TGLexer.h
+++ b/llvm/lib/TableGen/TGLexer.h
@@ -61,7 +61,7 @@ enum TokKind {
   // Integer value.
   IntVal,
 
-  // Binary constant.  Note that these are sized according to the number of
+  // Binary constant. Note that these are sized according to the number of
   // bits given.
   BinaryIntVal,
 
@@ -280,7 +280,7 @@ class TGLexer {
   //
   // An ordered list of preprocessing controls defined by #ifdef/#else
   // directives that are in effect currently is called preprocessing
-  // control stack.  It is represented as a vector of PreprocessorControlDesc's.
+  // control stack. It is represented as a vector of PreprocessorControlDesc's.
   //
   // The control stack is updated according to the following rules:
   //
@@ -321,9 +321,9 @@ class TGLexer {
   //     EOF
   //
   // To do this, we clear the preprocessing control stack on entry
-  // to each of the included file.  PrepIncludeStack is used to store
+  // to each of the included file. PrepIncludeStack is used to store
   // preprocessing control stacks for the current file and all its
-  // parent files.  The back() element is the preprocessing control
+  // parent files. The back() element is the preprocessing control
   // stack for the current file.
   SmallVector<SmallVector<PreprocessorControlDesc>> PrepIncludeStack;
 
@@ -332,7 +332,7 @@ class TGLexer {
   //
   // If IncludeStackMustBeEmpty is true, the include stack must be empty
   // after the popping, otherwise, the include stack must not be empty
-  // after the popping.  Basically, the include stack must be empty
+  // after the popping. Basically, the include stack must be empty
   // only if we exit the "top-level" file (i.e. finish lexing).
   //
   // The method returns false, if the current preprocessing control stack
@@ -340,8 +340,8 @@ class TGLexer {
   // true - otherwise.
   bool prepExitInclude(bool IncludeStackMustBeEmpty);
 
-  // Look ahead for a preprocessing directive starting from CurPtr.  The caller
-  // must only call this method, if *(CurPtr - 1) is '#'.  If the method matches
+  // Look ahead for a preprocessing directive starting from CurPtr. The caller
+  // must only call this method, if *(CurPtr - 1) is '#'. If the method matches
   // a preprocessing directive word followed by a whitespace, then it returns
   // one of the internal token kinds, i.e. Ifdef, Else, Endif, Define.
   //
@@ -353,26 +353,26 @@ class TGLexer {
   //
   // We use look-ahead prepIsDirective() and prepEatPreprocessorDirective()
   // to avoid adjusting CurPtr before we are sure that '#' is followed
-  // by a preprocessing directive.  If it is not, then we fall back to
+  // by a preprocessing directive. If it is not, then we fall back to
   // tgtok::paste interpretation of '#'.
   void prepEatPreprocessorDirective(tgtok::TokKind Kind);
 
   // The main "exit" point from the token parsing to preprocessor.
   //
   // The method is called for CurPtr, when prepIsDirective() returns
-  // true.  The first parameter matches the result of prepIsDirective(),
+  // true. The first parameter matches the result of prepIsDirective(),
   // denoting the actual preprocessor directive to be processed.
   //
   // If the preprocessing directive disables the tokens processing, e.g.:
   //     #ifdef NAME // NAME is undefined
   // then lexPreprocessor() enters the lines-skipping mode.
   // In this mode, it does not parse any tokens, because the code under
-  // the #ifdef may not even be a correct tablegen code.  The preprocessor
+  // the #ifdef may not even be a correct tablegen code. The preprocessor
   // looks for lines containing other preprocessing directives, which
-  // may be prepended with whitespaces and C-style comments.  If the line
+  // may be prepended with whitespaces and C-style comments. If the line
   // does not contain a preprocessing directive, it is skipped completely.
   // Otherwise, the preprocessing directive is processed by recursively
-  // calling lexPreprocessor().  The processing of the encountered
+  // calling lexPreprocessor(). The processing of the encountered
   // preprocessing directives includes updating preprocessing control stack
   // and adding new macros into DefinedMacros set.
   //
@@ -390,38 +390,38 @@ class TGLexer {
 
   // Worker method for lexPreprocessor() to skip lines after some
   // preprocessing directive up to the buffer end or to the directive
-  // that re-enables token processing.  The method returns true
+  // that re-enables token processing. The method returns true
   // upon processing the next directive that re-enables tokens
-  // processing.  False is returned if an error was encountered.
+  // processing. False is returned if an error was encountered.
   //
   // Note that prepSkipRegion() calls lexPreprocessor() to process
-  // encountered preprocessing directives.  In this case, the second
-  // parameter to lexPreprocessor() is set to false.  Being passed
+  // encountered preprocessing directives. In this case, the second
+  // parameter to lexPreprocessor() is set to false. Being passed
   // false ReturnNextLiveToken, lexPreprocessor() must never call
-  // prepSkipRegion().  We assert this by passing ReturnNextLiveToken
+  // prepSkipRegion(). We assert this by passing ReturnNextLiveToken
   // to prepSkipRegion() and checking that it is never set to false.
   bool prepSkipRegion(bool MustNeverBeFalse);
 
-  // Lex name of the macro after either #ifdef or #define.  We could have used
+  // Lex name of the macro after either #ifdef or #define. We could have used
   // LexIdentifier(), but it has special handling of "include" word, which
-  // could result in awkward diagnostic errors.  Consider:
+  // could result in awkward diagnostic errors. Consider:
   // ----
   // #ifdef include
   // class ...
   // ----
   // LexIdentifier() will engage LexInclude(), which will complain about
-  // missing file with name "class".  Instead, prepLexMacroName() will treat
+  // missing file with name "class". Instead, prepLexMacroName() will treat
   // "include" as a normal macro name.
   //
   // On entry, CurPtr points to the end of a preprocessing directive word.
   // The method allows for whitespaces between the preprocessing directive
-  // and the macro name.  The allowed whitespaces are ' ' and '\t'.
+  // and the macro name. The allowed whitespaces are ' ' and '\t'.
   //
   // If the first non-whitespace symbol after the preprocessing directive
   // is a valid start symbol for an identifier (i.e. [a-zA-Z_]), then
   // the method updates TokStart to the position of the first non-whitespace
   // symbol, sets CurPtr to the position of the macro name's last symbol,
-  // and returns a string reference to the macro name.  Otherwise,
+  // and returns a string reference to the macro name. Otherwise,
   // TokStart is set to the first non-whitespace symbol after the preprocessing
   // directive, and the method returns an empty string reference.
   //
@@ -429,10 +429,10 @@ class TGLexer {
   // the preprocessing directive.
   StringRef prepLexMacroName();
 
-  // Skip any whitespaces starting from CurPtr.  The method is used
+  // Skip any whitespaces starting from CurPtr. The method is used
   // only in the lines-skipping mode to find the first non-whitespace
-  // symbol after or at CurPtr.  Allowed whitespaces are ' ', '\t', '\n'
-  // and '\r'.  The method skips C-style comments as well, because
+  // symbol after or at CurPtr. Allowed whitespaces are ' ', '\t', '\n'
+  // and '\r'. The method skips C-style comments as well, because
   // it is used to find the beginning of the preprocessing directive.
   // If we do not handle C-style comments the following code would
   // result in incorrect detection of a preprocessing directive:
@@ -445,13 +445,13 @@ class TGLexer {
   //        second line comment */ #ifdef NAME
   //
   // The method returns true upon reaching the first non-whitespace symbol
-  // or EOF, CurPtr is set to point to this symbol.  The method returns false,
+  // or EOF, CurPtr is set to point to this symbol. The method returns false,
   // if an error occurred during skipping of a C-style comment.
   bool prepSkipLineBegin();
 
   // Skip any whitespaces or comments after a preprocessing directive.
   // The method returns true upon reaching either end of the line
-  // or end of the file.  If there is a multiline C-style comment
+  // or end of the file. If there is a multiline C-style comment
   // after the preprocessing directive, the method skips
   // the comment, so the final CurPtr may point to one of the next lines.
   // The method returns false, if an error occurred during skipping
@@ -459,7 +459,7 @@ class TGLexer {
   // after the preprocessing directive.
   //
   // The method maybe called both during lines-skipping and tokens
-  // processing.  It actually verifies that only whitespaces or/and
+  // processing. It actually verifies that only whitespaces or/and
   // comments follow a preprocessing directive.
   //
   // After the execution of this mehod, CurPtr points either to new line
@@ -475,7 +475,7 @@ class TGLexer {
   bool prepIsProcessingEnabled();
 
   // Report an error, if we reach EOF with non-empty preprocessing control
-  // stack.  This means there is no matching #endif for the previous
+  // stack. This means there is no matching #endif for the previous
   // #ifdef/#else.
   void prepReportPreprocessorStackError();
 };

diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp
index 87a1fb64943c4..62c5355654149 100644
--- a/llvm/lib/TableGen/TGParser.cpp
+++ b/llvm/lib/TableGen/TGParser.cpp
@@ -240,17 +240,15 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const Init *ValName,
     return Error(Loc, "Value '" + ValName->getAsUnquotedString() +
                  "' unknown!");
 
-  // Do not allow assignments like 'X = X'.  This will just cause infinite loops
+  // Do not allow assignments like 'X = X'. This will just cause infinite loops
   // in the resolution machinery.
   if (BitList.empty())
     if (const auto *VI = dyn_cast<VarInit>(V))
       if (VI->getNameInit() == ValName && !AllowSelfAssignment)
         return Error(Loc, "Recursion / self-assignment forbidden");
 
-  // If we are assigning to a subset of the bits in the value... then we must be
-  // assigning to a field of BitsRecTy, which must have a BitsInit
-  // initializer.
-  //
+  // If we are assigning to a subset of the bits in the value we must be
+  // assigning to a field of BitsRecTy, which must have a BitsInit initializer.
   if (!BitList.empty()) {
     const auto *CurVal = dyn_cast<BitsInit>(RV->getValue());
     if (!CurVal)
@@ -427,9 +425,9 @@ bool TGParser::resolve(const ForeachLoop &Loop, SubstStack &Substs,
   const Init *List = Loop.ListValue->resolveReferences(R);
 
   // For if-then-else blocks, we lower to a foreach loop whose list is a
-  // ternary selection between lists of different length.  Since we don't
+  // ternary selection between lists of different length. Since we don't
   // have a means to track variable length record lists, we *must* resolve
-  // the condition here.  We want to defer final resolution of the arms
+  // the condition here. We want to defer final resolution of the arms
   // until the resulting records are finalized.
   // e.g. !if(!exists<SchedWrite>("__does_not_exist__"), [1], [])
   if (const auto *TI = dyn_cast<TernOpInit>(List);
@@ -700,7 +698,7 @@ const Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
   return Name;
 }
 
-/// ParseClassID - Parse and resolve a reference to a class name.  This returns
+/// ParseClassID - Parse and resolve a reference to a class name. This returns
 /// null on error.
 ///
 ///    ClassID ::= ID
@@ -788,7 +786,7 @@ ParseSubClassReference(Record *CurRec, bool isDefm) {
 }
 
 /// ParseSubMultiClassReference - Parse a reference to a subclass or to a
-/// templated submulticlass.  This returns a SubMultiClassRefTy with a null
+/// templated submulticlass. This returns a SubMultiClassRefTy with a null
 /// Record* on error.
 ///
 ///  SubMultiClassRef ::= MultiClassID
@@ -1078,7 +1076,7 @@ bool TGParser::ParseOptionalBitList(SmallVectorImpl<unsigned> &Ranges) {
   return false;
 }
 
-/// ParseType - Parse and return a tblgen type.  This returns null on error.
+/// ParseType - Parse and return a tblgen type. This returns null on error.
 ///
 ///   Type ::= STRING                       // string type
 ///   Type ::= CODE                         // code type
@@ -1180,7 +1178,7 @@ const Init *TGParser::ParseIDValue(Record *CurRec, const StringInit *Name,
   return nullptr;
 }
 
-/// ParseOperation - Parse an operator.  This returns null on error.
+/// ParseOperation - Parse an operator. This returns null on error.
 ///
 /// Operation ::= XOperator ['<' Type '>'] '(' Args ')'
 ///
@@ -2253,7 +2251,7 @@ const Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
   }
 }
 
-/// ParseOperatorType - Parse a type for an operator.  This returns
+/// ParseOperatorType - Parse a type for an operator. This returns
 /// null on error.
 ///
 /// OperatorType ::= '<' Type '>'
@@ -2670,7 +2668,7 @@ const Init *TGParser::ParseOperationCond(Record *CurRec,
   return CondOpInit::get(Case, Val, Type)->Fold(CurRec);
 }
 
-/// ParseSimpleValue - Parse a tblgen value.  This returns null on error.
+/// ParseSimpleValue - Parse a tblgen value. This returns null on error.
 ///
 ///   SimpleValue ::= IDValue
 ///   SimpleValue ::= INTVAL
@@ -2803,7 +2801,7 @@ const Init *TGParser::ParseSimpleValue(Record *CurRec, const RecTy *ItemType,
     SmallVector<const Init *, 16> NewBits;
 
     // As we parse { a, b, ... }, 'a' is the highest bit, but we parse it
-    // first.  We'll first read everything in to a vector, then we can reverse
+    // first. We'll first read everything in to a vector, then we can reverse
     // it to get the bits in the correct order for the BitsInit value.
     for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
       // FIXME: The following two loops would not be duplicated
@@ -3319,10 +3317,10 @@ bool TGParser::ParseTemplateArgValueList(
 }
 
 /// ParseDeclaration - Read a declaration, returning the name of field ID, or an
-/// empty string on error.  This can happen in a number of different contexts,
+/// empty string on error. This can happen in a number of different contexts,
 /// including within a def or in the template args for a class (in which case
 /// CurRec will be non-null) and within the template args for a multiclass (in
-/// which case CurRec will be null, but CurMultiClass will be set).  This can
+/// which case CurRec will be null, but CurMultiClass will be set). This can
 /// also happen within a def that is within a multiclass, which will set both
 /// CurRec and CurMultiClass.
 ///
@@ -3384,7 +3382,7 @@ const Init *TGParser::ParseDeclaration(Record *CurRec,
     if (!Val ||
         SetValue(CurRec, ValLoc, DeclName, {}, Val,
                  /*AllowSelfAssignment=*/false, /*OverrideDefLoc=*/false)) {
-      // Return the name, even if an error is thrown.  This is so that we can
+      // Return the name, even if an error is thrown. This is so that we can
       // continue to make some progress, even without the value having been
       // initialized.
       return DeclName;
@@ -3395,7 +3393,7 @@ const Init *TGParser::ParseDeclaration(Record *CurRec,
 }
 
 /// ParseForeachDeclaration - Read a foreach declaration, returning
-/// the name of the declared object or a NULL Init on error.  Return
+/// the name of the declared object or a NULL Init on error. Return
 /// the name of the parsed initializer list through ForeachListName.
 ///
 ///  ForeachDeclaration ::= ID '=' '{' RangeList '}'
@@ -3477,7 +3475,7 @@ TGParser::ParseForeachDeclaration(const Init *&ForeachListValue) {
 }
 
 /// ParseTemplateArgList - Read a template argument list, which is a non-empty
-/// sequence of template-declarations in <>'s.  If CurRec is non-null, these are
+/// sequence of template-declarations in <>'s. If CurRec is non-null, these are
 /// template args for a class. If null, these are the template args for a
 /// multiclass.
 ///
@@ -3578,7 +3576,7 @@ bool TGParser::ParseBodyItem(Record *CurRec) {
   return SetValue(CurRec, IdLoc, FieldName, BitList, Val);
 }
 
-/// ParseBody - Read the body of a class or def.  Return true on error, false on
+/// ParseBody - Read the body of a class or def. Return true on error, false on
 /// success.
 ///
 ///   Body     ::= ';'
@@ -3641,8 +3639,8 @@ bool TGParser::ApplyLetStack(RecordsEntry &Entry) {
   return false;
 }
 
-/// ParseObjectBody - Parse the body of a def or class.  This consists of an
-/// optional ClassList followed by a Body.  CurRec is the current def or class
+/// ParseObjectBody - Parse the body of a def or class. This consists of an
+/// optional ClassList followed by a Body. CurRec is the current def or class
 /// that is being parsed.
 ///
 ///   ObjectBody      ::= BaseClassList Body
@@ -3841,8 +3839,8 @@ bool TGParser::ParseDefvar(Record *CurRec) {
   return false;
 }
 
-/// ParseForeach - Parse a for statement.  Return the record corresponding
-/// to it.  This returns true on error.
+/// ParseForeach - Parse a for statement. Return the record corresponding
+/// to it. This returns true on error.
 ///
 ///   Foreach ::= FOREACH Declaration IN '{ ObjectList '}'
 ///   Foreach ::= FOREACH Declaration IN Object
@@ -4129,7 +4127,7 @@ void TGParser::ParseLetList(SmallVectorImpl<LetRecord> &Result) {
   } while (consume(tgtok::comma));
 }
 
-/// ParseTopLevelLet - Parse a 'let' at top level.  This can be a couple of
+/// ParseTopLevelLet - Parse a 'let' at top level. This can be a couple of
 /// different related productions. This works inside multiclasses too.
 ///
 ///   Object ::= LET LetList IN '{' ObjectList '}'

diff --git a/llvm/lib/TableGen/TGParser.h b/llvm/lib/TableGen/TGParser.h
index 017cc5fff683a..2a5a1925343cf 100644
--- a/llvm/lib/TableGen/TGParser.h
+++ b/llvm/lib/TableGen/TGParser.h
@@ -161,10 +161,10 @@ class TGParser {
   // Record tracker
   RecordKeeper &Records;
 
-  // A "named boolean" indicating how to parse identifiers.  Usually
+  // A "named boolean" indicating how to parse identifiers. Usually
   // identifiers map to some existing object but in special cases
   // (e.g. parsing def names) no such object exists yet because we are
-  // in the middle of creating in.  For those situations, allow the
+  // in the middle of creating in. For those situations, allow the
   // parser to ignore missing object errors.
   enum IDParseMode {
     ParseValueMode,   // We are parsing a value we expect to look up.
@@ -183,7 +183,7 @@ class TGParser {
         NoWarnOnUnusedTemplateArgs(NoWarnOnUnusedTemplateArgs),
         TrackReferenceLocs(TrackReferenceLocs) {}
 
-  /// ParseFile - Main entrypoint for parsing a tblgen file.  These parser
+  /// ParseFile - Main entrypoint for parsing a tblgen file. These parser
   /// routines return true on error, or false on success.
   bool ParseFile();
 


        

