[cfe-commits] r42799 - in /cfe/trunk/Lex: Lexer.cpp LiteralSupport.cpp MacroExpander.cpp PPExpressions.cpp Pragma.cpp Preprocessor.cpp

Chris Lattner <sabre at nondot.org>
Tue Oct 9 11:02:17 PDT 2007


Author: lattner
Date: Tue Oct  9 13:02:16 2007
New Revision: 42799

URL: http://llvm.org/viewvc/llvm-project?rev=42799&view=rev
Log:
Switch lexer/pp over to new Token::is/isNot api

Modified:
    cfe/trunk/Lex/Lexer.cpp
    cfe/trunk/Lex/LiteralSupport.cpp
    cfe/trunk/Lex/MacroExpander.cpp
    cfe/trunk/Lex/PPExpressions.cpp
    cfe/trunk/Lex/Pragma.cpp
    cfe/trunk/Lex/Preprocessor.cpp
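
As context for the mechanical change below: the new accessors are simple kind-comparison
predicates on Token. A minimal sketch of what they look like, reconstructed from the call
sites in this diff (the exact definitions live in the Token class header and may differ in
detail; the field and enumerator names here are illustrative assumptions):

  // Illustrative sketch only; member and enum names are assumptions,
  // not copied from the actual Clang headers.
  namespace tok { enum TokenKind { unknown, eof, eom, identifier, l_paren /* ... */ }; }

  class Token {
    tok::TokenKind Kind;
  public:
    tok::TokenKind getKind() const { return Kind; }
    void setKind(tok::TokenKind K) { Kind = K; }

    // is/isNot express "is this token of kind K?" directly, so call sites
    // read Tok.is(tok::eom) instead of Tok.getKind() == tok::eom.
    bool is(tok::TokenKind K) const { return Kind == K; }
    bool isNot(tok::TokenKind K) const { return Kind != K; }
  };

Every hunk below is the same substitution: getKind() == K becomes is(K) and
getKind() != K becomes isNot(K), with no change in behavior.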

Modified: cfe/trunk/Lex/Lexer.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/Lex/Lexer.cpp?rev=42799&r1=42798&r2=42799&view=diff

==============================================================================
--- cfe/trunk/Lex/Lexer.cpp (original)
+++ cfe/trunk/Lex/Lexer.cpp Tue Oct  9 13:02:16 2007
@@ -41,8 +41,8 @@
 
 /// isObjCAtKeyword - Return true if we have an ObjC keyword identifier. 
 bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
-  return getKind() == tok::identifier && 
-  getIdentifierInfo()->getObjCKeywordID() == objcKey;
+  return is(tok::identifier) && 
+         getIdentifierInfo()->getObjCKeywordID() == objcKey;
 }
 
 /// getObjCKeywordID - Return the ObjC keyword kind.
@@ -979,7 +979,7 @@
   ParsingFilename = false;
   
   // No filename?
-  if (FilenameTok.getKind() == tok::eom)
+  if (FilenameTok.is(tok::eom))
     Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
 }
 
@@ -1015,7 +1015,7 @@
       
       // Next, lex the character, which should handle the EOM transition.
       Lex(Tmp);
-      assert(Tmp.getKind() == tok::eom && "Unexpected token!");
+      assert(Tmp.is(tok::eom) && "Unexpected token!");
       
       // Finally, we're done, return the string we found.
       return Result;
@@ -1096,9 +1096,9 @@
   // Restore the lexer back to non-skipping mode.
   LexingRawMode = false;
   
-  if (Tok.getKind() == tok::eof)
+  if (Tok.is(tok::eof))
     return 2;
-  return Tok.getKind() == tok::l_paren;
+  return Tok.is(tok::l_paren);
 }
 
 

Modified: cfe/trunk/Lex/LiteralSupport.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/Lex/LiteralSupport.cpp?rev=42799&r1=42798&r2=42799&view=diff

==============================================================================
--- cfe/trunk/Lex/LiteralSupport.cpp (original)
+++ cfe/trunk/Lex/LiteralSupport.cpp Tue Oct  9 13:02:16 2007
@@ -549,7 +549,7 @@
   // literal, the result is a wide-string literal [C99 6.4.5p4].
   MaxTokenLength = StringToks[0].getLength();
   SizeBound = StringToks[0].getLength()-2;  // -2 for "".
-  AnyWide = StringToks[0].getKind() == tok::wide_string_literal;
+  AnyWide = StringToks[0].is(tok::wide_string_literal);
   
   hadError = false;
 
@@ -565,7 +565,7 @@
       MaxTokenLength = StringToks[i].getLength();
     
     // Remember if we see any wide strings.
-    AnyWide |= StringToks[i].getKind() == tok::wide_string_literal;
+    AnyWide |= StringToks[i].is(tok::wide_string_literal);
   }
   
   

Modified: cfe/trunk/Lex/MacroExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/Lex/MacroExpander.cpp?rev=42799&r1=42798&r2=42799&view=diff

==============================================================================
--- cfe/trunk/Lex/MacroExpander.cpp (original)
+++ cfe/trunk/Lex/MacroExpander.cpp Tue Oct  9 13:02:16 2007
@@ -59,7 +59,7 @@
 /// argument.
 unsigned MacroArgs::getArgLength(const Token *ArgPtr) {
   unsigned NumArgTokens = 0;
-  for (; ArgPtr->getKind() != tok::eof; ++ArgPtr)
+  for (; ArgPtr->isNot(tok::eof); ++ArgPtr)
     ++NumArgTokens;
   return NumArgTokens;
 }
@@ -75,7 +75,7 @@
   // Scan to find Arg.
   for (; Arg; ++Result) {
     assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
-    if (Result->getKind() == tok::eof)
+    if (Result->is(tok::eof))
       --Arg;
   }
   return Result;
@@ -88,7 +88,7 @@
                                      Preprocessor &PP) const {
   // If there are no identifiers in the argument list, or if the identifiers are
   // known to not be macros, pre-expansion won't modify it.
-  for (; ArgTok->getKind() != tok::eof; ++ArgTok)
+  for (; ArgTok->isNot(tok::eof); ++ArgTok)
     if (IdentifierInfo *II = ArgTok->getIdentifierInfo()) {
       if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled())
         // Return true even though the macro could be a function-like macro
@@ -124,7 +124,7 @@
   do {
     Result.push_back(Token());
     PP.Lex(Result.back());
-  } while (Result.back().getKind() != tok::eof);
+  } while (Result.back().isNot(tok::eof));
   
   // Pop the token stream off the top of the stack.  We know that the internal
   // pointer inside of it is to the "end" of the token stream, but the stack
@@ -152,7 +152,7 @@
   std::string Result = "\"";
   // FIXME: Optimize this loop to not use std::strings.
   bool isFirst = true;
-  for (; ArgToks->getKind() != tok::eof; ++ArgToks) {
+  for (; ArgToks->isNot(tok::eof); ++ArgToks) {
     const Token &Tok = *ArgToks;
     if (!isFirst && (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()))
       Result += ' ';
@@ -160,9 +160,9 @@
     
     // If this is a string or character constant, escape the token as specified
     // by 6.10.3.2p2.
-    if (Tok.getKind() == tok::string_literal ||      // "foo"
-        Tok.getKind() == tok::wide_string_literal || // L"foo"
-        Tok.getKind() == tok::char_constant) {       // 'x' and L'x'.
+    if (Tok.is(tok::string_literal) ||       // "foo"
+        Tok.is(tok::wide_string_literal) ||  // L"foo"
+        Tok.is(tok::char_constant)) {        // 'x' and L'x'.
       Result += Lexer::Stringify(PP.getSpelling(Tok));
     } else {
       // Otherwise, just append the token.
@@ -223,7 +223,7 @@
     memset(&StringifiedArgs[0], 0,
            sizeof(StringifiedArgs[0])*getNumArguments());
   }
-  if (StringifiedArgs[ArgNo].getKind() != tok::string_literal)
+  if (StringifiedArgs[ArgNo].isNot(tok::string_literal))
     StringifiedArgs[ArgNo] = StringifyArgument(getUnexpArgument(ArgNo), PP);
   return StringifiedArgs[ArgNo];
 }
@@ -320,12 +320,12 @@
     // preprocessor already verified that the following token is a macro name
     // when the #define was parsed.
     const Token &CurTok = MacroTokens[i];
-    if (CurTok.getKind() == tok::hash || CurTok.getKind() == tok::hashat) {
+    if (CurTok.is(tok::hash) || CurTok.is(tok::hashat)) {
       int ArgNo = Macro->getArgumentNum(MacroTokens[i+1].getIdentifierInfo());
       assert(ArgNo != -1 && "Token following # is not an argument?");
     
       Token Res;
-      if (CurTok.getKind() == tok::hash)  // Stringify
+      if (CurTok.is(tok::hash))  // Stringify
         Res = ActualArgs->getStringifiedArgument(ArgNo, PP);
       else {
         // 'charify': don't bother caching these.
@@ -366,8 +366,8 @@
     // Otherwise, this is a use of the argument.  Find out if there is a paste
     // (##) operator before or after the argument.
     bool PasteBefore = 
-      !ResultToks.empty() && ResultToks.back().getKind() == tok::hashhash;
-    bool PasteAfter = i+1 != e && MacroTokens[i+1].getKind() == tok::hashhash;
+      !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
+    bool PasteAfter = i+1 != e && MacroTokens[i+1].is(tok::hashhash);
     
     // If it is not the LHS/RHS of a ## operator, we must pre-expand the
     // argument and substitute the expanded tokens into the result.  This is
@@ -384,7 +384,7 @@
         ResultArgToks = ArgTok;  // Use non-preexpanded tokens.
       
       // If the arg token expanded into anything, append it.
-      if (ResultArgToks->getKind() != tok::eof) {
+      if (ResultArgToks->isNot(tok::eof)) {
         unsigned FirstResult = ResultToks.size();
         unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
         ResultToks.append(ResultArgToks, ResultArgToks+NumToks);
@@ -435,7 +435,7 @@
     
     // If this is on the RHS of a paste operator, we've already copied the
     // paste operator to the ResultToks list.  Remove it.
-    assert(PasteBefore && ResultToks.back().getKind() == tok::hashhash);
+    assert(PasteBefore && ResultToks.back().is(tok::hashhash));
     NextTokGetsSpace |= ResultToks.back().hasLeadingSpace();
     ResultToks.pop_back();
     
@@ -444,7 +444,7 @@
     // the ## was a comma, remove the comma.
     if ((unsigned)ArgNo == Macro->getNumArgs()-1 && // is __VA_ARGS__
         ActualArgs->isVarargsElidedUse() &&       // Argument elided.
-        !ResultToks.empty() && ResultToks.back().getKind() == tok::comma) {
+        !ResultToks.empty() && ResultToks.back().is(tok::comma)) {
       // Never add a space, even if the comma, ##, or arg had a space.
       NextTokGetsSpace = false;
       ResultToks.pop_back();
@@ -492,7 +492,7 @@
   Tok = MacroTokens[CurToken++];
   
   // If this token is followed by a token paste (##) operator, paste the tokens!
-  if (!isAtEnd() && MacroTokens[CurToken].getKind() == tok::hashhash)
+  if (!isAtEnd() && MacroTokens[CurToken].is(tok::hashhash))
     PasteTokens(Tok);
 
   // The token's current location indicate where the token was lexed from.  We
@@ -566,10 +566,9 @@
     
     // Avoid testing /*, as the lexer would think it is the start of a comment
     // and emit an error that it is unterminated.
-    if (Tok.getKind() == tok::slash && RHS.getKind() == tok::star) {
+    if (Tok.is(tok::slash) && RHS.is(tok::star)) {
       isInvalid = true;
-    } else if (Tok.getKind() == tok::identifier && 
-               RHS.getKind() == tok::identifier) {
+    } else if (Tok.is(tok::identifier) && RHS.is(tok::identifier)) {
       // Common paste case: identifier+identifier = identifier.  Avoid creating
       // a lexer and other overhead.
       PP.IncrementPasteCounter(true);
@@ -596,7 +595,7 @@
       
       // If we got an EOF token, we didn't form even ONE token.  For example, we
       // did "/ ## /" to get "//".
-      IsComplete &= Result.getKind() != tok::eof;
+      IsComplete &= Result.isNot(tok::eof);
       isInvalid = !IsComplete;
       
       // We're now done with the temporary lexer.
@@ -614,7 +613,7 @@
     }
     
     // Turn ## into 'other' to avoid # ## # from looking like a paste operator.
-    if (Result.getKind() == tok::hashhash)
+    if (Result.is(tok::hashhash))
       Result.setKind(tok::unknown);
     // FIXME: Turn __VARRGS__ into "not a token"?
     
@@ -625,12 +624,12 @@
     // Finally, replace LHS with the result, consume the RHS, and iterate.
     ++CurToken;
     Tok = Result;
-  } while (!isAtEnd() && MacroTokens[CurToken].getKind() == tok::hashhash);
+  } while (!isAtEnd() && MacroTokens[CurToken].is(tok::hashhash));
   
   // Now that we got the result token, it will be subject to expansion.  Since
   // token pasting re-lexes the result token in raw mode, identifier information
   // isn't looked up.  As such, if the result is an identifier, look up id info.
-  if (Tok.getKind() == tok::identifier) {
+  if (Tok.is(tok::identifier)) {
     // Look up the identifier info for the token.  We disabled identifier lookup
     // by saying we're skipping contents, so we need to do this manually.
     Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));
@@ -644,5 +643,5 @@
   // Out of tokens?
   if (isAtEnd())
     return 2;
-  return MacroTokens[CurToken].getKind() == tok::l_paren;
+  return MacroTokens[CurToken].is(tok::l_paren);
 }

Modified: cfe/trunk/Lex/PPExpressions.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/Lex/PPExpressions.cpp?rev=42799&r1=42798&r2=42799&view=diff

==============================================================================
--- cfe/trunk/Lex/PPExpressions.cpp (original)
+++ cfe/trunk/Lex/PPExpressions.cpp Tue Oct  9 13:02:16 2007
@@ -87,7 +87,7 @@
 
     // Two options, it can either be a pp-identifier or a (.
     bool InParens = false;
-    if (PeekTok.getKind() == tok::l_paren) {
+    if (PeekTok.is(tok::l_paren)) {
       // Found a paren, remember we saw it and skip it.
       InParens = true;
       PP.LexUnexpandedToken(PeekTok);
@@ -129,7 +129,7 @@
 
     // If we are in parens, ensure we have a trailing ).
     if (InParens) {
-      if (PeekTok.getKind() != tok::r_paren) {
+      if (PeekTok.isNot(tok::r_paren)) {
         PP.Diag(PeekTok, diag::err_pp_missing_rparen);
         return true;
       }
@@ -246,13 +246,13 @@
 
     // If this is a silly value like (X), which doesn't need parens, check for
     // !(defined X).
-    if (PeekTok.getKind() == tok::r_paren) {
+    if (PeekTok.is(tok::r_paren)) {
       // Just use DT unmodified as our result.
     } else {
       if (EvaluateDirectiveSubExpr(Result, 1, PeekTok, ValueLive, PP))
         return true;
       
-      if (PeekTok.getKind() != tok::r_paren) {
+      if (PeekTok.isNot(tok::r_paren)) {
         PP.Diag(PeekTok, diag::err_pp_expected_rparen);
         return true;
       }
@@ -560,7 +560,7 @@
       break; 
     case tok::question: {
       // Parse the : part of the expression.
-      if (PeekTok.getKind() != tok::colon) {
+      if (PeekTok.isNot(tok::colon)) {
         PP.Diag(OpToken, diag::err_pp_question_without_colon);
         return true;
       }
@@ -622,7 +622,7 @@
   DefinedTracker DT;
   if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
     // Parse error, skip the rest of the macro line.
-    if (Tok.getKind() != tok::eom)
+    if (Tok.isNot(tok::eom))
       DiscardUntilEndOfDirective();
     return false;
   }
@@ -630,7 +630,7 @@
   // If we are at the end of the expression after just parsing a value, there
   // must be no (unparenthesized) binary operators involved, so we can exit
   // directly.
-  if (Tok.getKind() == tok::eom) {
+  if (Tok.is(tok::eom)) {
     // If the expression we parsed was of the form !defined(macro), return the
     // macro in IfNDefMacro.
     if (DT.State == DefinedTracker::NotDefinedMacro)
@@ -643,14 +643,14 @@
   // operator and the stuff after it.
   if (EvaluateDirectiveSubExpr(ResVal, 1, Tok, true, *this)) {
     // Parse error, skip the rest of the macro line.
-    if (Tok.getKind() != tok::eom)
+    if (Tok.isNot(tok::eom))
       DiscardUntilEndOfDirective();
     return false;
   }
   
   // If we aren't at the tok::eom token, something bad happened, like an extra
   // ')' token.
-  if (Tok.getKind() != tok::eom) {
+  if (Tok.isNot(tok::eom)) {
     Diag(Tok, diag::err_pp_expected_eol);
     DiscardUntilEndOfDirective();
   }

Modified: cfe/trunk/Lex/Pragma.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/Lex/Pragma.cpp?rev=42799&r1=42798&r2=42799&view=diff

==============================================================================
--- cfe/trunk/Lex/Pragma.cpp (original)
+++ cfe/trunk/Lex/Pragma.cpp Tue Oct  9 13:02:16 2007
@@ -93,13 +93,12 @@
   
   // Read the '('.
   Lex(Tok);
-  if (Tok.getKind() != tok::l_paren)
+  if (Tok.isNot(tok::l_paren))
     return Diag(PragmaLoc, diag::err__Pragma_malformed);
 
   // Read the '"..."'.
   Lex(Tok);
-  if (Tok.getKind() != tok::string_literal &&
-      Tok.getKind() != tok::wide_string_literal)
+  if (Tok.isNot(tok::string_literal) && Tok.isNot(tok::wide_string_literal))
     return Diag(PragmaLoc, diag::err__Pragma_malformed);
   
   // Remember the string.
@@ -108,7 +107,7 @@
 
   // Read the ')'.
   Lex(Tok);
-  if (Tok.getKind() != tok::r_paren)
+  if (Tok.isNot(tok::r_paren))
     return Diag(PragmaLoc, diag::err__Pragma_malformed);
   
   // The _Pragma is lexically sound.  Destringize according to C99 6.10.9.1.
@@ -194,10 +193,10 @@
     if (CurLexer) CurLexer->LexingRawMode = false;
     
     // If we reached the end of line, we're done.
-    if (Tok.getKind() == tok::eom) return;
+    if (Tok.is(tok::eom)) return;
     
     // Can only poison identifiers.
-    if (Tok.getKind() != tok::identifier) {
+    if (Tok.isNot(tok::identifier)) {
       Diag(Tok, diag::err_pp_invalid_poison);
       return;
     }
@@ -247,7 +246,7 @@
   CurLexer->LexIncludeFilename(FilenameTok);
 
   // If the token kind is EOM, the error has already been diagnosed.
-  if (FilenameTok.getKind() == tok::eom)
+  if (FilenameTok.is(tok::eom))
     return;
   
   // Reserve a buffer to get the spelling.
@@ -280,7 +279,7 @@
     // Lex tokens at the end of the message and include them in the message.
     std::string Message;
     Lex(DependencyTok);
-    while (DependencyTok.getKind() != tok::eom) {
+    while (DependencyTok.isNot(tok::eom)) {
       Message += getSpelling(DependencyTok) + " ";
       Lex(DependencyTok);
     }

Modified: cfe/trunk/Lex/Preprocessor.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/Lex/Preprocessor.cpp?rev=42799&r1=42798&r2=42799&view=diff

==============================================================================
--- cfe/trunk/Lex/Preprocessor.cpp (original)
+++ cfe/trunk/Lex/Preprocessor.cpp Tue Oct  9 13:02:16 2007
@@ -618,7 +618,7 @@
   
   Token Tok;
   LexUnexpandedToken(Tok);
-  assert(Tok.getKind() == tok::l_paren && "Error computing l-paren-ness?");
+  assert(Tok.is(tok::l_paren) && "Error computing l-paren-ness?");
   return true;
 }
 
@@ -767,7 +767,7 @@
   llvm::SmallVector<Token, 64> ArgTokens;
 
   unsigned NumActuals = 0;
-  while (Tok.getKind() == tok::comma) {
+  while (Tok.is(tok::comma)) {
     // C99 6.10.3p11: Keep track of the number of l_parens we have seen.  Note
     // that we already consumed the first one.
     unsigned NumParens = 0;
@@ -777,18 +777,18 @@
       // an argument value in a macro could expand to ',' or '(' or ')'.
       LexUnexpandedToken(Tok);
       
-      if (Tok.getKind() == tok::eof) {
+      if (Tok.is(tok::eof)) {
         Diag(MacroName, diag::err_unterm_macro_invoc);
         // Do not lose the EOF.  Return it to the client.
         MacroName = Tok;
         return 0;
-      } else if (Tok.getKind() == tok::r_paren) {
+      } else if (Tok.is(tok::r_paren)) {
         // If we found the ) token, the macro arg list is done.
         if (NumParens-- == 0)
           break;
-      } else if (Tok.getKind() == tok::l_paren) {
+      } else if (Tok.is(tok::l_paren)) {
         ++NumParens;
-      } else if (Tok.getKind() == tok::comma && NumParens == 0) {
+      } else if (Tok.is(tok::comma) && NumParens == 0) {
         // Comma ends this argument if there are more fixed arguments expected.
         if (NumFixedArgsLeft)
           break;
@@ -801,7 +801,7 @@
           return 0;
         }
         // Otherwise, continue to add the tokens to this variable argument.
-      } else if (Tok.getKind() == tok::comment && !KeepMacroComments) {
+      } else if (Tok.is(tok::comment) && !KeepMacroComments) {
         // If this is a comment token in the argument list and we're just in
         // -C mode (not -CC mode), discard the comment.
         continue;
@@ -1005,7 +1005,7 @@
 /// identifier information for the token and install it into the token.
 IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
                                                    const char *BufPtr) {
-  assert(Identifier.getKind() == tok::identifier && "Not an identifier!");
+  assert(Identifier.is(tok::identifier) && "Not an identifier!");
   assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
   
   // Look up this token, see if it is a macro, or if it is a language keyword.
@@ -1174,7 +1174,7 @@
   Token Tmp;
   do {
     LexUnexpandedToken(Tmp);
-  } while (Tmp.getKind() != tok::eom);
+  } while (Tmp.isNot(tok::eom));
 }
 
 /// isCXXNamedOperator - Returns "true" if the token is a named operator in C++.
@@ -1194,7 +1194,7 @@
   LexUnexpandedToken(MacroNameTok);
   
   // Missing macro name?
-  if (MacroNameTok.getKind() == tok::eom)
+  if (MacroNameTok.is(tok::eom))
     return Diag(MacroNameTok, diag::err_pp_missing_macro_name);
   
   IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
@@ -1235,10 +1235,10 @@
   Lex(Tmp);
   // There should be no tokens after the directive, but we allow them as an
   // extension.
-  while (Tmp.getKind() == tok::comment)  // Skip comments in -C mode.
+  while (Tmp.is(tok::comment))  // Skip comments in -C mode.
     Lex(Tmp);
   
-  if (Tmp.getKind() != tok::eom) {
+  if (Tmp.isNot(tok::eom)) {
     Diag(Tmp, diag::ext_pp_extra_tokens_at_eol, DirType);
     DiscardUntilEndOfDirective();
   }
@@ -1272,7 +1272,7 @@
     CurLexer->Lex(Tok);
     
     // If this is the end of the buffer, we have an error.
-    if (Tok.getKind() == tok::eof) {
+    if (Tok.is(tok::eof)) {
       // Emit errors for each unterminated conditional on the stack, including
       // the current one.
       while (!CurLexer->ConditionalStack.empty()) {
@@ -1286,7 +1286,7 @@
     }
     
     // If this token is not a preprocessor directive, just skip it.
-    if (Tok.getKind() != tok::hash || !Tok.isAtStartOfLine())
+    if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
       continue;
       
     // We just parsed a # character at the start of a line, so we're in
@@ -1301,7 +1301,7 @@
     
     // If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
     // something bogus), skip it.
-    if (Tok.getKind() != tok::identifier) {
+    if (Tok.isNot(tok::identifier)) {
       CurLexer->ParsingPreprocessorDirective = false;
       // Restore comment saving mode.
       CurLexer->KeepCommentMode = KeepComments;
@@ -1586,8 +1586,8 @@
   Lex(StrTok);
   
   // If the token kind isn't a string, it's a malformed directive.
-  if (StrTok.getKind() != tok::string_literal &&
-      StrTok.getKind() != tok::wide_string_literal)
+  if (StrTok.isNot(tok::string_literal) &&
+      StrTok.isNot(tok::wide_string_literal))
     return Diag(StrTok, diag::err_pp_malformed_ident);
   
   // Verify that there is nothing after the string, other than EOM.
@@ -1663,7 +1663,7 @@
   Token CurTok;
   
   PP.Lex(CurTok);
-  while (CurTok.getKind() != tok::eom) {
+  while (CurTok.isNot(tok::eom)) {
     // Append the spelling of this token to the buffer. If there was a space
     // before it, add it now.
     if (CurTok.hasLeadingSpace())
@@ -1685,7 +1685,7 @@
       FilenameBuffer.resize(PreAppendSize+ActualLen);
     
     // If we found the '>' marker, return success.
-    if (CurTok.getKind() == tok::greater)
+    if (CurTok.is(tok::greater))
       return false;
     
     PP.Lex(CurTok);
@@ -1842,7 +1842,7 @@
 
       // Lex the token after the identifier.
       LexUnexpandedToken(Tok);
-      if (Tok.getKind() != tok::r_paren) {
+      if (Tok.isNot(tok::r_paren)) {
         Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
         return true;
       }
@@ -1893,7 +1893,7 @@
         
         // Lex the token after the identifier.
         LexUnexpandedToken(Tok);
-        if (Tok.getKind() != tok::r_paren) {
+        if (Tok.isNot(tok::r_paren)) {
           Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
           return true;
         }
@@ -1918,7 +1918,7 @@
   ReadMacroName(MacroNameTok, 1);
   
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.getKind() == tok::eom)
+  if (MacroNameTok.is(tok::eom))
     return;
 
   // If we are supposed to keep comments in #defines, reenable comment saving
@@ -1939,9 +1939,9 @@
   // If this is a function-like macro definition, parse the argument list,
   // marking each of the identifiers as being used as macro arguments.  Also,
   // check other constraints on the first token of the macro body.
-  if (Tok.getKind() == tok::eom) {
+  if (Tok.is(tok::eom)) {
     // If there is no body to this macro, we have no special handling here.
-  } else if (Tok.getKind() == tok::l_paren && !Tok.hasLeadingSpace()) {
+  } else if (Tok.is(tok::l_paren) && !Tok.hasLeadingSpace()) {
     // This is a function-like macro definition.  Read the argument list.
     MI->setIsFunctionLike();
     if (ReadMacroDefinitionArgList(MI)) {
@@ -1982,7 +1982,7 @@
   // Read the rest of the macro body.
   if (MI->isObjectLike()) {
     // Object-like macros are very simple, just read their body.
-    while (Tok.getKind() != tok::eom) {
+    while (Tok.isNot(tok::eom)) {
       MI->AddTokenToBody(Tok);
       // Get the next token of the macro.
       LexUnexpandedToken(Tok);
@@ -1991,12 +1991,12 @@
   } else {
     // Otherwise, read the body of a function-like macro.  This has to validate
     // the # (stringize) operator.
-    while (Tok.getKind() != tok::eom) {
+    while (Tok.isNot(tok::eom)) {
       MI->AddTokenToBody(Tok);
 
       // Check C99 6.10.3.2p1: ensure that # operators are followed by macro
       // parameters in function-like macro expansions.
-      if (Tok.getKind() != tok::hash) {
+      if (Tok.isNot(tok::hash)) {
         // Get the next token of the macro.
         LexUnexpandedToken(Tok);
         continue;
@@ -2032,12 +2032,12 @@
   // replacement list.
   unsigned NumTokens = MI->getNumTokens();
   if (NumTokens != 0) {
-    if (MI->getReplacementToken(0).getKind() == tok::hashhash) {
+    if (MI->getReplacementToken(0).is(tok::hashhash)) {
       Diag(MI->getReplacementToken(0), diag::err_paste_at_start);
       delete MI;
       return;
     }
-    if (MI->getReplacementToken(NumTokens-1).getKind() == tok::hashhash) {
+    if (MI->getReplacementToken(NumTokens-1).is(tok::hashhash)) {
       Diag(MI->getReplacementToken(NumTokens-1), diag::err_paste_at_end);
       delete MI;
       return;
@@ -2074,7 +2074,7 @@
   ReadMacroName(MacroNameTok, 1);
   
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.getKind() == tok::eom)
+  if (MacroNameTok.is(tok::eom))
     return;
 
   // Check to see if this is the last token on the #undef line.
@@ -2101,7 +2101,7 @@
   ReadMacroName(MacroNameTok, 2);
   
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.getKind() == tok::eom)
+  if (MacroNameTok.is(tok::eom))
     return;
   
   // Check to see if this is the last token on the #undef line.
@@ -2143,7 +2143,7 @@
   ReadMacroName(MacroNameTok);
   
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.getKind() == tok::eom) {
+  if (MacroNameTok.is(tok::eom)) {
     // Skip code until we get to #endif.  This helps with recovery by not
     // emitting an error when the #endif is reached.
     SkipExcludedConditionalBlock(DirectiveTok.getLocation(),




