[cfe-commits] r38540 - in /cfe/cfe/trunk: Basic/Diagnostic.cpp Driver/clang.cpp Lex/Lexer.cpp Lex/MacroExpander.cpp Lex/PPExpressions.cpp Lex/Preprocessor.cpp include/clang/Basic/Diagnostic.h include/clang/Lex/Lexer.h include/clang/Lex/MacroExpander.h include/clang/Lex/Preprocessor.h

sabre at cs.uiuc.edu sabre at cs.uiuc.edu
Wed Jul 11 09:22:20 PDT 2007


Author: sabre
Date: Wed Jul 11 11:22:20 2007
New Revision: 38540

URL: http://llvm.org/viewvc/llvm-project?rev=38540&view=rev
Log:
Remove manual conditional error handling code.

Modified:
    cfe/cfe/trunk/Basic/Diagnostic.cpp
    cfe/cfe/trunk/Driver/clang.cpp
    cfe/cfe/trunk/Lex/Lexer.cpp
    cfe/cfe/trunk/Lex/MacroExpander.cpp
    cfe/cfe/trunk/Lex/PPExpressions.cpp
    cfe/cfe/trunk/Lex/Preprocessor.cpp
    cfe/cfe/trunk/include/clang/Basic/Diagnostic.h
    cfe/cfe/trunk/include/clang/Lex/Lexer.h
    cfe/cfe/trunk/include/clang/Lex/MacroExpander.h
    cfe/cfe/trunk/include/clang/Lex/Preprocessor.h

Modified: cfe/cfe/trunk/Basic/Diagnostic.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/Basic/Diagnostic.cpp?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/Basic/Diagnostic.cpp (original)
+++ cfe/cfe/trunk/Basic/Diagnostic.cpp Wed Jul 11 11:22:20 2007
@@ -100,18 +100,17 @@
 /// Report - Issue the message to the client. If the client wants us to stop
 /// compilation, return true, otherwise return false.  DiagID is a member of
 /// the diag::kind enum.  
-bool Diagnostic::Report(SourceLocation Pos, unsigned DiagID,
+void Diagnostic::Report(SourceLocation Pos, unsigned DiagID,
                         const std::string &Extra) {
   // Figure out the diagnostic level of this message.
   Diagnostic::Level DiagLevel = getDiagnosticLevel(DiagID);
   
-  // If the client doesn't care about this message, don't map to the code.
+  // If the client doesn't care about this message, don't issue it.
   if (DiagLevel == Diagnostic::Ignored)
-    return false;
+    return;
   
   // Finally, report it.
-  return Client.HandleDiagnostic(DiagLevel, Pos, (diag::kind)DiagID, Extra) ||
-         DiagLevel == Fatal;
+  Client.HandleDiagnostic(DiagLevel, Pos, (diag::kind)DiagID, Extra);
 }
 
 DiagnosticClient::~DiagnosticClient() {}

Modified: cfe/cfe/trunk/Driver/clang.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/Driver/clang.cpp?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/Driver/clang.cpp (original)
+++ cfe/cfe/trunk/Driver/clang.cpp Wed Jul 11 11:22:20 2007
@@ -104,7 +104,7 @@
   
   void PrintIncludeStack(SourceLocation Pos);
 
-  virtual bool HandleDiagnostic(Diagnostic::Level DiagLevel,
+  virtual void HandleDiagnostic(Diagnostic::Level DiagLevel,
                                 SourceLocation Pos,
                                 diag::kind ID, const std::string &Msg);
 };
@@ -125,7 +125,7 @@
 }
 
 
-bool DiagnosticPrinterSTDERR::HandleDiagnostic(Diagnostic::Level Level, 
+void DiagnosticPrinterSTDERR::HandleDiagnostic(Diagnostic::Level Level, 
                                                SourceLocation Pos,
                                                diag::kind ID, 
                                                const std::string &Extra) {
@@ -206,7 +206,6 @@
     // Print out the caret itself.
     std::cerr << Indent << "^\n";
   }
-  return false;
 }
 
 
@@ -595,7 +594,7 @@
   char Buffer[256];
   bool isFirstToken = true;
   do {
-    if (PP.Lex(Tok)) return;
+    PP.Lex(Tok);
 
     // If this token is at the start of a line.  Emit the \n and indentation.
     // FIXME: this shouldn't use the isAtStartOfLine flag.  This should use a
@@ -709,7 +708,7 @@
 
     // Lex the file, which will read all the macros.
     LexerToken Tok;
-    if (PP.Lex(Tok)) return 1;
+    PP.Lex(Tok);
     assert(Tok.getKind() == tok::eof && "Didn't read entire file!");
     
     // Once we've read this, we're done.
@@ -739,8 +738,7 @@
   case RunPreprocessorOnly: {        // Just lex as fast as we can, no output.
     LexerToken Tok;
     do {
-      if (PP.Lex(Tok))
-        break;
+      PP.Lex(Tok);
     } while (Tok.getKind() != tok::eof);
     break;
   }
@@ -752,8 +750,7 @@
   case DumpTokens: {                 // Token dump mode.
     LexerToken Tok;
     do {
-      if (PP.Lex(Tok))
-        break;
+      PP.Lex(Tok);
       Tok.dump(true);
       std::cerr << "\n";
     } while (Tok.getKind() != tok::eof);

Modified: cfe/cfe/trunk/Lex/Lexer.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/Lex/Lexer.cpp?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/Lex/Lexer.cpp (original)
+++ cfe/cfe/trunk/Lex/Lexer.cpp Wed Jul 11 11:22:20 2007
@@ -176,9 +176,9 @@
 
 /// Diag - Forwarding function for diagnostics.  This translates a source
 /// position in the current buffer into a SourceLocation object for rendering.
-bool Lexer::Diag(const char *Loc, unsigned DiagID,
+void Lexer::Diag(const char *Loc, unsigned DiagID,
                  const std::string &Msg) const {
-  return PP.Diag(getSourceLocation(Loc), DiagID, Msg);
+  PP.Diag(getSourceLocation(Loc), DiagID, Msg);
 }
 
 //===----------------------------------------------------------------------===//
@@ -434,7 +434,7 @@
 // Helper methods for lexing.
 //===----------------------------------------------------------------------===//
 
-bool Lexer::LexIdentifier(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexIdentifier(LexerToken &Result, const char *CurPtr) {
   // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
   unsigned Size;
   unsigned char C = *CurPtr++;
@@ -479,8 +479,7 @@
       if (!Features.DollarIdents) goto FinishIdentifier;
       
       // Otherwise, emit a diagnostic and continue.
-      if (Diag(CurPtr, diag::ext_dollar_in_identifier))
-        return true;
+      Diag(CurPtr, diag::ext_dollar_in_identifier);
       CurPtr = ConsumeChar(CurPtr, Size, Result);
       C = getCharAndSize(CurPtr, Size);
       continue;
@@ -504,7 +503,7 @@
 /// LexNumericConstant - Lex the remainder of an integer or floating point
 /// constant. From[-1] is the first character lexed.  Return the end of the
 /// constant.
-bool Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
   unsigned Size;
   char C = getCharAndSize(CurPtr, Size);
   char PrevCh = 0;
@@ -527,12 +526,11 @@
 
   // Update the end of token position as well as the BufferPtr instance var.
   Result.SetEnd(BufferPtr = CurPtr);
-  return false;
 }
 
 /// LexStringLiteral - Lex the remainder of a string literal, after having lexed
 /// either " or L".
-bool Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr) {
   const char *NulCharacter = 0; // Does this string contain the \0 character?
   
   char C = getAndAdvanceChar(CurPtr, Result);
@@ -543,8 +541,7 @@
       C = getAndAdvanceChar(CurPtr, Result);
     } else if (C == '\n' || C == '\r' ||             // Newline.
                (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
-      if (Diag(Result.getStart(), diag::err_unterminated_string))
-        return true;
+      Diag(Result.getStart(), diag::err_unterminated_string);
       BufferPtr = CurPtr-1;
       return LexTokenInternal(Result);
     } else if (C == 0) {
@@ -553,19 +550,17 @@
     C = getAndAdvanceChar(CurPtr, Result);
   }
   
-  if (NulCharacter && Diag(NulCharacter, diag::null_in_string))
-    return true;
+  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);
 
   Result.SetKind(tok::string_literal);
 
   // Update the end of token position as well as the BufferPtr instance var.
   Result.SetEnd(BufferPtr = CurPtr);
-  return false;
 }
 
 /// LexAngledStringLiteral - Lex the remainder of an angled string literal,
 /// after having lexed the '<' character.  This is used for #include filenames.
-bool Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
   const char *NulCharacter = 0; // Does this string contain the \0 character?
   
   char C = getAndAdvanceChar(CurPtr, Result);
@@ -576,8 +571,7 @@
       C = getAndAdvanceChar(CurPtr, Result);
     } else if (C == '\n' || C == '\r' ||             // Newline.
                (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
-      if (Diag(Result.getStart(), diag::err_unterminated_string))
-        return true;
+      Diag(Result.getStart(), diag::err_unterminated_string);
       BufferPtr = CurPtr-1;
       return LexTokenInternal(Result);
     } else if (C == 0) {
@@ -586,27 +580,24 @@
     C = getAndAdvanceChar(CurPtr, Result);
   }
   
-  if (NulCharacter && Diag(NulCharacter, diag::null_in_string))
-    return true;
+  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);
   
   Result.SetKind(tok::angle_string_literal);
   
   // Update the end of token position as well as the BufferPtr instance var.
   Result.SetEnd(BufferPtr = CurPtr);
-  return false;
 }
 
 
 /// LexCharConstant - Lex the remainder of a character constant, after having
 /// lexed either ' or L'.
-bool Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
   const char *NulCharacter = 0; // Does this character contain the \0 character?
 
   // Handle the common case of 'x' and '\y' efficiently.
   char C = getAndAdvanceChar(CurPtr, Result);
   if (C == '\'') {
-    if (Diag(Result.getStart(), diag::err_empty_character))
-      return true;
+    Diag(Result.getStart(), diag::err_empty_character);
     BufferPtr = CurPtr;
     return LexTokenInternal(Result);
   } else if (C == '\\') {
@@ -626,8 +617,7 @@
         C = getAndAdvanceChar(CurPtr, Result);
       } else if (C == '\n' || C == '\r' ||               // Newline.
                  (C == 0 && CurPtr-1 == BufferEnd)) {    // End of file.
-        if (Diag(Result.getStart(), diag::err_unterminated_char))
-          return true;
+        Diag(Result.getStart(), diag::err_unterminated_char);
         BufferPtr = CurPtr-1;
         return LexTokenInternal(Result);
       } else if (C == 0) {
@@ -637,19 +627,17 @@
     } while (C != '\'');
   }
   
-  if (NulCharacter && Diag(NulCharacter, diag::null_in_char))
-    return true;
+  if (NulCharacter) Diag(NulCharacter, diag::null_in_char);
 
   Result.SetKind(tok::char_constant);
   
   // Update the end of token position as well as the BufferPtr instance var.
   Result.SetEnd(BufferPtr = CurPtr);
-  return false;
 }
 
 /// SkipWhitespace - Efficiently skip over a series of whitespace characters.
 /// Update BufferPtr to point to the next non-whitespace character and return.
-bool Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
+void Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
   // Whitespace - Skip it, then return the token after the whitespace.
   unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
   while (1) {
@@ -664,7 +652,7 @@
     if (ParsingPreprocessorDirective) {
       // End of preprocessor directive line, let LexTokenInternal handle this.
       BufferPtr = CurPtr;
-      return false;
+      return;
     }
     
     // ok, but handle newline.
@@ -691,18 +679,16 @@
     return SkipBlockComment(Result, CurPtr+2);
   }
   BufferPtr = CurPtr;
-  return false;
 }
 
 // SkipBCPLComment - We have just read the // characters from input.  Skip until
 // we find the newline character that terminates the comment.  Then update
 /// BufferPtr and return.
-bool Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
+void Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
   // If BCPL comments aren't explicitly enabled for this language, emit an
   // extension warning.
   if (!Features.BCPLComment) {
-    if (Diag(Result.getStart(), diag::ext_bcpl_comment))
-      return true;
+    Diag(Result.getStart(), diag::ext_bcpl_comment);
     
     // Mark them enabled so we only emit one warning for this translation
     // unit.
@@ -740,8 +726,8 @@
     if (CurPtr != OldPtr+1) {
       for (; OldPtr != CurPtr; ++OldPtr)
         if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
-          if (Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment))
-            return true;
+          Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
+          break;
         }
     }
     
@@ -754,7 +740,7 @@
   // return immediately, so that the lexer can return this as an EOM token.
   if (ParsingPreprocessorDirective) {
     BufferPtr = CurPtr;
-    return false;
+    return;
   }
   
   // Otherwise, eat the \n character.  We don't care if this is a \n\r or
@@ -775,20 +761,18 @@
   }
 
   BufferPtr = CurPtr;
-  return false;
+  return;
 
 FoundEOF:   // If we ran off the end of the buffer, return EOF.
   BufferPtr = CurPtr-1;
-  return false;
+  return;
 }
 
-/// isEndOfEscapedNewLine - Return true if the specified newline character
-/// (either \n or \r) is part of an escaped newline sequence.  Issue a
+/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
+/// character (either \n or \r) is part of an escaped newline sequence.  Issue a
 /// diagnostic if so.  We know that this is inside of a block comment.
-bool Lexer::isBlockCommentEndOfEscapedNewLine(const char *CurPtr,
-                                              char &PrevChar) {
+bool Lexer::isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr) {
   assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
-  PrevChar = 0;
   
   // Back up off the newline.
   --CurPtr;
@@ -812,37 +796,32 @@
   
   // If we have a slash, we know this is an escaped newline.
   if (*CurPtr == '\\') {
-    PrevChar = CurPtr[-1];
-    if (PrevChar != '*') return false;
+    if (CurPtr[-1] != '*') return false;
   } else {
     // It isn't a slash, is it the ?? / trigraph?
-    if (*CurPtr != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?')
+    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
+        CurPtr[-3] != '*')
       return false;
-    // This is the trigraph.  Emit a stern warning!
-    if ((PrevChar = CurPtr[-3]) != '*') return false;
+    
+    // This is the trigraph ending the comment.  Emit a stern warning!
     CurPtr -= 2;
 
     // If no trigraphs are enabled, warn that we ignored this trigraph and
     // ignore this * character.
     if (!Features.Trigraphs) {
-      PrevChar = 0;
-      return Diag(CurPtr, diag::trigraph_ignored_block_comment);
-    } else {
-      if (Diag(CurPtr, diag::trigraph_ends_block_comment))
-        return true;
+      Diag(CurPtr, diag::trigraph_ignored_block_comment);
+      return false;
     }
+    Diag(CurPtr, diag::trigraph_ends_block_comment);
   }
   
   // Warn about having an escaped newline between the */ characters.
-  if (Diag(CurPtr, diag::escaped_newline_block_comment_end))
-    return true;
+  Diag(CurPtr, diag::escaped_newline_block_comment_end);
   
   // If there was space between the backslash and newline, warn about it.
-  if (HasSpace &&
-      Diag(CurPtr, diag::backslash_newline_space))
-    return true;
+  if (HasSpace) Diag(CurPtr, diag::backslash_newline_space);
   
-  return false;
+  return true;
 }
 
 /// SkipBlockComment - We have just read the /* characters from input.  Read
@@ -851,17 +830,16 @@
 /// because they cannot cause the comment to end.  The only thing that can
 /// happen is the comment could end with an escaped newline between the */ end
 /// of comment.
-bool Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
+void Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
   // Scan one character past where we should, looking for a '/' character.  Once
   // we find it, check to see if it was preceded by a *.  This common
   // optimization helps people who like to put a lot of * characters in their
   // comments.
   unsigned char C = *CurPtr++;
   if (C == 0 && CurPtr == BufferEnd+1) {
-    if (Diag(Result.getStart(), diag::err_unterminated_block_comment))
-      return true;
+    Diag(Result.getStart(), diag::err_unterminated_block_comment);
     BufferPtr = CurPtr-1;
-    return false;
+    return;
   }
   
   while (1) {
@@ -876,10 +854,7 @@
         break;
       
       if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
-        char Prev;
-        if (isBlockCommentEndOfEscapedNewLine(CurPtr-2, Prev))
-          return true;
-        if (Prev == '*') {
+        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2)) {
           // We found the final */, though it had an escaped newline between the
           // * and /.  We're done!
           break;
@@ -889,17 +864,15 @@
         // If this is a /* inside of the comment, emit a warning.  Don't do this
         // if this is a /*/, which will end the comment.  This misses cases with
         // embedded escaped newlines, but oh well.
-        if (Diag(CurPtr-1, diag::nested_block_comment))
-          return true;
+        Diag(CurPtr-1, diag::nested_block_comment);
       }
     } else if (C == 0 && CurPtr == BufferEnd+1) {
-      if (Diag(Result.getStart(), diag::err_unterminated_block_comment))
-        return true;
+      Diag(Result.getStart(), diag::err_unterminated_block_comment);
       // Note: the user probably forgot a */.  We could continue immediately
       // after the /*, but this would involve lexing a lot of what really is the
       // comment, which surely would confuse the parser.
       BufferPtr = CurPtr-1;
-      return false;
+      return;
     }
     C = *CurPtr++;
   }
@@ -915,7 +888,6 @@
   // Otherwise, just return so that the next character will be lexed as a token.
   BufferPtr = CurPtr;
   Result.SetFlag(LexerToken::LeadingSpace);
-  return false;
 }
 
 //===----------------------------------------------------------------------===//
@@ -924,7 +896,7 @@
 
 /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
 /// (potentially) macro expand the filename.
-bool Lexer::LexIncludeFilename(LexerToken &Result) {
+void Lexer::LexIncludeFilename(LexerToken &Result) {
   assert(ParsingPreprocessorDirective &&
          ParsingFilename == false &&
          "Must be in a preprocessing directive!");
@@ -934,33 +906,30 @@
   
   // There should be exactly two tokens here if everything is good: first the
   // filename, then the EOM.
-  if (Lex(Result)) return true;
+  Lex(Result);
 
   // We should have gotten the filename now.
   ParsingFilename = false;
 
   // No filename?
-  if (Result.getKind() == tok::eom)
-    return Diag(Result.getStart(), diag::err_pp_expects_filename);
+  if (Result.getKind() == tok::eom) {
+    Diag(Result.getStart(), diag::err_pp_expects_filename);
+    return;
+  }
   
   // Verify that there is nothing after the filename, other than EOM.
   LexerToken EndTok;
-  if (Lex(EndTok)) return true;
+  Lex(EndTok);
 
   if (EndTok.getKind() != tok::eom) {
-    if (Diag(Result.getStart(), diag::err_pp_expects_filename))
-      return true;
+    Diag(Result.getStart(), diag::err_pp_expects_filename);
     
     // Lex until the end of the preprocessor directive line.
-    while (EndTok.getKind() != tok::eom) {
-      if (Lex(EndTok)) return true;
-    }
+    while (EndTok.getKind() != tok::eom)
+      Lex(EndTok);
     
     Result.SetKind(tok::eom);
   }
-  
-  // We're done now.
-  return false;
 }
 
 /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
@@ -996,9 +965,8 @@
       BufferPtr = CurPtr-1;
       
       // Next, lex the character, which should handle the EOM transition.
-      bool Err = Lex(Tmp);
+      Lex(Tmp);
       assert(Tmp.getKind() == tok::eom && "Unexpected token!");
-      assert(!Err && "Shouldn't have error exiting macro!");
       
       // Finally, we're done, return the string we found.
       return Result;
@@ -1008,7 +976,7 @@
 
 /// LexEndOfFile - CurPtr points to the end of this file.  Handle this
 /// condition, reporting diagnostics and handling other edge cases as required.
-bool Lexer::LexEndOfFile(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexEndOfFile(LexerToken &Result, const char *CurPtr) {
   // If we hit the end of the file while parsing a preprocessor directive,
   // end the preprocessor directive first.  The next token returned will
   // then be the end of file.
@@ -1018,24 +986,21 @@
     Result.SetKind(tok::eom);
     // Update the end of token position as well as the BufferPtr instance var.
     Result.SetEnd(BufferPtr = CurPtr);
-    return false;
+    return;
   }        
 
   // If we are in a #if directive, emit an error.
   while (!ConditionalStack.empty()) {
-    if (Diag(ConditionalStack.back().IfLoc,
-             diag::err_pp_unterminated_conditional))
-      return true;
+    Diag(ConditionalStack.back().IfLoc, diag::err_pp_unterminated_conditional);
     ConditionalStack.pop_back();
   }  
   
   // If the file was empty or didn't end in a newline, issue a pedwarn.
-  if (CurPtr[-1] != '\n' && CurPtr[-1] != '\r' && 
-      Diag(BufferEnd, diag::ext_no_newline_eof))
-    return true;
+  if (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
+    Diag(BufferEnd, diag::ext_no_newline_eof);
   
   BufferPtr = CurPtr;
-  return PP.HandleEndOfFile(Result);
+  PP.HandleEndOfFile(Result);
 }
 
 
@@ -1046,7 +1011,7 @@
 /// preprocessing token, not a normal token, as such, it is an internal
 /// interface.  It assumes that the Flags of result have been cleared before
 /// calling this.
-bool Lexer::LexTokenInternal(LexerToken &Result) {
+void Lexer::LexTokenInternal(LexerToken &Result) {
 LexNextToken:
   // New token, can't need cleaning yet.
   Result.ClearFlag(LexerToken::NeedsCleaning);
@@ -1065,10 +1030,9 @@
     if (CurPtr-1 == BufferEnd)
       return LexEndOfFile(Result, CurPtr-1);  // Retreat back into the file.
     
-    if (Diag(CurPtr-1, diag::null_in_file))
-      return true;
+    Diag(CurPtr-1, diag::null_in_file);
     Result.SetFlag(LexerToken::LeadingSpace);
-    if (SkipWhitespace(Result, CurPtr)) return true;
+    SkipWhitespace(Result, CurPtr);
     goto LexNextToken;   // GCC isn't tail call eliminating.
   case '\n':
   case '\r':
@@ -1088,14 +1052,14 @@
     Result.SetFlag(LexerToken::StartOfLine);
     // No leading whitespace seen so far.
     Result.ClearFlag(LexerToken::LeadingSpace);
-    if (SkipWhitespace(Result, CurPtr)) return true;
+    SkipWhitespace(Result, CurPtr);
     goto LexNextToken;   // GCC isn't tail call eliminating.
   case ' ':
   case '\t':
   case '\f':
   case '\v':
     Result.SetFlag(LexerToken::LeadingSpace);
-    if (SkipWhitespace(Result, CurPtr)) return true;
+    SkipWhitespace(Result, CurPtr);
     goto LexNextToken;   // GCC isn't tail call eliminating.
 
   case 'L':
@@ -1242,13 +1206,11 @@
     Char = getCharAndSize(CurPtr, SizeTmp);
     if (Char == '/') {         // BCPL comment.
       Result.SetFlag(LexerToken::LeadingSpace);
-      if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
-        return true;
+      SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result));
       goto LexNextToken;   // GCC isn't tail call eliminating.
     } else if (Char == '*') {  // /**/ comment.
       Result.SetFlag(LexerToken::LeadingSpace);
-      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
-        return true;
+      SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result));
       goto LexNextToken;   // GCC isn't tail call eliminating.
     } else if (Char == '=') {
       Result.SetKind(tok::slashequal);
@@ -1281,7 +1243,7 @@
         // FIXME: -fpreprocessed mode??
         if (Result.isAtStartOfLine() && !PP.isSkipping()) {
           BufferPtr = CurPtr;
-          if (PP.HandleDirective(Result)) return true;
+          PP.HandleDirective(Result);
           
           // As an optimization, if the preprocessor didn't switch lexers, tail
           // recurse.
@@ -1326,8 +1288,7 @@
       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
     } else if (Features.CPPMinMax && Char == '?') {     // <?
       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
-      if (Diag(Result.getStart(), diag::min_max_deprecated))
-        return true;
+      Diag(Result.getStart(), diag::min_max_deprecated);
 
       if (getCharAndSize(CurPtr, SizeTmp) == '=') {     // <?= 
         Result.SetKind(tok::lessquestionequal);
@@ -1354,8 +1315,7 @@
       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
     } else if (Features.CPPMinMax && Char == '?') {
       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
-      if (Diag(Result.getStart(), diag::min_max_deprecated))
-        return true;
+      Diag(Result.getStart(), diag::min_max_deprecated);
 
       if (getCharAndSize(CurPtr, SizeTmp) == '=') {
         Result.SetKind(tok::greaterquestionequal);    // >?=
@@ -1428,7 +1388,7 @@
       // FIXME: not in preprocessed mode??
       if (Result.isAtStartOfLine() && !PP.isSkipping()) {
         BufferPtr = CurPtr;
-        if (PP.HandleDirective(Result)) return true;
+        PP.HandleDirective(Result);
         
         // As an optimization, if the preprocessor didn't switch lexers, tail
         // recurse.
@@ -1456,18 +1416,15 @@
       Result.SetKind(tok::at);
       break;
     } else if (CurPtr[-1] == '$' && Features.DollarIdents) {// $ in identifiers.
-      if (Diag(CurPtr-1, diag::ext_dollar_in_identifier))
-        return true;
+      Diag(CurPtr-1, diag::ext_dollar_in_identifier);
       return LexIdentifier(Result, CurPtr);
     }
     
-    if (!PP.isSkipping() && Diag(CurPtr-1, diag::err_stray_character))
-      return true;
+    if (!PP.isSkipping()) Diag(CurPtr-1, diag::err_stray_character);
     BufferPtr = CurPtr;
     goto LexNextToken;   // GCC isn't tail call eliminating.
   }
   
   // Update the end of token position as well as the BufferPtr instance var.
   Result.SetEnd(BufferPtr = CurPtr);
-  return false;
 }

Modified: cfe/cfe/trunk/Lex/MacroExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/Lex/MacroExpander.cpp?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/Lex/MacroExpander.cpp (original)
+++ cfe/cfe/trunk/Lex/MacroExpander.cpp Wed Jul 11 11:22:20 2007
@@ -18,7 +18,7 @@
 using namespace clang;
 
 /// Lex - Lex and return a token from this macro stream.
-bool MacroExpander::Lex(LexerToken &Tok) {
+void MacroExpander::Lex(LexerToken &Tok) {
   // Lexing off the end of the macro, pop this macro off the expansion stack.
   if (CurToken == Macro.getNumTokens())
     return PP.HandleEndOfMacro(Tok);
@@ -38,5 +38,4 @@
     return PP.HandleIdentifier(Tok);
 
   // Otherwise, return a normal token.
-  return false;
 }

Modified: cfe/cfe/trunk/Lex/PPExpressions.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/Lex/PPExpressions.cpp?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/Lex/PPExpressions.cpp (original)
+++ cfe/cfe/trunk/Lex/PPExpressions.cpp Wed Jul 11 11:22:20 2007
@@ -30,47 +30,37 @@
 ///
 /// MinPrec is the minimum precedence that this range of the expression is
 /// allowed to include.
-bool Preprocessor::EvaluateDirectiveExpression(bool &Result) {
+void Preprocessor::EvaluateDirectiveExpression(bool &Result) {
   // Peek ahead one token.
   LexerToken Tok;
-  if (Lex(Tok)) return true;
+  Lex(Tok);
 
   // In error cases, bail out with false value.
   Result = false;
   
-  bool StopParse = false;
-  
   int ResVal = 0;
-  if (EvaluateValue(ResVal, Tok, StopParse)) {
+  if (EvaluateValue(ResVal, Tok) ||
+      EvaluateDirectiveSubExpr(ResVal, 1, Tok)) {
     // Skip the rest of the macro line.
-    if (!StopParse && Tok.getKind() != tok::eom)
-      StopParse |= DiscardUntilEndOfDirective();
-    return StopParse;
-  }
-  
-  if (EvaluateDirectiveSubExpr(ResVal, 1, Tok, StopParse)) {
-    // Skip the rest of the macro line.
-    if (!StopParse && Tok.getKind() != tok::eom)
-      StopParse |= DiscardUntilEndOfDirective();
-    return StopParse;
+    if (Tok.getKind() != tok::eom)
+      DiscardUntilEndOfDirective();
+    return;
   }
   
   // If we aren't at the tok::eom token, something bad happened, like an extra
   // ')' token.
   if (Tok.getKind() != tok::eom) {
-    return Diag(Tok, diag::err_pp_expected_eol) ||
-           DiscardUntilEndOfDirective();
+    Diag(Tok, diag::err_pp_expected_eol);
+    DiscardUntilEndOfDirective();
   }
   
   Result = ResVal != 0;
-  return false;
 }
 
 /// EvaluateValue - Evaluate the token PeekTok (and any others needed) and
 /// return the computed value in Result.  Return true if there was an error
-/// parsing, setting StopParse if parsing should be aborted.
-bool Preprocessor::EvaluateValue(int &Result, LexerToken &PeekTok, 
-                                 bool &StopParse) {
+/// parsing.
+bool Preprocessor::EvaluateValue(int &Result, LexerToken &PeekTok) {
   Result = 0;
   
   // If this token's spelling is a pp-identifier, check to see if it is
@@ -81,7 +71,8 @@
     // into a simple 0.
     if (strcmp(II->getName(), "defined")) {
       Result = 0;
-      return (StopParse = Lex(PeekTok));
+      Lex(PeekTok);
+      return false;
     }
 
     // Handle "defined X" and "defined(X)".
@@ -91,20 +82,20 @@
     DisableMacroExpansion = true;
 
     // Get the next token.
-    if ((StopParse = Lex(PeekTok))) return true;
+    Lex(PeekTok);
 
     // Two options, it can either be a pp-identifier or a (.
     bool InParens = false;
     if (PeekTok.getKind() == tok::l_paren) {
       // Found a paren, remember we saw it and skip it.
       InParens = true;
-      if ((StopParse = Lex(PeekTok))) return true;
+      Lex(PeekTok);
     }
     
     // If we don't have a pp-identifier now, this is an error.
     if ((II = PeekTok.getIdentifierInfo()) == 0) {
       DisableMacroExpansion = false;
-      StopParse = Diag(PeekTok, diag::err_pp_defined_requires_identifier);
+      Diag(PeekTok, diag::err_pp_defined_requires_identifier);
       return true;
     }
     
@@ -112,16 +103,16 @@
     Result = II->getMacroInfo() != 0;
 
     // Consume identifier.
-    if ((StopParse = Lex(PeekTok))) return true;
+    Lex(PeekTok);
 
     // If we are in parens, ensure we have a trailing ).
     if (InParens) {
       if (PeekTok.getKind() != tok::r_paren) {
-        StopParse = Diag(PeekTok, diag::err_pp_missing_rparen);
+        Diag(PeekTok, diag::err_pp_missing_rparen);
         return true;
       }
       // Consume the ).
-      if ((StopParse = Lex(PeekTok))) return true;
+      Lex(PeekTok);
     }
     
     DisableMacroExpansion = false;
@@ -130,55 +121,55 @@
   
   switch (PeekTok.getKind()) {
   default:  // Non-value token.
-    StopParse = Diag(PeekTok, diag::err_pp_expr_bad_token);
+    Diag(PeekTok, diag::err_pp_expr_bad_token);
     return true;
   case tok::eom:
   case tok::r_paren:
     // If there is no expression, report and exit.
-    StopParse = Diag(PeekTok, diag::err_pp_expected_value_in_expr);
+    Diag(PeekTok, diag::err_pp_expected_value_in_expr);
     return true;
   case tok::numeric_constant: {
     // FIXME: faster.  FIXME: track signs.
     std::string Spell = Lexer::getSpelling(PeekTok, getLangOptions());
     // FIXME: COMPUTE integer constants CORRECTLY.
     Result = atoi(Spell.c_str());
-    return (StopParse = Lex(PeekTok));
+    Lex(PeekTok);
+    return false;
   }
   case tok::l_paren:
-    if (StopParse = Lex(PeekTok)) return true;  // Eat the (.
-    // Parse the value.
-    if (EvaluateValue(Result, PeekTok, StopParse)) return true;
-      
-    // If there are any binary operators involved, parse them.
-    if (EvaluateDirectiveSubExpr(Result, 1, PeekTok, StopParse))
-      return StopParse;
+    Lex(PeekTok);  // Eat the (.
+    // Parse the value and if there are any binary operators involved, parse
+    // them.
+    if (EvaluateValue(Result, PeekTok) ||
+        EvaluateDirectiveSubExpr(Result, 1, PeekTok))
+      return true;
 
     if (PeekTok.getKind() != tok::r_paren) {
-      StopParse = Diag(PeekTok, diag::err_pp_expected_rparen);
+      Diag(PeekTok, diag::err_pp_expected_rparen);
       return true;
     }
-    if (StopParse = Lex(PeekTok)) return true;  // Eat the ).
+    Lex(PeekTok);  // Eat the ).
     return false;
  
   case tok::plus:
     // Unary plus doesn't modify the value.
-    if (StopParse = Lex(PeekTok)) return true;
-    return EvaluateValue(Result, PeekTok, StopParse);
+    Lex(PeekTok);
+    return EvaluateValue(Result, PeekTok);
   case tok::minus:
-    if (StopParse = Lex(PeekTok)) return true;
-    if (EvaluateValue(Result, PeekTok, StopParse)) return true;
+    Lex(PeekTok);
+    if (EvaluateValue(Result, PeekTok)) return true;
     Result = -Result;
     return false;
     
   case tok::tilde:
-    if (StopParse = Lex(PeekTok)) return true;
-    if (EvaluateValue(Result, PeekTok, StopParse)) return true;
+    Lex(PeekTok);
+    if (EvaluateValue(Result, PeekTok)) return true;
     Result = ~Result;
     return false;
     
   case tok::exclaim:
-    if (StopParse = Lex(PeekTok)) return true;
-    if (EvaluateValue(Result, PeekTok, StopParse)) return true;
+    Lex(PeekTok);
+    if (EvaluateValue(Result, PeekTok)) return true;
     Result = !Result;
     return false;
     
@@ -240,12 +231,11 @@
 /// EvaluateDirectiveSubExpr - Evaluate the subexpression whose first token is
 /// PeekTok, and whose precedence is PeekPrec.
 bool Preprocessor::EvaluateDirectiveSubExpr(int &LHS, unsigned MinPrec,
-                                            LexerToken &PeekTok,
-                                            bool &StopParse) {
+                                            LexerToken &PeekTok) {
   unsigned PeekPrec = getPrecedence(PeekTok.getKind());
   // If this token isn't valid, report the error.
   if (PeekPrec == ~0U) {
-    StopParse = Diag(PeekTok, diag::err_pp_expr_bad_token);
+    Diag(PeekTok, diag::err_pp_expr_bad_token);
     return true;
   }
   
@@ -259,11 +249,11 @@
 
     // Consume the operator, saving the operator token for error reporting.
     LexerToken OpToken = PeekTok;
-    if (StopParse = Lex(PeekTok)) return true;
+    Lex(PeekTok);
 
     int RHS;
     // Parse the RHS of the operator.
-    if (EvaluateValue(RHS, PeekTok, StopParse)) return true;
+    if (EvaluateValue(RHS, PeekTok)) return true;
 
     // Remember the precedence of this operator and get the precedence of the
     // operator immediately to the right of the RHS.
@@ -272,7 +262,7 @@
 
     // If this token isn't valid, report the error.
     if (PeekPrec == ~0U) {
-      StopParse = Diag(PeekTok, diag::err_pp_expr_bad_token);
+      Diag(PeekTok, diag::err_pp_expr_bad_token);
       return true;
     }
     
@@ -282,7 +272,7 @@
     // more tightly with RHS than we do, evaluate it completely first.
     if (ThisPrec < PeekPrec ||
         (ThisPrec == PeekPrec && isRightAssoc)) {
-      if (EvaluateDirectiveSubExpr(RHS, ThisPrec+1, PeekTok, StopParse))
+      if (EvaluateDirectiveSubExpr(RHS, ThisPrec+1, PeekTok))
         return true;
       PeekPrec = getPrecedence(PeekTok.getKind());
     }
@@ -292,14 +282,14 @@
     default: assert(0 && "Unknown operator token!");
     case tok::percent:
       if (RHS == 0) {
-        StopParse = Diag(OpToken, diag::err_pp_remainder_by_zero);
+        Diag(OpToken, diag::err_pp_remainder_by_zero);
         return true;
       }
       LHS %= RHS;
       break;
     case tok::slash:
       if (RHS == 0) {
-        StopParse = Diag(OpToken, diag::err_pp_division_by_zero);
+        Diag(OpToken, diag::err_pp_division_by_zero);
         return true;
       }
       LHS /= RHS;
@@ -327,26 +317,25 @@
     case tok::ampamp:          LHS = LHS && RHS; break;
     case tok::pipepipe:        LHS = LHS || RHS; break;
     case tok::comma:
-      if ((StopParse = Diag(OpToken, diag::ext_pp_comma_expr)))
-        return true;
+      Diag(OpToken, diag::ext_pp_comma_expr);
       LHS = RHS; // LHS = LHS,RHS -> RHS.
       break; 
     case tok::question: {
       // Parse the : part of the expression.
       if (PeekTok.getKind() != tok::colon) {
-        StopParse = Diag(OpToken, diag::err_pp_question_without_colon);
+        Diag(OpToken, diag::err_pp_question_without_colon);
         return true;
       }
       // Consume the :.
-      if (StopParse = Lex(PeekTok)) return true;
+      Lex(PeekTok);
 
       // Evaluate the value after the :.
       int AfterColonVal = 0;
-      if (EvaluateValue(AfterColonVal, PeekTok, StopParse)) return true;
+      if (EvaluateValue(AfterColonVal, PeekTok)) return true;
 
       // Parse anything after the : RHS that has a higher precedence than ?.
       if (EvaluateDirectiveSubExpr(AfterColonVal, ThisPrec+1,
-                                   PeekTok, StopParse))
+                                   PeekTok))
         return true;
       
       // Now that we have the condition, the LHS and the RHS of the :, evaluate.
@@ -358,7 +347,7 @@
     }
     case tok::colon:
       // Don't allow :'s to float around without being part of ?: exprs.
-      StopParse = Diag(OpToken, diag::err_pp_colon_without_question);
+      Diag(OpToken, diag::err_pp_colon_without_question);
       return true;
     }
   }

Modified: cfe/cfe/trunk/Lex/Preprocessor.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/Lex/Preprocessor.cpp?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/Lex/Preprocessor.cpp (original)
+++ cfe/cfe/trunk/Lex/Preprocessor.cpp Wed Jul 11 11:22:20 2007
@@ -110,23 +110,23 @@
 /// Diag - Forwarding function for diagnostics.  This emits a diagnostic at
 /// the specified LexerToken's location, translating the token's start
 /// position in the current buffer into a SourcePosition object for rendering.
-bool Preprocessor::Diag(SourceLocation Loc, unsigned DiagID, 
+void Preprocessor::Diag(SourceLocation Loc, unsigned DiagID, 
                         const std::string &Msg) {
   // If we are in a '#if 0' block, don't emit any diagnostics for notes,
   // warnings or extensions.
   if (isSkipping() && Diagnostic::isNoteWarningOrExtension(DiagID))
-    return false;
+    return;
   
-  return Diags.Report(Loc, DiagID, Msg);
+  Diags.Report(Loc, DiagID, Msg);
 }
-bool Preprocessor::Diag(const LexerToken &Tok, unsigned DiagID,
+void Preprocessor::Diag(const LexerToken &Tok, unsigned DiagID,
                         const std::string &Msg) {
   // If we are in a '#if 0' block, don't emit any diagnostics for notes,
   // warnings or extensions.
   if (isSkipping() && Diagnostic::isNoteWarningOrExtension(DiagID))
-    return false;
+    return;
   
-  return Diag(Tok.getSourceLocation(), DiagID, Msg);
+  Diag(Tok.getSourceLocation(), DiagID, Msg);
 }
 
 void Preprocessor::PrintStats() {
@@ -251,8 +251,8 @@
 }
 
 /// EnterMacro - Add a Macro to the top of the include stack and start lexing
-/// tokens from it instead of the current buffer.  Return true on failure.
-bool Preprocessor::EnterMacro(LexerToken &Tok) {
+/// tokens from it instead of the current buffer.
+void Preprocessor::EnterMacro(LexerToken &Tok) {
   IdentifierTokenInfo *Identifier = Tok.getIdentifierInfo();
   MacroInfo &MI = *Identifier->getMacroInfo();
   SourceLocation ExpandLoc = Tok.getSourceLocation();
@@ -277,7 +277,6 @@
   CurMacroExpander = new MacroExpander(MI, MacroID, *this,
                                        Tok.isAtStartOfLine(), 
                                        Tok.hasLeadingSpace());
-  return false;
 }
 
 
@@ -288,12 +287,12 @@
 /// HandleIdentifier - This callback is invoked when the lexer reads an
 /// identifier.  This callback looks up the identifier in the map and/or
 /// potentially macro expands it or turns it into a named token (like 'for').
-bool Preprocessor::HandleIdentifier(LexerToken &Identifier) {
+void Preprocessor::HandleIdentifier(LexerToken &Identifier) {
   if (Identifier.getIdentifierInfo() == 0) {
     // If we are skipping tokens (because we are in a #if 0 block), there will
     // be no identifier info, just return the token.
     assert(isSkipping() && "Token isn't an identifier?");
-    return false;
+    return;
   }
   IdentifierTokenInfo &ITI = *Identifier.getIdentifierInfo();
   
@@ -313,7 +312,7 @@
         bool HadLeadingSpace = Identifier.hasLeadingSpace();
         bool IsAtStartOfLine = Identifier.isAtStartOfLine();
         
-        if (Lex(Identifier)) return true;
+        Lex(Identifier);
         
         // If the identifier isn't on some OTHER line, inherit the leading
         // whitespace/first-on-a-line property of this token.  This handles
@@ -324,7 +323,7 @@
           if (HadLeadingSpace) Identifier.SetFlag(LexerToken::LeadingSpace);
         }
         ++NumFastMacroExpanded;
-        return false;
+        return;
         
       } else if (MI->getNumTokens() == 1 &&
                  // Don't handle identifiers, which might need recursive
@@ -353,12 +352,11 @@
         // Since this is not an identifier token, it can't be macro expanded, so
         // we're done.
         ++NumFastMacroExpanded;
-        return false;
+        return;
       }
     
       // Start expanding the macro (FIXME, pass arguments).
-      if (EnterMacro(Identifier))
-        return true;
+      EnterMacro(Identifier);
     
       // Now that the macro is at the top of the include stack, ask the
       // preprocessor to read the next token from it.
@@ -371,15 +369,13 @@
   Identifier.SetKind(ITI.getTokenID());
     
   // If this is an extension token, diagnose its use.
-  if (ITI.isExtensionToken() && Diag(Identifier, diag::ext_token_used))
-    return true;
-  return false;  
+  if (ITI.isExtensionToken()) Diag(Identifier, diag::ext_token_used);
 }
 
 /// HandleEndOfFile - This callback is invoked when the lexer hits the end of
 /// the current file.  This either returns the EOF token or pops a level off
 /// the include stack and keeps going.
-bool Preprocessor::HandleEndOfFile(LexerToken &Result) {
+void Preprocessor::HandleEndOfFile(LexerToken &Result) {
   assert(!CurMacroExpander &&
          "Ending a file when currently in a macro!");
   
@@ -392,7 +388,7 @@
     Result.SetKind(tok::eof);
     Result.SetStart(CurLexer->BufferEnd);
     Result.SetEnd(CurLexer->BufferEnd);
-    return false;
+    return;
   }
   
   // If this is a #include'd file, pop it off the include stack and continue
@@ -414,13 +410,11 @@
   // We're done with the #included file.
   delete CurLexer;
   CurLexer = 0;
-  return false;
 }
 
 /// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
-/// the current macro.  This either returns the EOF token or pops a level off
-/// the include stack and keeps going.
-bool Preprocessor::HandleEndOfMacro(LexerToken &Result) {
+/// the current macro line.
+void Preprocessor::HandleEndOfMacro(LexerToken &Result) {
   assert(CurMacroExpander && !CurLexer &&
          "Ending a macro when currently in a #include file!");
 
@@ -447,29 +441,26 @@
 
 /// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
 /// current line until the tok::eom token is found.
-bool Preprocessor::DiscardUntilEndOfDirective() {
+void Preprocessor::DiscardUntilEndOfDirective() {
   LexerToken Tmp;
   do {
-    if (LexUnexpandedToken(Tmp)) return true;
+    LexUnexpandedToken(Tmp);
   } while (Tmp.getKind() != tok::eom);
-  return false;
 }
 
 /// ReadMacroName - Lex and validate a macro name, which occurs after a
 /// #define or #undef.  This sets the token kind to eom and discards the rest
 /// of the macro line if the macro name is invalid.
-bool Preprocessor::ReadMacroName(LexerToken &MacroNameTok) {
+void Preprocessor::ReadMacroName(LexerToken &MacroNameTok) {
   // Read the token, don't allow macro expansion on it.
-  if (LexUnexpandedToken(MacroNameTok))
-    return true;
+  LexUnexpandedToken(MacroNameTok);
   
   // Missing macro name?
   if (MacroNameTok.getKind() == tok::eom)
     return Diag(MacroNameTok, diag::err_pp_missing_macro_name);
   
   if (MacroNameTok.getIdentifierInfo() == 0) {
-    if (Diag(MacroNameTok, diag::err_pp_macro_not_identifier))
-      return true;
+    Diag(MacroNameTok, diag::err_pp_macro_not_identifier);
     // Fall through on error.
   } else if (0) {
     // FIXME: Error if defining a C++ named operator.
@@ -479,7 +470,7 @@
     // in C99 6.10.8.4.
   } else {
     // Okay, we got a good identifier node.  Return it.
-    return false;
+    return;
   }
   
   
@@ -491,17 +482,15 @@
 
 /// CheckEndOfDirective - Ensure that the next token is a tok::eom token.  If
 /// not, emit a diagnostic and consume up until the eom.
-bool Preprocessor::CheckEndOfDirective(const char *DirType) {
+void Preprocessor::CheckEndOfDirective(const char *DirType) {
   LexerToken Tmp;
-  if (Lex(Tmp)) return true;
+  Lex(Tmp);
   // There should be no tokens after the directive, but we allow them as an
   // extension.
   if (Tmp.getKind() != tok::eom) {
-    if (Diag(Tmp, diag::ext_pp_extra_tokens_at_eol, DirType) ||
-        DiscardUntilEndOfDirective())
-      return true;
+    Diag(Tmp, diag::ext_pp_extra_tokens_at_eol, DirType);
+    DiscardUntilEndOfDirective();
   }
-  return false;
 }
 
 
@@ -514,7 +503,7 @@
 /// is true, then #else directives are ok, if not, then we have already seen one
 /// so a #else directive is a duplicate.  When this returns, the caller can lex
 /// the first valid token.
-bool Preprocessor::SkipExcludedConditionalBlock(const char *IfTokenLoc,
+void Preprocessor::SkipExcludedConditionalBlock(const char *IfTokenLoc,
                                                 bool FoundNonSkipPortion,
                                                 bool FoundElse) {
   ++NumSkipped;
@@ -536,7 +525,7 @@
   SkippingContents = true;
   LexerToken Tok;
   while (1) {
-    if (CurLexer->Lex(Tok)) return true;
+    CurLexer->Lex(Tok);
     
     // If this is the end of the buffer, we have an error.  The lexer will have
     // already handled this error condition, so just return and let the caller
@@ -553,7 +542,7 @@
     CurLexer->ParsingPreprocessorDirective = true;
     
     // Read the next token, the directive flavor.
-    if (LexUnexpandedToken(Tok)) return true;
+    LexUnexpandedToken(Tok);
     
     // If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
     // something bogus), skip it.
@@ -581,13 +570,13 @@
       if (Directive == "if" || Directive == "ifdef" || Directive == "ifndef") {
         // We know the entire #if/#ifdef/#ifndef block will be skipped, don't
         // bother parsing the condition.
-        if (DiscardUntilEndOfDirective()) return true;
+        DiscardUntilEndOfDirective();
         CurLexer->pushConditionalLevel(Tok.getStart(), /*wasskipping*/true,
                                        /*foundnonskip*/false,/*fnddelse*/false);
       }
     } else if (FirstChar == 'e') {
       if (Directive == "endif") {
-        if (CheckEndOfDirective("#endif")) return true;
+        CheckEndOfDirective("#endif");
         PPConditionalInfo CondInfo;
         CondInfo.WasSkipping = true; // Silence bogus warning.
         bool InCond = CurLexer->popConditionalLevel(CondInfo);
@@ -600,12 +589,11 @@
         // #else directive in a skipping conditional.  If not in some other
         // skipping conditional, and if #else hasn't already been seen, enter it
         // as a non-skipping conditional.
-        if (CheckEndOfDirective("#else")) return true;
+        CheckEndOfDirective("#else");
         PPConditionalInfo &CondInfo = CurLexer->peekConditionalLevel();
         
         // If this is a #else with a #else before it, report the error.
-        if (CondInfo.FoundElse && Diag(Tok, diag::pp_err_else_after_else))
-          return true;
+        if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_else_after_else);
         
         // Note that we've seen a #else in this conditional.
         CondInfo.FoundElse = true;
@@ -623,7 +611,7 @@
         // If this is in a skipping block or if we're already handled this #if
         // block, don't bother parsing the condition.
         if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
-          if (DiscardUntilEndOfDirective()) return true;
+          DiscardUntilEndOfDirective();
           ShouldEnter = false;
         } else {
           // Evaluate the #elif condition!
@@ -633,14 +621,12 @@
           // looked up, etc, inside the #elif expression.
           assert(SkippingContents && "We have to be skipping here!");
           SkippingContents = false;
-          if (EvaluateDirectiveExpression(ShouldEnter))
-            return true;
+          EvaluateDirectiveExpression(ShouldEnter);
           SkippingContents = true;
         }
         
         // If this is a #elif with a #else before it, report the error.
-        if (CondInfo.FoundElse && Diag(Tok, diag::pp_err_elif_after_else))
-          return true;
+        if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
         
         // If this condition is true, enter it!
         if (ShouldEnter) {
@@ -657,8 +643,6 @@
   // of the file, just stop skipping and return to lexing whatever came after
   // the #if block.
   SkippingContents = false;
-
-  return false;
 }
 
 //===----------------------------------------------------------------------===//
@@ -669,7 +653,7 @@
 /// at the start of a line.  This consumes the directive, modifies the 
 /// lexer/preprocessor state, and advances the lexer(s) so that the next token
 /// read is the correct one.
-bool Preprocessor::HandleDirective(LexerToken &Result) {
+void Preprocessor::HandleDirective(LexerToken &Result) {
   // FIXME: TRADITIONAL: # with whitespace before it not recognized by K&R?
   
   // We just parsed a # character at the start of a line, so we're in directive
@@ -680,13 +664,12 @@
   ++NumDirectives;
   
   // Read the next token, the directive flavor.
-  if (LexUnexpandedToken(Result))
-    return true;   // Bail out.
+  LexUnexpandedToken(Result);
   
   switch (Result.getKind()) {
   default: break;
   case tok::eom:
-    return false;   // null directive.
+    return;   // null directive.
 
 #if 0
   case tok::numeric_constant:
@@ -737,10 +720,10 @@
 #if 1
         // Read the rest of the PP line.
         do {
-          if (Lex(Result)) return true;
+          Lex(Result);
         } while (Result.getKind() != tok::eom);
         
-        return false;
+        return;
 #endif
       } else if (Directive == "assert") {
         isExtension = true;
@@ -749,9 +732,10 @@
     case 7:
       if (Directive == "include")  // Handle #include.
         return HandleIncludeDirective(Result);
-      if (Directive == "warning")
-        return Diag(Result, diag::ext_pp_warning_directive) ||
-               HandleUserDiagnosticDirective(Result, true);
+      if (Directive == "warning") {
+        Diag(Result, diag::ext_pp_warning_directive);
+        HandleUserDiagnosticDirective(Result, true);
+      }
       break;
     case 8:
       if (Directive == "unassert") {
@@ -767,19 +751,17 @@
   }
   
   // If we reached here, the preprocessing token is not valid!
-  if (Diag(Result, diag::err_pp_invalid_directive))
-    return true;
+  Diag(Result, diag::err_pp_invalid_directive);
   
   // Read the rest of the PP line.
   do {
-    if (Lex(Result)) return true;
+    Lex(Result);
   } while (Result.getKind() != tok::eom);
   
   // Okay, we're done parsing the directive.
-  return false;
 }
 
-bool Preprocessor::HandleUserDiagnosticDirective(LexerToken &Result, 
+void Preprocessor::HandleUserDiagnosticDirective(LexerToken &Result, 
                                                  bool isWarning) {
   // Read the rest of the line raw.  We do this because we don't want macros
   // to be expanded and we don't require that the tokens be valid preprocessing
@@ -796,17 +778,16 @@
 /// file to be included from the lexer, then include it!  This is a common
 /// routine with functionality shared between #include, #include_next and
 /// #import.
-bool Preprocessor::HandleIncludeDirective(LexerToken &IncludeTok,
+void Preprocessor::HandleIncludeDirective(LexerToken &IncludeTok,
                                           const DirectoryLookup *LookupFrom,
                                           bool isImport) {
   ++NumIncluded;
   LexerToken FilenameTok;
-  if (CurLexer->LexIncludeFilename(FilenameTok))
-    return true;
+  CurLexer->LexIncludeFilename(FilenameTok);
   
   // If the token kind is EOM, the error has already been diagnosed.
   if (FilenameTok.getKind() == tok::eom)
-    return false;
+    return;
 
   // Check that we don't have infinite #include recursion.
   if (IncludeStack.size() == MaxAllowedIncludeStackDepth-1)
@@ -853,12 +834,12 @@
     FileInfo.isImport = true;
     
     // Has this already been #import'ed or #include'd?
-    if (FileInfo.NumIncludes) return false;
+    if (FileInfo.NumIncludes) return;
   } else {
     // Otherwise, if this is a #include of a file that was previously #import'd
     // or if this is the second #include of a #pragma once file, ignore it.
     if (FileInfo.isImport)
-      return false;
+      return;
   }
 
   // Look up the file, create a File ID for it.
@@ -872,15 +853,12 @@
 
   // Increment the number of times this file has been included.
   ++FileInfo.NumIncludes;
-  
-  return false;
 }
 
 /// HandleIncludeNextDirective - Implements #include_next.
 ///
-bool Preprocessor::HandleIncludeNextDirective(LexerToken &IncludeNextTok) {
-  if (Diag(IncludeNextTok, diag::ext_pp_include_next_directive))
-    return true;
+void Preprocessor::HandleIncludeNextDirective(LexerToken &IncludeNextTok) {
+  Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
   
   // #include_next is like #include, except that we start searching after
   // the current found directory.  If we can't do this, issue a
@@ -888,11 +866,9 @@
   const DirectoryLookup *Lookup = CurNextDirLookup;
   if (IncludeStack.empty()) {
     Lookup = 0;
-    if (Diag(IncludeNextTok, diag::pp_include_next_in_primary))
-      return true;
+    Diag(IncludeNextTok, diag::pp_include_next_in_primary);
   } else if (Lookup == 0) {
-    if (Diag(IncludeNextTok, diag::pp_include_next_absolute_path))
-      return true;
+    Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
   }
   
   return HandleIncludeDirective(IncludeNextTok, Lookup);
@@ -900,8 +876,8 @@
 
 /// HandleImportDirective - Implements #import.
 ///
-bool Preprocessor::HandleImportDirective(LexerToken &ImportTok) {
-  if (Diag(ImportTok, diag::ext_pp_import_directive)) return true;
+void Preprocessor::HandleImportDirective(LexerToken &ImportTok) {
+  Diag(ImportTok, diag::ext_pp_import_directive);
   
   return HandleIncludeDirective(ImportTok, 0, true);
 }
@@ -909,35 +885,32 @@
 /// HandleDefineDirective - Implements #define.  This consumes the entire macro
 /// line then lets the caller lex the next real token.
 ///
-bool Preprocessor::HandleDefineDirective(LexerToken &DefineTok) {
+void Preprocessor::HandleDefineDirective(LexerToken &DefineTok) {
   ++NumDefined;
   LexerToken MacroNameTok;
-  if (ReadMacroName(MacroNameTok))
-    return true;
+  ReadMacroName(MacroNameTok);
   
   // Error reading macro name?  If so, diagnostic already issued.
   if (MacroNameTok.getKind() == tok::eom)
-    return false;
+    return;
   
   MacroInfo *MI = new MacroInfo(MacroNameTok.getSourceLocation());
   
   LexerToken Tok;
-  if (LexUnexpandedToken(Tok)) return true;
+  LexUnexpandedToken(Tok);
   
   if (Tok.getKind() == tok::eom) {
     // If there is no body to this macro, we have no special handling here.
   } else if (Tok.getKind() == tok::l_paren && !Tok.hasLeadingSpace()) {
     // This is a function-like macro definition.
     //assert(0 && "Function-like macros not implemented!");
-#warning function like macros
     return DiscardUntilEndOfDirective();
 
   } else if (!Tok.hasLeadingSpace()) {
     // C99 requires whitespace between the macro definition and the body.  Emit
     // a diagnostic for something like "#define X+".
     if (Features.C99) {
-      if (Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name))
-        return true;
+      Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name);
     } else {
       // FIXME: C90/C++ do not get this diagnostic, but it does get a similar
       // one in some cases!
@@ -955,7 +928,7 @@
     // FIXME: See create_iso_definition.
     
     // Get the next token of the macro.
-    if (LexUnexpandedToken(Tok)) return true;
+    LexUnexpandedToken(Tok);
   }
   
   // Finally, if this identifier already had a macro defined for it, verify that
@@ -968,30 +941,28 @@
   }
   
   MacroNameTok.getIdentifierInfo()->setMacroInfo(MI);
-  return false;
 }
 
 
 /// HandleUndefDirective - Implements #undef.
 ///
-bool Preprocessor::HandleUndefDirective(LexerToken &UndefTok) {
+void Preprocessor::HandleUndefDirective(LexerToken &UndefTok) {
   ++NumUndefined;
   LexerToken MacroNameTok;
-  if (ReadMacroName(MacroNameTok))
-    return true;
+  ReadMacroName(MacroNameTok);
   
   // Error reading macro name?  If so, diagnostic already issued.
   if (MacroNameTok.getKind() == tok::eom)
-    return false;
+    return;
   
   // Check to see if this is the last token on the #undef line.
-  if (CheckEndOfDirective("#undef")) return true;
+  CheckEndOfDirective("#undef");
   
   // Okay, we finally have a valid identifier to undef.
   MacroInfo *MI = MacroNameTok.getIdentifierInfo()->getMacroInfo();
   
   // If the macro is not defined, this is a noop undef, just return.
-  if (MI == 0) return false;
+  if (MI == 0) return;
   
 #if 0 // FIXME: implement warn_unused_macros.
   if (CPP_OPTION (pfile, warn_unused_macros))
@@ -1001,72 +972,67 @@
   // Free macro definition.
   delete MI;
   MacroNameTok.getIdentifierInfo()->setMacroInfo(0);
-  return false;
 }
 
 
 /// HandleIfdefDirective - Implements the #ifdef/#ifndef directive.  isIfndef is
 /// true when this is a #ifndef directive.
 ///
-bool Preprocessor::HandleIfdefDirective(LexerToken &Result, bool isIfndef) {
+void Preprocessor::HandleIfdefDirective(LexerToken &Result, bool isIfndef) {
   ++NumIf;
   LexerToken DirectiveTok = Result;
   
   LexerToken MacroNameTok;
-  if (ReadMacroName(MacroNameTok))
-    return true;
+  ReadMacroName(MacroNameTok);
   
   // Error reading macro name?  If so, diagnostic already issued.
   if (MacroNameTok.getKind() == tok::eom)
-    return false;
+    return;
   
   // Check to see if this is the last token on the #if[n]def line.
-  if (CheckEndOfDirective("#ifdef")) return true;
+  CheckEndOfDirective("#ifdef");
   
   // Should we include the stuff contained by this directive?
   if (!MacroNameTok.getIdentifierInfo()->getMacroInfo() == isIfndef) {
     // Yes, remember that we are inside a conditional, then lex the next token.
     CurLexer->pushConditionalLevel(DirectiveTok.getStart(), /*wasskip*/false,
                                    /*foundnonskip*/true, /*foundelse*/false);
-    return false;
   } else {
     // No, skip the contents of this block and return the first token after it.
-    return SkipExcludedConditionalBlock(DirectiveTok.getStart(),
-                                        /*Foundnonskip*/false, 
-                                        /*FoundElse*/false);
+    SkipExcludedConditionalBlock(DirectiveTok.getStart(),
+                                 /*Foundnonskip*/false, 
+                                 /*FoundElse*/false);
   }
 }
 
 /// HandleIfDirective - Implements the #if directive.
 ///
-bool Preprocessor::HandleIfDirective(LexerToken &IfToken) {
+void Preprocessor::HandleIfDirective(LexerToken &IfToken) {
   ++NumIf;
   const char *Start = CurLexer->BufferPtr;
 
   bool ConditionalTrue = false;
-  if (EvaluateDirectiveExpression(ConditionalTrue))
-    return true;
+  EvaluateDirectiveExpression(ConditionalTrue);
   
   // Should we include the stuff contained by this directive?
   if (ConditionalTrue) {
     // Yes, remember that we are inside a conditional, then lex the next token.
     CurLexer->pushConditionalLevel(IfToken.getStart(), /*wasskip*/false,
                                    /*foundnonskip*/true, /*foundelse*/false);
-    return false;
   } else {
     // No, skip the contents of this block and return the first token after it.
-    return SkipExcludedConditionalBlock(IfToken.getStart(),
-                                        /*Foundnonskip*/false, 
-                                        /*FoundElse*/false);
+    SkipExcludedConditionalBlock(IfToken.getStart(),
+                                 /*Foundnonskip*/false, 
+                                 /*FoundElse*/false);
   }
 }
 
 /// HandleEndifDirective - Implements the #endif directive.
 ///
-bool Preprocessor::HandleEndifDirective(LexerToken &EndifToken) {
+void Preprocessor::HandleEndifDirective(LexerToken &EndifToken) {
   ++NumEndif;
   // Check that this is the whole directive.
-  if (CheckEndOfDirective("#endif")) return true;
+  CheckEndOfDirective("#endif");
   
   PPConditionalInfo CondInfo;
   if (CurLexer->popConditionalLevel(CondInfo)) {
@@ -1076,22 +1042,20 @@
   
   assert(!CondInfo.WasSkipping && !isSkipping() &&
          "This code should only be reachable in the non-skipping case!");
-  return false;
 }
 
 
-bool Preprocessor::HandleElseDirective(LexerToken &Result) {
+void Preprocessor::HandleElseDirective(LexerToken &Result) {
   ++NumElse;
   // #else directive in a non-skipping conditional... start skipping.
-  if (CheckEndOfDirective("#else")) return true;
+  CheckEndOfDirective("#else");
   
   PPConditionalInfo CI;
   if (CurLexer->popConditionalLevel(CI))
     return Diag(Result, diag::pp_err_else_without_if);
 
   // If this is a #else with a #else before it, report the error.
-  if (CI.FoundElse && Diag(Result, diag::pp_err_else_after_else))
-    return true;
+  if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
   
   // Finally, skip the rest of the contents of this block and return the first
   // token after it.
@@ -1099,20 +1063,19 @@
                                       /*FoundElse*/true);
 }
 
-bool Preprocessor::HandleElifDirective(LexerToken &ElifToken) {
+void Preprocessor::HandleElifDirective(LexerToken &ElifToken) {
   ++NumElse;
   // #elif directive in a non-skipping conditional... start skipping.
   // We don't care what the condition is, because we will always skip it (since
   // the block immediately before it was included).
-  if (DiscardUntilEndOfDirective()) return true;
+  DiscardUntilEndOfDirective();
 
   PPConditionalInfo CI;
   if (CurLexer->popConditionalLevel(CI))
     return Diag(ElifToken, diag::pp_err_elif_without_if);
   
   // If this is a #elif with a #else before it, report the error.
-  if (CI.FoundElse && Diag(ElifToken, diag::pp_err_elif_after_else))
-    return true;
+  if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
 
   // Finally, skip the rest of the contents of this block and return the first
   // token after it.

Modified: cfe/cfe/trunk/include/clang/Basic/Diagnostic.h
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/include/clang/Basic/Diagnostic.h?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/include/clang/Basic/Diagnostic.h (original)
+++ cfe/cfe/trunk/include/clang/Basic/Diagnostic.h Wed Jul 11 11:22:20 2007
@@ -90,10 +90,9 @@
   /// the DiagnosticClient.
   Level getDiagnosticLevel(unsigned DiagID) const;
   
-  /// Report - Issue the message to the client. If the client wants us to stop
-  /// compilation, return true, otherwise return false.  DiagID is a member of
-  /// the diag::kind enum.  
-  bool Report(SourceLocation Pos, unsigned DiagID,
+  /// Report - Issue the message to the client.  DiagID is a member of the
+  /// diag::kind enum.  
+  void Report(SourceLocation Pos, unsigned DiagID,
               const std::string &Extra = "");
 };
 
@@ -105,9 +104,8 @@
   virtual ~DiagnosticClient();
   
   /// HandleDiagnostic - Handle this diagnostic, reporting it to the user or 
-  /// capturing it to a log as needed.  If this returns true, compilation will
-  /// be gracefully terminated, otherwise compilation will continue.
-  virtual bool HandleDiagnostic(Diagnostic::Level DiagLevel, SourceLocation Pos,
+  /// capturing it to a log as needed.
+  virtual void HandleDiagnostic(Diagnostic::Level DiagLevel, SourceLocation Pos,
                                 diag::kind ID, const std::string &Msg) = 0;
 };
 

Modified: cfe/cfe/trunk/include/clang/Lex/Lexer.h
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/include/clang/Lex/Lexer.h?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/include/clang/Lex/Lexer.h (original)
+++ cfe/cfe/trunk/include/clang/Lex/Lexer.h Wed Jul 11 11:22:20 2007
@@ -214,7 +214,7 @@
   /// return the tok::eof token.  Return true if an error occurred and
   /// compilation should terminate, false if normal.  This implicitly involves
   /// the preprocessor.
-  bool Lex(LexerToken &Result) {
+  void Lex(LexerToken &Result) {
     // Start a new token.
     Result.StartToken(this);
     
@@ -226,14 +226,9 @@
     }
    
     // Get a token.
-    return LexTokenInternal(Result);
+    LexTokenInternal(Result);
   }
   
-  /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
-  /// (potentially) macro expand the filename.  If the sequence parsed is not
-  /// lexically legal, emit a diagnostic and return a result EOM token.
-  bool LexIncludeFilename(LexerToken &Result);
-  
   /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
   /// uninterpreted string.  This switches the lexer out of directive mode.
   std::string ReadToEndOfLine();
@@ -264,7 +259,7 @@
   
   /// Diag - Forwarding function for diagnostics.  This translate a source
   /// position in the current buffer into a SourceLocation object for rendering.
-  bool Diag(const char *Loc, unsigned DiagID,
+  void Diag(const char *Loc, unsigned DiagID,
             const std::string &Msg = "") const;
 
   /// getSourceLocation - Return a source location identifier for the specified
@@ -278,7 +273,7 @@
   /// LexTokenInternal - Internal interface to lex a preprocessing token. Called
   /// by Lex.
   ///
-  bool LexTokenInternal(LexerToken &Result);
+  void LexTokenInternal(LexerToken &Result);
     
   
   //===--------------------------------------------------------------------===//
@@ -394,19 +389,25 @@
   // Other lexer functions.
   
   // Part of block comment parsing.
-  bool isBlockCommentEndOfEscapedNewLine(const char *CurPtr, char &PrevChar);
+  bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr);
   
   // Helper functions to lex the remainder of a token of the specific type.
-  bool LexIdentifier         (LexerToken &Result, const char *CurPtr);
-  bool LexNumericConstant    (LexerToken &Result, const char *CurPtr);
-  bool LexStringLiteral      (LexerToken &Result, const char *CurPtr);
-  bool LexAngledStringLiteral(LexerToken &Result, const char *CurPtr);
-  bool LexCharConstant       (LexerToken &Result, const char *CurPtr);
-  bool LexEndOfFile          (LexerToken &Result, const char *CurPtr);
-  
-  bool SkipWhitespace        (LexerToken &Result, const char *CurPtr);
-  bool SkipBCPLComment       (LexerToken &Result, const char *CurPtr);
-  bool SkipBlockComment      (LexerToken &Result, const char *CurPtr);
+  void LexIdentifier         (LexerToken &Result, const char *CurPtr);
+  void LexNumericConstant    (LexerToken &Result, const char *CurPtr);
+  void LexStringLiteral      (LexerToken &Result, const char *CurPtr);
+  void LexAngledStringLiteral(LexerToken &Result, const char *CurPtr);
+  void LexCharConstant       (LexerToken &Result, const char *CurPtr);
+  void LexEndOfFile          (LexerToken &Result, const char *CurPtr);
+  
+  void SkipWhitespace        (LexerToken &Result, const char *CurPtr);
+  void SkipBCPLComment       (LexerToken &Result, const char *CurPtr);
+  void SkipBlockComment      (LexerToken &Result, const char *CurPtr);
+  
+  
+  /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
+  /// (potentially) macro expand the filename.  If the sequence parsed is not
+  /// lexically legal, emit a diagnostic and return a result EOM token.
+  void LexIncludeFilename(LexerToken &Result);
 };
 
 

Modified: cfe/cfe/trunk/include/clang/Lex/MacroExpander.h
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/include/clang/Lex/MacroExpander.h?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/include/clang/Lex/MacroExpander.h (original)
+++ cfe/cfe/trunk/include/clang/Lex/MacroExpander.h Wed Jul 11 11:22:20 2007
@@ -53,8 +53,7 @@
   MacroInfo &getMacro() const { return Macro; }
 
   /// Lex - Lex and return a token from this macro stream.
-  bool Lex(LexerToken &Tok);
-  
+  void Lex(LexerToken &Tok);
 };
   
 }  // end namespace llvm

Modified: cfe/cfe/trunk/include/clang/Lex/Preprocessor.h
URL: http://llvm.org/viewvc/llvm-project/cfe/cfe/trunk/include/clang/Lex/Preprocessor.h?rev=38540&r1=38539&r2=38540&view=diff

==============================================================================
--- cfe/cfe/trunk/include/clang/Lex/Preprocessor.h (original)
+++ cfe/cfe/trunk/include/clang/Lex/Preprocessor.h Wed Jul 11 11:22:20 2007
@@ -241,38 +241,37 @@
   void EnterSourceFile(unsigned CurFileID, const DirectoryLookup *Dir);
 
   /// EnterMacro - Add a Macro to the top of the include stack and start lexing
-  /// tokens from it instead of the current buffer.  Return true on failure.
-  bool EnterMacro(LexerToken &Identifier);
+  /// tokens from it instead of the current buffer.
+  void EnterMacro(LexerToken &Identifier);
   
   
   /// Lex - To lex a token from the preprocessor, just pull a token from the
   /// current lexer or macro object.
-  bool Lex(LexerToken &Result) {
+  void Lex(LexerToken &Result) {
     if (CurLexer)
-      return CurLexer->Lex(Result);
+      CurLexer->Lex(Result);
     else
-      return CurMacroExpander->Lex(Result);
+      CurMacroExpander->Lex(Result);
   }
   
   /// LexUnexpandedToken - This is just like Lex, but this disables macro
   /// expansion of identifier tokens.
-  bool LexUnexpandedToken(LexerToken &Result) {
+  void LexUnexpandedToken(LexerToken &Result) {
     // Disable macro expansion.
     bool OldVal = DisableMacroExpansion;
     DisableMacroExpansion = true;
     // Lex the token.
-    bool ResVal = Lex(Result);
+    Lex(Result);
     
     // Reenable it.
     DisableMacroExpansion = OldVal;
-    return ResVal;
   }
   
   /// Diag - Forwarding function for diagnostics.  This emits a diagnostic at
   /// the specified LexerToken's location, translating the token's start
   /// position in the current buffer into a SourcePosition object for rendering.
-  bool Diag(const LexerToken &Tok, unsigned DiagID, const std::string &Msg="");  
-  bool Diag(SourceLocation Loc, unsigned DiagID, const std::string &Msg="");  
+  void Diag(const LexerToken &Tok, unsigned DiagID, const std::string &Msg="");  
+  void Diag(SourceLocation Loc, unsigned DiagID, const std::string &Msg="");  
   
   void PrintStats();
 
@@ -284,23 +283,22 @@
   /// identifier and has filled in the tokens IdentifierInfo member.  This
   /// callback potentially macro expands it or turns it into a named token (like
   /// 'for').
-  bool HandleIdentifier(LexerToken &Identifier);
+  void HandleIdentifier(LexerToken &Identifier);
 
   /// HandleEndOfFile - This callback is invoked when the lexer hits the end of
   /// the current file.  This either returns the EOF token or pops a level off
   /// the include stack and keeps going.
-  bool HandleEndOfFile(LexerToken &Result);
+  void HandleEndOfFile(LexerToken &Result);
   
   /// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
-  /// the current macro.  This either returns the EOF token or pops a level off
-  /// the include stack and keeps going.
-  bool HandleEndOfMacro(LexerToken &Result);
+  /// the current macro line.
+  void HandleEndOfMacro(LexerToken &Result);
   
   /// HandleDirective - This callback is invoked when the lexer sees a # token
   /// at the start of a line.  This consumes the directive, modifies the 
   /// lexer/preprocessor state, and advances the lexer(s) so that the next token
   /// read is the correct one.
-  bool HandleDirective(LexerToken &Result);
+  void HandleDirective(LexerToken &Result);
 
 private:
   /// getFileInfo - Return the PerFileInfo structure for the specified
@@ -309,16 +307,16 @@
 
   /// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
   /// current line until the tok::eom token is found.
-  bool DiscardUntilEndOfDirective();
+  void DiscardUntilEndOfDirective();
 
   /// ReadMacroName - Lex and validate a macro name, which occurs after a
   /// #define or #undef.  This emits a diagnostic, sets the token kind to eom,
   /// and discards the rest of the macro line if the macro name is invalid.
-  bool ReadMacroName(LexerToken &MacroNameTok);
+  void ReadMacroName(LexerToken &MacroNameTok);
   
   /// CheckEndOfDirective - Ensure that the next token is a tok::eom token.  If
   /// not, emit a diagnostic and consume up until the eom.
-  bool CheckEndOfDirective(const char *Directive);
+  void CheckEndOfDirective(const char *Directive);
   
   /// SkipExcludedConditionalBlock - We just read a #if or related directive and
   /// decided that the subsequent tokens are in the #if'd out portion of the
@@ -328,46 +326,45 @@
   /// FoundElse is false, then #else directives are ok, if not, then we have
   /// already seen one so a #else directive is a duplicate.  When this returns,
   /// the caller can lex the first valid token.
-  bool SkipExcludedConditionalBlock(const char *IfTokenLoc,
+  void SkipExcludedConditionalBlock(const char *IfTokenLoc,
                                     bool FoundNonSkipPortion, bool FoundElse);
   
   /// EvaluateDirectiveExpression - Evaluate an integer constant expression that
   /// may occur after a #if or #elif directive.  Sets Result to the result of
-  /// the expression.  Returns false normally, true if lexing must be aborted.
-  bool EvaluateDirectiveExpression(bool &Result);
+  /// the expression.
+  void EvaluateDirectiveExpression(bool &Result);
   /// EvaluateValue - Used to implement EvaluateDirectiveExpression,
   /// see PPExpressions.cpp.
-  bool EvaluateValue(int &Result, LexerToken &PeekTok, bool &StopParse);
+  bool EvaluateValue(int &Result, LexerToken &PeekTok);
   /// EvaluateDirectiveSubExpr - Used to implement EvaluateDirectiveExpression,
   /// see PPExpressions.cpp.
   bool EvaluateDirectiveSubExpr(int &LHS, unsigned MinPrec,
-                                LexerToken &PeekTok, bool &StopParse);
+                                LexerToken &PeekTok);
   
   //===--------------------------------------------------------------------===//
   /// Handle*Directive - implement the various preprocessor directives.  These
   /// should side-effect the current preprocessor object so that the next call
-  /// to Lex() will return the appropriate token next.  If a fatal error occurs
-  /// return true, otherwise return false.
+  /// to Lex() will return the appropriate token next.
   
-  bool HandleUserDiagnosticDirective(LexerToken &Result, bool isWarning);
+  void HandleUserDiagnosticDirective(LexerToken &Result, bool isWarning);
   
   // File inclusion.
-  bool HandleIncludeDirective(LexerToken &Result,
+  void HandleIncludeDirective(LexerToken &Result,
                               const DirectoryLookup *LookupFrom = 0,
                               bool isImport = false);
-  bool HandleIncludeNextDirective(LexerToken &Result);
-  bool HandleImportDirective(LexerToken &Result);
+  void HandleIncludeNextDirective(LexerToken &Result);
+  void HandleImportDirective(LexerToken &Result);
   
   // Macro handling.
-  bool HandleDefineDirective(LexerToken &Result);
-  bool HandleUndefDirective(LexerToken &Result);
+  void HandleDefineDirective(LexerToken &Result);
+  void HandleUndefDirective(LexerToken &Result);
   
   // Conditional Inclusion.
-  bool HandleIfdefDirective(LexerToken &Result, bool isIfndef);
-  bool HandleIfDirective(LexerToken &Result);
-  bool HandleEndifDirective(LexerToken &Result);
-  bool HandleElseDirective(LexerToken &Result);
-  bool HandleElifDirective(LexerToken &Result);
+  void HandleIfdefDirective(LexerToken &Result, bool isIfndef);
+  void HandleIfDirective(LexerToken &Result);
+  void HandleEndifDirective(LexerToken &Result);
+  void HandleElseDirective(LexerToken &Result);
+  void HandleElifDirective(LexerToken &Result);
 };
 
 }  // end namespace clang





More information about the cfe-commits mailing list