//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

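// Illustrative note (not from the original source): in a .td file these
// directives are written at the start of a line, roughly in the style of the
// C preprocessor, e.g.
//
//   #ifdef ENABLE_EXTRA_DEFS   // ENABLE_EXTRA_DEFS is a hypothetical macro
//   ...
//   #else
//   ...
//   #endif
//
// A macro becomes defined either via the Macros list passed to the TGLexer
// constructor below or via an in-file #define.
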
TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined in the command line into the DefinedMacros set.
  for (const std::string &MacroName : Macros)
    DefinedMacros.insert(MacroName);
}

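// Minimal usage sketch (illustrative only; assumes the TGLexer interface
// declared in TGLexer.h, where Lex() wraps LexToken()):
//
//   llvm::SourceMgr SM;
//   SM.AddNewSourceBuffer(std::move(MainBuffer), llvm::SMLoc());
//   TGLexer Lexer(SM, /*Macros=*/{});
//   while (Lexer.Lex() != tgtok::Eof) {
//     // Consume tokens here.
//   }
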
SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

SMRange TGLexer::getLocRange() const {
  return {getLoc(), SMLoc::getFromPointer(CurPtr)};
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) && *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false FileOrLineStart.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuation.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");

  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          [[fallthrough]];
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

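// Illustrative example (not part of the original source): the input line
//
//   def Foo : Bar<3>;
//
// lexes to the sequence Def, Id("Foo"), colon, Id("Bar"), less, IntVal(3),
// greater, semi, with keywords and identifiers resolved by LexIdentifier()
// below.
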
/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      [[fallthrough]];

    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

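// Illustrative example (not part of the original source): the literal
//
//   "a \"quoted\" \\ backslash"
//
// produces a StrVal token whose CurStrVal is:  a "quoted" \ backslash
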
tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr - IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
    .Case("int", tgtok::Int)
    .Case("bit", tgtok::Bit)
    .Case("bits", tgtok::Bits)
    .Case("string", tgtok::String)
    .Case("list", tgtok::List)
    .Case("code", tgtok::Code)
    .Case("dag", tgtok::Dag)
    .Case("class", tgtok::Class)
    .Case("def", tgtok::Def)
    .Case("true", tgtok::TrueVal)
    .Case("false", tgtok::FalseVal)
    .Case("foreach", tgtok::Foreach)
    .Case("defm", tgtok::Defm)
    .Case("defset", tgtok::Defset)
    .Case("multiclass", tgtok::MultiClass)
    .Case("field", tgtok::Field)
    .Case("let", tgtok::Let)
    .Case("in", tgtok::In)
    .Case("defvar", tgtok::Defvar)
    .Case("include", tgtok::Include)
    .Case("if", tgtok::If)
    .Case("then", tgtok::Then)
    .Case("else", tgtok::ElseKW)
    .Case("assert", tgtok::Assert)
    .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

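// Illustrative example (not part of the original source): "class" and "def"
// lex to their keyword tokens, "include" triggers LexInclude(), and an
// unreserved word such as "MyRecord" falls through to tgtok::Id with
// CurStrVal set to "MyRecord".
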
/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr - 2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}

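// Illustrative example (not part of the original source): 0x1F lexes to
// IntVal 31, 0b0101 lexes to BinaryIntVal, 42 and -7 lex to IntVal, and a
// bare '-' or '+' not followed by a digit lexes to the minus/plus token.
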
/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr - 2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "Unterminated code block");
}

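// Illustrative example (not part of the original source): `[{ return X; }]`
// produces a CodeFragment token whose CurStrVal is " return X; ", while the
// '[' in `[1, 2, 3]` produces a plain l_square token.
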
/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("div", tgtok::XDIV)
          .Case("not", tgtok::XNOT)
          .Case("logtwo", tgtok::XLOG2)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("listremove", tgtok::XListRemove)
          .Case("range", tgtok::XRange)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Case("getdagarg", tgtok::XGetDagArg)
          .Case("getdagname", tgtok::XGetDagName)
          .Case("setdagarg", tgtok::XSetDagArg)
          .Case("setdagname", tgtok::XSetDagName)
          .Case("exists", tgtok::XExists)
          .Case("tolower", tgtok::XToLower)
          .Case("toupper", tgtok::XToUpper)
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind
                              : ReturnError(Start - 1, "Unknown operator");
}

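// Illustrative example (not part of the original source): `!add(1, 2)` lexes
// '!add' to XADD, the deprecated `!setop`/`!getop` spellings still map to
// XSetDagOp/XGetDagOp, and an unknown spelling such as `!frobnicate`
// (hypothetical) is reported as "Unknown operator".
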
bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error, if preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();
    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto &PD : PreprocessorDirs) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PD.Word); ++I) {
      if (NextChar != PD.Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PD.Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

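// Illustrative example (not part of the original source): at a line start,
// "#ifdef FOO" and "#else// trailing comment" are recognized as directives;
// "#ifdefFOO" (no separator) is not; "#define/**/NAME" is recognized here but
// rejected later when prepLexMacroName() fails to find a macro name.
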
bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto &PD : PreprocessorDirs)
    if (PD.Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PD.Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

tgtok::TokKind TGLexer::lexPreprocessor(tgtok::TokKind Kind,
                                        bool ReturnNextLiveToken) {
  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to ifdef equivalent
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}

bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr != '#')
      continue;

    ++CurPtr; // Skip the '#' before checking for the directive word.

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;

  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth of the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printer in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      return false;
    }

    ++CurPtr;
  }

  return true;
}

void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}

void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");
}