//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/ADT/ArrayRef.h"
15 #include "llvm/ADT/StringExtras.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/ADT/Twine.h"
18 #include "llvm/Config/config.h" // for strtoull()/strtoll() define
19 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/MemoryBuffer.h"
21 #include "llvm/Support/SourceMgr.h"
22 #include "llvm/TableGen/Error.h"
namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct PreprocessorDir {
  tgtok::TokKind Kind;
  StringRef Word;
};
} // end anonymous namespace
/// Returns true if `C` is a valid character in an identifier. If `First` is
/// true, returns true if `C` is a valid first character of an identifier,
/// else returns true if `C` is a valid non-first character of an identifier.
/// Identifiers match the following regular expression:
///   [a-zA-Z_][0-9a-zA-Z_]*
static bool isValidIDChar(char C, bool First) {
  if (C == '_' || isAlpha(C))
    return true;
  return !First && isDigit(C);
}
constexpr PreprocessorDir PreprocessorDirs[] = {{tgtok::Ifdef, "ifdef"},
                                                {tgtok::Ifndef, "ifndef"},
                                                {tgtok::Else, "else"},
                                                {tgtok::Endif, "endif"},
                                                {tgtok::Define, "define"}};
// Returns a pointer past the end of a valid macro name at the start of `Str`.
// Valid macro names match the regular expression [a-zA-Z_][0-9a-zA-Z_]*.
static const char *lexMacroName(StringRef Str) {
  // Macro names start with [a-zA-Z_].
  const char *Next = Str.begin();
  if (!isValidIDChar(*Next, /*First=*/true))
    return Next;
  // Eat the first character of the name.
  ++Next;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  const char *End = Str.end();
  while (Next != End && isValidIDChar(*Next, /*First=*/false))
    ++Next;
  return Next;
}
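
// Example (illustrative): lexMacroName("MY_MACRO_1 ...") stops at the first
// space and returns a pointer to it, while lexMacroName("1BAD") returns
// Str.begin() because a macro name must not start with a digit.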
TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Add all macros defined on the command line to the DefinedMacros set.
  // Check invalid macro names and print fatal error if we find one.
  for (StringRef MacroName : Macros) {
    const char *End = lexMacroName(MacroName);
    if (End != MacroName.end())
      PrintFatalError("invalid macro name `" + MacroName +
                      "` specified on command line");

    DefinedMacros.insert(MacroName);
  }
}
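
// Example (illustrative, assuming the usual -D option of llvm-tblgen):
//     llvm-tblgen -DMY_MACRO Foo.td
// arrives here as Macros == {"MY_MACRO"}; the name passes lexMacroName()
// validation and is inserted into DefinedMacros, so later #ifdef MY_MACRO
// checks in the .td sources succeed.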
SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

SMRange TGLexer::getLocRange() const {
  return {getLoc(), SMLoc::getFromPointer(CurPtr)};
}
/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}
bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}
int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}
int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}
tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isValidIDChar(CurChar, /*First=*/true))
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;
  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuation.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "invalid '..' punctuation");
    }
    return tgtok::dot;
239 PrintFatalError("getNextChar() must never return '\r'");
244 // Ignore whitespace.
245 return LexToken(FileOrLineStart
);
247 // Ignore whitespace, and identify the new line.
248 return LexToken(true);
250 // If this is the start of a // comment, skip until the end of the line or
251 // the end of the buffer.
254 else if (*CurPtr
== '*') {
257 } else // Otherwise, this is an error.
258 return ReturnError(TokStart
, "unexpected character");
259 return LexToken(FileOrLineStart
);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isDigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isDigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          [[fallthrough]];
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isValidIDChar(NextChar, /*First=*/true))
      return LexIdentifier();

    return LexNumber();
  }
300 case '"': return LexString();
301 case '$': return LexVarName();
302 case '[': return LexBracket();
303 case '!': return LexExclaim();
/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "end of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "end of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "end of file in string literal");
      [[fallthrough]];
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}
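
// Example (illustrative): the literal
//     "line1\nline2 \"quoted\""
// is accumulated into CurStrVal as: line1<newline>line2 "quoted".
// Only the \\, \', \", \t and \n escapes handled above are accepted; any
// other escape is reported as "invalid escape in string literal".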
tgtok::TokKind TGLexer::LexVarName() {
  if (!isValidIDChar(CurPtr[0], /*First=*/true))
    return ReturnError(TokStart, "invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isValidIDChar(*CurPtr, /*First=*/false))
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}
tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isValidIDChar(*CurPtr, /*First=*/false))
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr - IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
                            .Case("int", tgtok::Int)
                            .Case("bit", tgtok::Bit)
                            .Case("bits", tgtok::Bits)
                            .Case("string", tgtok::String)
                            .Case("list", tgtok::List)
                            .Case("code", tgtok::Code)
                            .Case("dag", tgtok::Dag)
                            .Case("class", tgtok::Class)
                            .Case("def", tgtok::Def)
                            .Case("true", tgtok::TrueVal)
                            .Case("false", tgtok::FalseVal)
                            .Case("foreach", tgtok::Foreach)
                            .Case("defm", tgtok::Defm)
                            .Case("defset", tgtok::Defset)
                            .Case("deftype", tgtok::Deftype)
                            .Case("multiclass", tgtok::MultiClass)
                            .Case("field", tgtok::Field)
                            .Case("let", tgtok::Let)
                            .Case("in", tgtok::In)
                            .Case("defvar", tgtok::Defvar)
                            .Case("include", tgtok::Include)
                            .Case("if", tgtok::If)
                            .Case("then", tgtok::Then)
                            .Case("else", tgtok::ElseKW)
                            .Case("assert", tgtok::Assert)
                            .Case("dump", tgtok::Dump)
                            .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}
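
// Example (illustrative): for the input
//     def MyRecord
// "def" matches the StringSwitch above and is returned as tgtok::Def, while
// "MyRecord" falls through to tgtok::Id with CurStrVal holding "MyRecord"
// for the parser to consume.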
/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "expected filename after include");
    return true;
  }

  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}
/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}
/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "unterminated comment");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}
tgtok::TokKind TGLexer::LexNumber() {
  unsigned Base = 0;
  const char *NumStart;

  // Check if it's a hex or a binary value.
  if (CurPtr[-1] == '0') {
    NumStart = CurPtr + 1;
    if (CurPtr[0] == 'x') {
      Base = 16;
      do
        ++CurPtr;
      while (isHexDigit(CurPtr[0]));
    } else if (CurPtr[0] == 'b') {
      Base = 2;
      do
        ++CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1');
    }
  }

  // For a hex or binary value, we always convert it to an unsigned value.
  bool IsMinus = false;

  // Check if it's a decimal value.
  if (Base == 0) {
    // Check for a sign without a digit.
    if (!isDigit(CurPtr[0])) {
      if (CurPtr[-1] == '-')
        return tgtok::minus;
      else if (CurPtr[-1] == '+')
        return tgtok::plus;
    }

    Base = 10;
    NumStart = TokStart;
    IsMinus = CurPtr[-1] == '-';

    while (isDigit(CurPtr[0]))
      ++CurPtr;
  }

  // Requires at least one digit.
  if (CurPtr == NumStart)
    return ReturnError(TokStart, "invalid number");

  errno = 0;
  if (IsMinus)
    CurIntVal = strtoll(NumStart, nullptr, Base);
  else
    CurIntVal = strtoull(NumStart, nullptr, Base);

  if (errno == EINVAL)
    return ReturnError(TokStart, "invalid number");
  if (errno == ERANGE)
    return ReturnError(TokStart, "number out of range");

  return Base == 2 ? tgtok::BinaryIntVal : tgtok::IntVal;
}
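
// Examples (illustrative) of literals accepted here:
//     42        -> tgtok::IntVal       (decimal, Base == 10)
//     -42       -> tgtok::IntVal       (sign handled via IsMinus/strtoll)
//     0x2A      -> tgtok::IntVal       (Base == 16, converted as unsigned)
//     0b101010  -> tgtok::BinaryIntVal (Base == 2, so the parser can keep
//                                       the explicit bit width)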
/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;

  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr - 2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "unterminated code block");
}
/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isAlpha(*CurPtr))
    return ReturnError(CurPtr - 1, "invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isAlpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("div", tgtok::XDIV)
          .Case("not", tgtok::XNOT)
          .Case("logtwo", tgtok::XLOG2)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listflatten", tgtok::XListFlatten)
          .Case("listsplat", tgtok::XListSplat)
          .Case("listremove", tgtok::XListRemove)
          .Case("range", tgtok::XRange)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Case("getdagarg", tgtok::XGetDagArg)
          .Case("getdagname", tgtok::XGetDagName)
          .Case("setdagarg", tgtok::XSetDagArg)
          .Case("setdagname", tgtok::XSetDagName)
          .Case("exists", tgtok::XExists)
          .Case("tolower", tgtok::XToLower)
          .Case("toupper", tgtok::XToUpper)
          .Case("repr", tgtok::XRepr)
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind
                              : ReturnError(Start - 1, "unknown operator");
}
bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error, if preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();
    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("preprocessor include stack is empty");
  }

  return true;
}
tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto [Kind, Word] : PreprocessorDirs) {
    if (StringRef(CurPtr, Word.size()) != Word)
      continue;

    int NextChar = peekNextChar(Word.size());

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.

    // New line and EOF may follow only #else/#endif. It will be reported
    // as an error for #ifdef/#define after the call to prepLexMacroName().
    if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
        NextChar == '\n' ||
        // It looks like TableGen does not support '\r' as the actual
        // carriage return, e.g. getNextChar() treats a single '\r'
        // as '\n'. So we do the same here.
        NextChar == '\r')
      return Kind;

    // Allow comments after some directives, e.g.:
    //     #else// OR #else/**/
    //     #endif// OR #endif/**/
    //
    // Note that we do allow comments after #ifdef/#define here, e.g.
    //     #ifdef/**/ AND #ifdef//
    //     #define/**/ AND #define//
    //
    // These cases will be reported as incorrect after calling
    // prepLexMacroName(). We could have supported C-style comments
    // after #ifdef/#define, but this would complicate the code
    // for little benefit.
    if (NextChar == '/') {
      NextChar = peekNextChar(Word.size() + 1);

      if (NextChar == '*' || NextChar == '/')
        return Kind;

      // Pretend that we do not recognize the directive.
    }
  }

  return tgtok::Error;
}
bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto [PKind, PWord] : PreprocessorDirs)
    if (PKind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += PWord.size();
      return true;
    }

  PrintFatalError("unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}
tgtok::TokKind TGLexer::lexPreprocessor(tgtok::TokKind Kind,
                                        bool ReturnNextLiveToken) {
  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef's MacroIsDefined to its ifdef equivalent.
    if (Kind == tgtok::Ifndef)
      MacroIsDefined = !MacroIsDefined;

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    // Note that MacroIsDefined has been canonicalized against ifdef.
    PrepIncludeStack.back()->push_back(
        {tgtok::Ifdef, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("preprocessing directive is not supported");
  return tgtok::Error;
}
bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("invalid recursion.");

  do {
    // Skip all symbols to the line end.
    while (*CurPtr != '\n')
      ++CurPtr;

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr != '#')
      continue;

    ++CurPtr;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}
StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  CurPtr = lexMacroName(StringRef(CurPtr, CurBuf.end() - CurPtr));
  return StringRef(TokStart, CurPtr - TokStart);
}
bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;
    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }
    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}
bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;
    case '\n':
    case '\r':
      return true;
    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth of the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printer in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        PrintError(CurPtr, "unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }
    default:
      // Do not allow any non-whitespaces after the directive.
      return false;
    }

    ++CurPtr;
  }

  return true;
}
bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}
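
// In other words, tokens are "live" only while every enclosing #ifdef/#else
// region on the current include's control stack is in its taken state
// (IsDefined is canonicalized against #ifdef); a single false entry keeps
// processing disabled until the matching #else or #endif flips or pops it.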
void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "the latest preprocessor control is here");

  TokStart = CurPtr;
}