//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//
#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;
namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct PreprocessorDir {
  tgtok::TokKind Kind;
  StringRef Word;
};
} // end anonymous namespace
/// Returns true if `C` is a valid character in an identifier. If `First` is
/// true, returns true if `C` is a valid first character of an identifier,
/// else returns true if `C` is a valid non-first character of an identifier.
/// Identifiers match the following regular expression:
///   [a-zA-Z_][0-9a-zA-Z_]*
static bool isValidIDChar(char C, bool First) {
  if (C == '_' || isAlpha(C))
    return true;
  return !First && isDigit(C);
}
constexpr PreprocessorDir PreprocessorDirs[] = {{tgtok::Ifdef, "ifdef"},
                                                {tgtok::Ifndef, "ifndef"},
                                                {tgtok::Else, "else"},
                                                {tgtok::Endif, "endif"},
                                                {tgtok::Define, "define"}};
// Returns a pointer past the end of a valid macro name at the start of `Str`.
// Valid macro names match the regular expression [a-zA-Z_][0-9a-zA-Z_]*.
static const char *lexMacroName(StringRef Str) {
  assert(!Str.empty());

  // Macro names start with [a-zA-Z_].
  const char *Next = Str.begin();
  if (!isValidIDChar(*Next, /*First=*/true))
    return Next;
  // Eat the first character of the name.
  ++Next;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  const char *End = Str.end();
  while (Next != End && isValidIDChar(*Next, /*First=*/false))
    ++Next;
  return Next;
}

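/// Initialize the lexer to read from the main buffer of the given SourceMgr
/// and record every macro name passed on the command line in DefinedMacros.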
TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Add all macros defined on the command line to the DefinedMacros set.
  // Check invalid macro names and print fatal error if we find one.
  for (StringRef MacroName : Macros) {
    const char *End = lexMacroName(MacroName);
    if (End != MacroName.end())
      PrintFatalError("invalid macro name `" + MacroName +
                      "` specified on command line");

    DefinedMacros.insert(MacroName);
  }
}

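/// Return the SMLoc of the start of the current token (TokStart).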
SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

SMRange TGLexer::getLocRange() const {
  return {getLoc(), SMLoc::getFromPointer(CurPtr)};
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

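/// Handle the end of an input buffer: return to the parent include file if
/// there is one, otherwise finish lexing. Returns true if lexing should
/// continue in the parent buffer.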
bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

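/// Read and consume the next character from the buffer. Folds "\r\n" and
/// "\n\r" pairs into a single '\n', turns a stray NUL into a space, and
/// returns EOF at the end of the buffer.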
int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

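/// Return the character Index positions ahead of CurPtr without consuming it.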
int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

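/// Lex and return the next token. FileOrLineStart is true when the lexer is
/// positioned at the start of a line (or file), which is the only place where
/// preprocessing directives are recognized.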
tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isValidIDChar(CurChar, /*First=*/true))
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isDigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isDigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          [[fallthrough]];
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }

      if (isValidIDChar(NextChar, /*First=*/true))
        return LexIdentifier();
    }

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "end of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "end of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "end of file in string literal");
      [[fallthrough]];
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

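/// LexVarName - Lex $[a-zA-Z_][0-9a-zA-Z_]*.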
tgtok::TokKind TGLexer::LexVarName() {
  if (!isValidIDChar(CurPtr[0], /*First=*/true))
    return ReturnError(TokStart, "invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isValidIDChar(*CurPtr, /*First=*/false))
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

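/// LexIdentifier - Lex [a-zA-Z_][0-9a-zA-Z_]* and map reserved words to their
/// keyword token kinds; anything else becomes tgtok::Id.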
tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isValidIDChar(*CurPtr, /*First=*/false))
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr - IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
                            .Case("int", tgtok::Int)
                            .Case("bit", tgtok::Bit)
                            .Case("bits", tgtok::Bits)
                            .Case("string", tgtok::String)
                            .Case("list", tgtok::List)
                            .Case("code", tgtok::Code)
                            .Case("dag", tgtok::Dag)
                            .Case("class", tgtok::Class)
                            .Case("def", tgtok::Def)
                            .Case("true", tgtok::TrueVal)
                            .Case("false", tgtok::FalseVal)
                            .Case("foreach", tgtok::Foreach)
                            .Case("defm", tgtok::Defm)
                            .Case("defset", tgtok::Defset)
                            .Case("deftype", tgtok::Deftype)
                            .Case("multiclass", tgtok::MultiClass)
                            .Case("field", tgtok::Field)
                            .Case("let", tgtok::Let)
                            .Case("in", tgtok::In)
                            .Case("defvar", tgtok::Defvar)
                            .Case("include", tgtok::Include)
                            .Case("if", tgtok::If)
                            .Case("then", tgtok::Then)
                            .Case("else", tgtok::ElseKW)
                            .Case("assert", tgtok::Assert)
                            .Case("dump", tgtok::Dump)
                            .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "unterminated comment");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
tgtok::TokKind TGLexer::LexNumber() {
  unsigned Base = 0;
  const char *NumStart;

  // Check if it's a hex or a binary value.
  if (CurPtr[-1] == '0') {
    NumStart = CurPtr + 1;
    if (CurPtr[0] == 'x') {
      Base = 16;
      do
        ++CurPtr;
      while (isHexDigit(CurPtr[0]));
    } else if (CurPtr[0] == 'b') {
      Base = 2;
      do
        ++CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1');
    }
  }

  // For a hex or binary value, we always convert it to an unsigned value.
  bool IsMinus = false;

  // Check if it's a decimal value.
  if (Base == 0) {
    // Check for a sign without a digit.
    if (!isDigit(CurPtr[0])) {
      if (CurPtr[-1] == '-')
        return tgtok::minus;
      else if (CurPtr[-1] == '+')
        return tgtok::plus;
    }

    Base = 10;
    NumStart = TokStart;
    IsMinus = CurPtr[-1] == '-';

    while (isDigit(CurPtr[0]))
      ++CurPtr;
  }

  // Requires at least one digit.
  if (CurPtr == NumStart)
    return ReturnError(TokStart, "invalid number");

  errno = 0;
  if (IsMinus)
    CurIntVal = strtoll(NumStart, nullptr, Base);
  else
    CurIntVal = strtoull(NumStart, nullptr, Base);

  if (errno == EINVAL)
    return ReturnError(TokStart, "invalid number");
  if (errno == ERANGE)
    return ReturnError(TokStart, "number out of range");

  return Base == 2 ? tgtok::BinaryIntVal : tgtok::IntVal;
}

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr - 2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "unterminated code block");
}

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isAlpha(*CurPtr))
    return ReturnError(CurPtr - 1, "invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isAlpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("div", tgtok::XDIV)
          .Case("not", tgtok::XNOT)
          .Case("logtwo", tgtok::XLOG2)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listflatten", tgtok::XListFlatten)
          .Case("listsplat", tgtok::XListSplat)
          .Case("listremove", tgtok::XListRemove)
          .Case("range", tgtok::XRange)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Case("getdagarg", tgtok::XGetDagArg)
          .Case("getdagname", tgtok::XGetDagName)
          .Case("setdagarg", tgtok::XSetDagArg)
          .Case("setdagname", tgtok::XSetDagName)
          .Case("exists", tgtok::XExists)
          .Case("tolower", tgtok::XToLower)
          .Case("toupper", tgtok::XToUpper)
          .Case("repr", tgtok::XRepr)
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind
                              : ReturnError(Start - 1, "unknown operator");
}

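/// Pop the preprocessing control stack entry for the include file being left,
/// reporting an error if that file still has an unterminated #ifdef/#else.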
bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error, if preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("preprocessor include stack is empty");
  }

  return true;
}

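/// Check whether CurPtr points at a preprocessing directive word and return
/// its token kind, or tgtok::Error if no directive is recognized.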
tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto [Kind, Word] : PreprocessorDirs) {
    if (StringRef(CurPtr, Word.size()) != Word)
      continue;
    int NextChar = peekNextChar(Word.size());

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    //
    // New line and EOF may follow only #else/#endif. It will be reported
    // as an error for #ifdef/#define after the call to prepLexMacroName().
    if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
        NextChar == '\n' ||
        // It looks like TableGen does not support '\r' as the actual
        // carriage return, e.g. getNextChar() treats a single '\r'
        // as '\n'. So we do the same here.
        NextChar == '\r')
      return Kind;

    // Allow comments after some directives, e.g.:
    //     #else// OR #else/**/
    //     #endif// OR #endif/**/
    //
    // Note that we do allow comments after #ifdef/#define here, e.g.
    //     #ifdef/**/ AND #ifdef//
    //     #define/**/ AND #define//
    //
    // These cases will be reported as incorrect after calling
    // prepLexMacroName(). We could have supported C-style comments
    // after #ifdef/#define, but this would complicate the code
    // for little benefit.
    if (NextChar == '/') {
      NextChar = peekNextChar(Word.size() + 1);

      if (NextChar == '*' || NextChar == '/')
        return Kind;

      // Pretend that we do not recognize the directive.
    }
  }

  return tgtok::Error;
}

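/// Advance CurPtr past the directive word for Kind. Must only be called after
/// a directive of that kind has been recognized by prepIsDirective().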
bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto [PKind, PWord] : PreprocessorDirs)
    if (PKind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += PWord.size();
      return true;
    }

  PrintFatalError("unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

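/// Handle a preprocessing directive of the given Kind starting at TokStart.
/// When ReturnNextLiveToken is true, return the next live token after the
/// directive; otherwise return the directive's own kind back to the
/// line-skipping code.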
tgtok::TokKind TGLexer::lexPreprocessor(tgtok::TokKind Kind,
                                        bool ReturnNextLiveToken) {
  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef's MacroIsDefined to its ifdef equivalent.
    if (Kind == tgtok::Ifndef)
      MacroIsDefined = !MacroIsDefined;

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    // Note that MacroIsDefined has been canonicalized against ifdef.
    PrepIncludeStack.back()->push_back(
        {tgtok::Ifdef, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("preprocessing directive is not supported");
  return tgtok::Error;
}

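/// Skip whole lines of input while token processing is disabled, until a
/// matching #else or #endif re-enables it. Returns false on error.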
bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("invalid recursion.");

  do {
    // Skip all symbols to the line end.
    while (*CurPtr != '\n')
      ++CurPtr;

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

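/// Lex the macro name ([a-zA-Z_][0-9a-zA-Z_]*) that follows a preprocessing
/// directive, skipping leading spaces and tabs. Returns an empty StringRef if
/// no valid name is found.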
StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  CurPtr = lexMacroName(StringRef(CurPtr, CurBuf.end() - CurPtr));
  return StringRef(TokStart, CurPtr - TokStart);
}

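/// While in line-skipping mode, advance CurPtr to the first non-whitespace,
/// non-comment character of the following line(s), or to the end of the
/// buffer. Returns false only if an unterminated C-style comment is found.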
bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

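/// Consume the rest of the line after a preprocessing directive, allowing
/// only whitespace and comments. Returns false if anything else is found.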
bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth of the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printer in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

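/// Return true if none of the #ifdef/#else regions currently on the control
/// stack disables token processing.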
bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}

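/// Report an unterminated preprocessing control (e.g. an #ifdef without a
/// matching #endif) at the end of the file.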
void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "the latest preprocessor control is here");

  TokStart = CurPtr;
}