//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined in the command line into the DefinedMacros set.
  for (const std::string &MacroName : Macros)
    DefinedMacros.insert(MacroName);
}

SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

SMRange TGLexer::getLocRange() const {
  return {getLoc(), SMLoc::getFromPointer(CurPtr)};
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          [[fallthrough]];
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      [[fallthrough]];
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}
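
// Example (illustration): for a literal such as "Hello\tWorld" the loop above
// produces CurStrVal == "Hello" + '\t' + "World" and returns tgtok::StrVal;
// a raw newline or end-of-file inside the quotes is reported via ReturnError().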

tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr-IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
    .Case("int", tgtok::Int)
    .Case("bit", tgtok::Bit)
    .Case("bits", tgtok::Bits)
    .Case("string", tgtok::String)
    .Case("list", tgtok::List)
    .Case("code", tgtok::Code)
    .Case("dag", tgtok::Dag)
    .Case("class", tgtok::Class)
    .Case("def", tgtok::Def)
    .Case("true", tgtok::TrueVal)
    .Case("false", tgtok::FalseVal)
    .Case("foreach", tgtok::Foreach)
    .Case("defm", tgtok::Defm)
    .Case("defset", tgtok::Defset)
    .Case("multiclass", tgtok::MultiClass)
    .Case("field", tgtok::Field)
    .Case("let", tgtok::Let)
    .Case("in", tgtok::In)
    .Case("defvar", tgtok::Defvar)
    .Case("include", tgtok::Include)
    .Case("if", tgtok::If)
    .Case("then", tgtok::Then)
    .Case("else", tgtok::ElseKW)
    .Case("assert", tgtok::Assert)
    .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr-2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}
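
// Example (illustration): "0x1F" lexes to tgtok::IntVal with CurIntVal == 31,
// "0b101" lexes to tgtok::BinaryIntVal with CurIntVal == 5, and a '-' or '+'
// that is not followed by a digit is returned as tgtok::minus or tgtok::plus.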

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr-2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "Unterminated code block");
}
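
// Example (illustration): the input "[{ return X; }]" is returned as a single
// tgtok::CodeFragment token whose CurStrVal holds the text between the "[{"
// and "}]" delimiters (here " return X; ", delimiters excluded).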

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("div", tgtok::XDIV)
          .Case("not", tgtok::XNOT)
          .Case("logtwo", tgtok::XLOG2)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("listremove", tgtok::XListRemove)
          .Case("range", tgtok::XRange)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Case("exists", tgtok::XExists)
          .Case("tolower", tgtok::XToLower)
          .Case("toupper", tgtok::XToUpper)
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}
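
// Example (illustration): "!listconcat" maps to tgtok::XListConcat, the
// deprecated "!setop" still maps to tgtok::XSetDagOp, and an unknown word
// such as "!frobnicate" (hypothetical) is rejected with "Unknown operator".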

bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error, if preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto &PD : PreprocessorDirs) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PD.Word); ++I) {
      if (NextChar != PD.Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PD.Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto &PD : PreprocessorDirs)
    if (PD.Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PD.Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

tgtok::TokKind TGLexer::lexPreprocessor(
    tgtok::TokKind Kind, bool ReturnNextLiveToken) {

  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to ifdef equivalent
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}
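
// Example (illustration), assuming a hypothetical macro name MYMACRO:
//     #ifdef MYMACRO
//     def Foo;
//     #else
//     def Bar;
//     #endif
// When called from LexToken() (ReturnNextLiveToken == true), lexPreprocessor()
// returns the first token of the live branch; when called from
// prepSkipRegion() during line skipping (ReturnNextLiveToken == false), it
// returns just the directive kind.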

bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth of the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printer in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}

void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");

  TokStart = CurPtr;
}