1 #!/usr/bin/env python
3 # Copyright (c) 2009 Google Inc. All rights reserved.
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above
12 # copyright notice, this list of conditions and the following disclaimer
13 # in the documentation and/or other materials provided with the
14 # distribution.
15 # * Neither the name of Google Inc. nor the names of its
16 # contributors may be used to endorse or promote products derived from
17 # this software without specific prior written permission.
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 """Does google-lint on c++ files.
33 The goal of this script is to identify places in the code that *may*
34 be in non-compliance with google style. It does not attempt to fix
35 up these problems -- the point is to educate. It also does not
36 attempt to find all problems, or to ensure that everything it does
37 find is legitimately a problem.
39 In particular, we can get very confused by /* and // inside strings!
40 We do a small hack, which is to ignore //'s with "'s after them on the
41 same line, but it is far from perfect (in either direction).
42 """
44 import codecs
45 import copy
46 import getopt
47 import glob
48 import itertools
49 import math # for log
50 import os
51 import re
52 import sre_compile
53 import string
54 import sys
55 import unicodedata
56 import xml.etree.ElementTree
58 # if empty, use defaults
59 _header_extensions = set([])
61 # if empty, use defaults
62 _valid_extensions = set([])
65 # Files with any of these extensions are considered to be
66 # header files (and will undergo different style checks).
67 # This set can be extended by using the --headers
68 # option (also supported in CPPLINT.cfg)
69 def GetHeaderExtensions():
70 if not _header_extensions:
71 return set(['h', 'hpp', 'hxx', 'h++', 'cuh'])
72 return _header_extensions
74 # The allowed extensions for file names
75 # This is set by --extensions flag
76 def GetAllExtensions():
77 if not _valid_extensions:
78 return GetHeaderExtensions().union(set(['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
79 return _valid_extensions
81 def GetNonHeaderExtensions():
82 return GetAllExtensions().difference(GetHeaderExtensions())
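# Added illustration (a minimal sketch, not part of the original file and not
# called by cpplint): shows the built-in extension sets produced by the
# helpers above when neither --extensions nor --headers is given.
def _example_default_extensions():
  assert sorted(GetHeaderExtensions()) == ['cuh', 'h', 'h++', 'hpp', 'hxx']
  assert 'cc' in GetAllExtensions()
  assert 'h' not in GetNonHeaderExtensions()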
85 _USAGE = """
86 Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit]
87 [--filter=-x,+y,...]
88 [--counting=total|toplevel|detailed] [--repository=path]
89 [--root=subdir] [--linelength=digits] [--recursive]
90 [--exclude=path]
91 [--headers=ext1,ext2]
92 [--extensions=hpp,cpp,...]
93 <file> [file] ...
95 The style guidelines this tries to follow are those in
96 https://google.github.io/styleguide/cppguide.html
98 Every problem is given a confidence score from 1-5, with 5 meaning we are
99 certain of the problem, and 1 meaning it could be a legitimate construct.
100 This will miss some errors, and is not a substitute for a code review.
102 To suppress false-positive errors of a certain category, add a
103 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
104 suppresses errors of all categories on that line.
106 The files passed in will be linted; at least one file must be provided.
107 Default linted extensions are %s.
108 Other file types will be ignored.
109 Change the extensions with the --extensions flag.
111 Flags:
113 output=emacs|eclipse|vs7|junit
114 By default, the output is formatted to ease emacs parsing. Output
115 compatible with eclipse (eclipse), Visual Studio (vs7), and JUnit
116 XML parsers such as those used in Jenkins and Bamboo may also be
117 used. Other formats are unsupported.
119 verbose=#
120 Specify a number 0-5 to restrict errors to certain verbosity levels.
121 Errors with lower verbosity levels have lower confidence and are more
122 likely to be false positives.
124 quiet
125 Suppress output other than linting errors, such as information about
126 which files have been processed and excluded.
128 filter=-x,+y,...
129 Specify a comma-separated list of category-filters to apply: only
130 error messages whose category names pass the filters will be printed.
131 (Category names are printed with the message and look like
132 "[whitespace/indent]".) Filters are evaluated left to right.
133 "-FOO" and "FOO" means "do not print categories that start with FOO".
134 "+FOO" means "do print categories that start with FOO".
136 Examples: --filter=-whitespace,+whitespace/braces
137 --filter=whitespace,runtime/printf,+runtime/printf_format
138 --filter=-,+build/include_what_you_use
140 To see a list of all the categories used in cpplint, pass no arg:
141 --filter=
143 counting=total|toplevel|detailed
144 The total number of errors found is always printed. If
145 'toplevel' is provided, then the count of errors in each of
146 the top-level categories like 'build' and 'whitespace' will
147 also be printed. If 'detailed' is provided, then a count
148 is provided for each category like 'build/class'.
150 repository=path
151 The top level directory of the repository, used to derive the header
152 guard CPP variable. By default, this is determined by searching for a
153 path that contains .git, .hg, or .svn. When this flag is specified, the
154 given path is used instead. This option allows the header guard CPP
155 variable to remain consistent even if members of a team have different
156 repository root directories (such as when checking out a subdirectory
157 with SVN). In addition, users of non-mainstream version control systems
158 can use this flag to ensure readable header guard CPP variables.
160 Examples:
161 Assuming that Alice checks out ProjectName and Bob checks out
162 ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
163 with no --repository flag, the header guard CPP variable will be:
165 Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
166 Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
168 If Alice uses the --repository=trunk flag and Bob omits the flag or
169 uses --repository=. then the header guard CPP variable will be:
171 Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
172 Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
174 root=subdir
175 The root directory used for deriving header guard CPP variables. This
176 directory is relative to the top level directory of the repository which
177 by default is determined by searching for a directory that contains .git,
178 .hg, or .svn but can also be controlled with the --repository flag. If
179 the specified directory does not exist, this flag is ignored.
181 Examples:
182 Assuming that src is the top level directory of the repository, the
183 header guard CPP variables for src/chrome/browser/ui/browser.h are:
185 No flag => CHROME_BROWSER_UI_BROWSER_H_
186 --root=chrome => BROWSER_UI_BROWSER_H_
187 --root=chrome/browser => UI_BROWSER_H_
189 linelength=digits
190 This is the allowed line length for the project. The default value is
191 80 characters.
193 Examples:
194 --linelength=120
196 recursive
197 Search for files to lint recursively. Each directory given in the list
198 of files to be linted is replaced by all files that descend from that
199 directory. Files with extensions not in the valid extensions list are
200 excluded.
202 exclude=path
203 Exclude the given path from the list of files to be linted. Relative
204 paths are evaluated relative to the current directory and shell globbing
205 is performed. This flag can be provided multiple times to exclude
206 multiple files.
208 Examples:
209 --exclude=one.cc
210 --exclude=src/*.cc
211 --exclude=src/*.cc --exclude=test/*.cc
213 extensions=extension,extension,...
214 The allowed file extensions that cpplint will check
216 Examples:
217 --extensions=%s
219 headers=extension,extension,...
220 The allowed header extensions that cpplint will consider to be header files
221 (by default, only files with extensions %s
222 will be assumed to be headers)
224 Examples:
225 --headers=%s
227 cpplint.py supports per-directory configurations specified in CPPLINT.cfg
228 files. A CPPLINT.cfg file can contain a number of key=value pairs.
229 Currently the following options are supported:
231 set noparent
232 filter=+filter1,-filter2,...
233 exclude_files=regex
234 linelength=80
235 root=subdir
237 "set noparent" option prevents cpplint from traversing directory tree
238 upwards looking for more .cfg files in parent directories. This option
239 is usually placed in the top-level project directory.
241 The "filter" option is similar in function to --filter flag. It specifies
242 message filters in addition to the |_DEFAULT_FILTERS| and those specified
243 through --filter command-line flag.
245 "exclude_files" allows to specify a regular expression to be matched against
246 a file name. If the expression matches, the file is skipped and not run
247 through the linter.
249 "linelength" specifies the allowed line length for the project.
251 The "root" option is similar in function to the --root flag (see example
252 above).
254 CPPLINT.cfg has an effect on files in the same directory and all
255 subdirectories, unless overridden by a nested configuration file.
257 Example file:
258 filter=-build/include_order,+build/include_alpha
259 exclude_files=.*\\.cc
261 The above example disables the build/include_order warning, enables
262 build/include_alpha, and excludes all .cc files from being
263 processed by the linter, in the current directory (where the .cfg
264 file is located) and all subdirectories.
265 """ % (list(GetAllExtensions()),
266 ','.join(list(GetAllExtensions())),
267 GetHeaderExtensions(),
268 ','.join(GetHeaderExtensions()))
270 # We categorize each error message we print. Here are the categories.
271 # We want an explicit list so we can list them all in cpplint --filter=.
272 # If you add a new error message with a new category, add it to the list
273 # here! cpplint_unittest.py should tell you if you forget to do this.
274 _ERROR_CATEGORIES = [
275 'build/class',
276 'build/c++11',
277 'build/c++14',
278 'build/c++tr1',
279 'build/deprecated',
280 'build/endif_comment',
281 'build/explicit_make_pair',
282 'build/forward_decl',
283 'build/header_guard',
284 'build/include',
285 'build/include_subdir',
286 'build/include_alpha',
287 'build/include_order',
288 'build/include_what_you_use',
289 'build/namespaces_literals',
290 'build/namespaces',
291 'build/printf_format',
292 'build/storage_class',
293 'legal/copyright',
294 'readability/alt_tokens',
295 'readability/braces',
296 'readability/casting',
297 'readability/check',
298 'readability/constructors',
299 'readability/fn_size',
300 'readability/inheritance',
301 'readability/multiline_comment',
302 'readability/multiline_string',
303 'readability/namespace',
304 'readability/nolint',
305 'readability/nul',
306 'readability/strings',
307 'readability/todo',
308 'readability/utf8',
309 'runtime/arrays',
310 'runtime/casting',
311 'runtime/explicit',
312 'runtime/int',
313 'runtime/init',
314 'runtime/invalid_increment',
315 'runtime/member_string_references',
316 'runtime/memset',
317 'runtime/indentation_namespace',
318 'runtime/operator',
319 'runtime/printf',
320 'runtime/printf_format',
321 'runtime/references',
322 'runtime/string',
323 'runtime/threadsafe_fn',
324 'runtime/vlog',
325 'whitespace/blank_line',
326 'whitespace/braces',
327 'whitespace/comma',
328 'whitespace/comments',
329 'whitespace/empty_conditional_body',
330 'whitespace/empty_if_body',
331 'whitespace/empty_loop_body',
332 'whitespace/end_of_line',
333 'whitespace/ending_newline',
334 'whitespace/forcolon',
335 'whitespace/indent',
336 'whitespace/line_length',
337 'whitespace/newline',
338 'whitespace/operators',
339 'whitespace/parens',
340 'whitespace/semicolon',
341 'whitespace/tab',
342 'whitespace/todo',
345 # These error categories are no longer enforced by cpplint, but for backwards-
346 # compatibility they may still appear in NOLINT comments.
347 _LEGACY_ERROR_CATEGORIES = [
348 'readability/streams',
349 'readability/function',
352 # The default state of the category filter. This is overridden by the --filter=
353 # flag. By default all errors are on, so only add here categories that should be
354 # off by default (i.e., categories that must be enabled by the --filter= flags).
355 # All entries here should start with a '-' or '+', as in the --filter= flag.
356 _DEFAULT_FILTERS = ['-build/include_alpha']
358 # The default list of categories suppressed for C (not C++) files.
359 _DEFAULT_C_SUPPRESSED_CATEGORIES = [
360 'readability/casting',
363 # The default list of categories suppressed for Linux Kernel files.
364 _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
365 'whitespace/tab',
368 # We used to check for high-bit characters, but after much discussion we
369 # decided those were OK, as long as they were in UTF-8 and didn't represent
370 # hard-coded international strings, which belong in a separate i18n file.
372 # C++ headers
373 _CPP_HEADERS = frozenset([
374 # Legacy
375 'algobase.h',
376 'algo.h',
377 'alloc.h',
378 'builtinbuf.h',
379 'bvector.h',
380 'complex.h',
381 'defalloc.h',
382 'deque.h',
383 'editbuf.h',
384 'fstream.h',
385 'function.h',
386 'hash_map',
387 'hash_map.h',
388 'hash_set',
389 'hash_set.h',
390 'hashtable.h',
391 'heap.h',
392 'indstream.h',
393 'iomanip.h',
394 'iostream.h',
395 'istream.h',
396 'iterator.h',
397 'list.h',
398 'map.h',
399 'multimap.h',
400 'multiset.h',
401 'ostream.h',
402 'pair.h',
403 'parsestream.h',
404 'pfstream.h',
405 'procbuf.h',
406 'pthread_alloc',
407 'pthread_alloc.h',
408 'rope',
409 'rope.h',
410 'ropeimpl.h',
411 'set.h',
412 'slist',
413 'slist.h',
414 'stack.h',
415 'stdiostream.h',
416 'stl_alloc.h',
417 'stl_relops.h',
418 'streambuf.h',
419 'stream.h',
420 'strfile.h',
421 'strstream.h',
422 'tempbuf.h',
423 'tree.h',
424 'type_traits.h',
425 'vector.h',
426 # 17.6.1.2 C++ library headers
427 'algorithm',
428 'array',
429 'atomic',
430 'bitset',
431 'chrono',
432 'codecvt',
433 'complex',
434 'condition_variable',
435 'deque',
436 'exception',
437 'forward_list',
438 'fstream',
439 'functional',
440 'future',
441 'initializer_list',
442 'iomanip',
443 'ios',
444 'iosfwd',
445 'iostream',
446 'istream',
447 'iterator',
448 'limits',
449 'list',
450 'locale',
451 'map',
452 'memory',
453 'mutex',
454 'new',
455 'numeric',
456 'ostream',
457 'queue',
458 'random',
459 'ratio',
460 'regex',
461 'scoped_allocator',
462 'set',
463 'sstream',
464 'stack',
465 'stdexcept',
466 'streambuf',
467 'string',
468 'strstream',
469 'system_error',
470 'thread',
471 'tuple',
472 'typeindex',
473 'typeinfo',
474 'type_traits',
475 'unordered_map',
476 'unordered_set',
477 'utility',
478 'valarray',
479 'vector',
480 # 17.6.1.2 C++ headers for C library facilities
481 'cassert',
482 'ccomplex',
483 'cctype',
484 'cerrno',
485 'cfenv',
486 'cfloat',
487 'cinttypes',
488 'ciso646',
489 'climits',
490 'clocale',
491 'cmath',
492 'csetjmp',
493 'csignal',
494 'cstdalign',
495 'cstdarg',
496 'cstdbool',
497 'cstddef',
498 'cstdint',
499 'cstdio',
500 'cstdlib',
501 'cstring',
502 'ctgmath',
503 'ctime',
504 'cuchar',
505 'cwchar',
506 'cwctype',
509 # Type names
510 _TYPES = re.compile(
511 r'^(?:'
512 # [dcl.type.simple]
513 r'(char(16_t|32_t)?)|wchar_t|'
514 r'bool|short|int|long|signed|unsigned|float|double|'
515 # [support.types]
516 r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
517 # [cstdint.syn]
518 r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
519 r'(u?int(max|ptr)_t)|'
520 r')$')
523 # These headers are excluded from [build/include] and [build/include_order]
524 # checks:
525 # - Anything not following google file name conventions (containing an
526 # uppercase character, such as Python.h or nsStringAPI.h, for example).
527 # - Lua headers.
528 _THIRD_PARTY_HEADERS_PATTERN = re.compile(
529 r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
531 # Pattern for matching FileInfo.BaseName() against test file name
532 _test_suffixes = ['_test', '_regtest', '_unittest']
533 _TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
535 # Pattern that matches only complete whitespace, possibly across multiple lines.
536 _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
538 # Assertion macros. These are defined in base/logging.h and
539 # testing/base/public/gunit.h.
540 _CHECK_MACROS = [
541 'DCHECK', 'CHECK',
542 'EXPECT_TRUE', 'ASSERT_TRUE',
543 'EXPECT_FALSE', 'ASSERT_FALSE',
546 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
547 _CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
549 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
550 ('>=', 'GE'), ('>', 'GT'),
551 ('<=', 'LE'), ('<', 'LT')]:
552 _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
553 _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
554 _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
555 _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
557 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
558 ('>=', 'LT'), ('>', 'LE'),
559 ('<=', 'GT'), ('<', 'GE')]:
560 _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
561 _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
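# Added illustration (a minimal sketch, not called by cpplint): each check
# macro plus comparison operator maps to its two-argument form, with the
# comparison inverted for the *_FALSE variants, so CHECK(a == b) can be
# rewritten as CHECK_EQ(a, b).
def _example_check_replacement():
  assert _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
  assert _CHECK_REPLACEMENT['EXPECT_TRUE']['<='] == 'EXPECT_LE'
  assert _CHECK_REPLACEMENT['EXPECT_FALSE']['<'] == 'EXPECT_GE'  # inverted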
563 # Alternative tokens and their replacements. For full list, see section 2.5
564 # Alternative tokens [lex.digraph] in the C++ standard.
566 # Digraphs (such as '%:') are not included here since it's a mess to
567 # match those on a word boundary.
568 _ALT_TOKEN_REPLACEMENT = {
569 'and': '&&',
570 'bitor': '|',
571 'or': '||',
572 'xor': '^',
573 'compl': '~',
574 'bitand': '&',
575 'and_eq': '&=',
576 'or_eq': '|=',
577 'xor_eq': '^=',
578 'not': '!',
579 'not_eq': '!='
582 # Compile regular expression that matches all the above keywords. The "[ =()]"
583 # bit is meant to avoid matching these keywords outside of boolean expressions.
585 # False positives include C-style multi-line comments and multi-line strings
586 # but those have always been troublesome for cpplint.
587 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
588 r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
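# Added illustration (a minimal sketch, not called by cpplint): the pattern
# above only fires when the keyword is preceded by one of " =()" and followed
# by a space, "(" or end of line, so ordinary identifiers are left alone.
def _example_alt_token_pattern():
  hit = _ALT_TOKEN_REPLACEMENT_PATTERN.search('if (foo and bar) {')
  assert hit and hit.group(1) == 'and'  # would be reported as '&&'
  assert _ALT_TOKEN_REPLACEMENT_PATTERN.search('int operand = 1;') is None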
591 # These constants define types of headers for use with
592 # _IncludeState.CheckNextIncludeOrder().
593 _C_SYS_HEADER = 1
594 _CPP_SYS_HEADER = 2
595 _LIKELY_MY_HEADER = 3
596 _POSSIBLE_MY_HEADER = 4
597 _OTHER_HEADER = 5
599 # These constants define the current inline assembly state
600 _NO_ASM = 0 # Outside of inline assembly block
601 _INSIDE_ASM = 1 # Inside inline assembly block
602 _END_ASM = 2 # Last line of inline assembly block
603 _BLOCK_ASM = 3 # The whole block is an inline assembly block
605 # Match start of assembly blocks
606 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
607 r'(?:\s+(volatile|__volatile__))?'
608 r'\s*[{(]')
610 # Match strings that indicate we're working on a C (not C++) file.
611 _SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
612 r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
614 # Match string that indicates we're working on a Linux Kernel file.
615 _SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
617 _regexp_compile_cache = {}
619 # {str, set(int)}: a map from error categories to sets of linenumbers
620 # on which those errors are expected and should be suppressed.
621 _error_suppressions = {}
623 # The root directory used for deriving header guard CPP variable.
624 # This is set by --root flag.
625 _root = None
627 # The top level repository directory. If set, _root is calculated relative to
628 # this directory instead of the directory containing version control artifacts.
629 # This is set by the --repository flag.
630 _repository = None
632 # Files to exclude from linting. This is set by the --exclude flag.
633 _excludes = None
635 # Whether to suppress PrintInfo messages
636 _quiet = False
638 # The allowed line length of files.
639 # This is set by --linelength flag.
640 _line_length = 80
642 try:
643 xrange(1, 0)
644 except NameError:
645 # -- pylint: disable=redefined-builtin
646 xrange = range
648 try:
649 unicode
650 except NameError:
651 # -- pylint: disable=redefined-builtin
652 basestring = unicode = str
654 try:
655 long(2)
656 except NameError:
657 # -- pylint: disable=redefined-builtin
658 long = int
660 if sys.version_info < (3,):
661 # -- pylint: disable=no-member
662 # BINARY_TYPE = str
663 itervalues = dict.itervalues
664 iteritems = dict.iteritems
665 else:
666 # BINARY_TYPE = bytes
667 itervalues = dict.values
668 iteritems = dict.items
670 def unicode_escape_decode(x):
671 if sys.version_info < (3,):
672 return codecs.unicode_escape_decode(x)[0]
673 else:
674 return x
676 # {str, bool}: a map from error categories to booleans which indicate if the
677 # category should be suppressed for every line.
678 _global_error_suppressions = {}
683 def ParseNolintSuppressions(filename, raw_line, linenum, error):
684 """Updates the global list of line error-suppressions.
686 Parses any NOLINT comments on the current line, updating the global
687 error_suppressions store. Reports an error if the NOLINT comment
688 was malformed.
690 Args:
691 filename: str, the name of the input file.
692 raw_line: str, the line of input text, with comments.
693 linenum: int, the number of the current line.
694 error: function, an error handler.
696 matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
697 if matched:
698 if matched.group(1):
699 suppressed_line = linenum + 1
700 else:
701 suppressed_line = linenum
702 category = matched.group(2)
703 if category in (None, '(*)'): # => "suppress all"
704 _error_suppressions.setdefault(None, set()).add(suppressed_line)
705 else:
706 if category.startswith('(') and category.endswith(')'):
707 category = category[1:-1]
708 if category in _ERROR_CATEGORIES:
709 _error_suppressions.setdefault(category, set()).add(suppressed_line)
710 elif category not in _LEGACY_ERROR_CATEGORIES:
711 error(filename, linenum, 'readability/nolint', 5,
712 'Unknown NOLINT error category: %s' % category)
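# Behaviour sketch (added for illustration): a source line ending in
# "// NOLINT(whitespace/tab)" makes this function record the line number under
# 'whitespace/tab' in _error_suppressions, so IsErrorSuppressedByNolint()
# below returns True for that category on that line. A bare "// NOLINT" or
# "// NOLINT(*)" suppresses every category on the line, "// NOLINTNEXTLINE"
# suppresses the following line instead, and an unrecognized category is
# itself reported as a readability/nolint error.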
715 def ProcessGlobalSuppresions(lines):
716 """Updates the list of global error suppressions.
718 Parses any lint directives in the file that have global effect.
720 Args:
721 lines: An array of strings, each representing a line of the file, with the
722 last element being empty if the file is terminated with a newline.
724 for line in lines:
725 if _SEARCH_C_FILE.search(line):
726 for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
727 _global_error_suppressions[category] = True
728 if _SEARCH_KERNEL_FILE.search(line):
729 for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
730 _global_error_suppressions[category] = True
733 def ResetNolintSuppressions():
734 """Resets the set of NOLINT suppressions to empty."""
735 _error_suppressions.clear()
736 _global_error_suppressions.clear()
739 def IsErrorSuppressedByNolint(category, linenum):
740 """Returns true if the specified error category is suppressed on this line.
742 Consults the global error_suppressions map populated by
743 ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
745 Args:
746 category: str, the category of the error.
747 linenum: int, the current line number.
748 Returns:
749 bool, True iff the error should be suppressed due to a NOLINT comment or
750 global suppression.
752 return (_global_error_suppressions.get(category, False) or
753 linenum in _error_suppressions.get(category, set()) or
754 linenum in _error_suppressions.get(None, set()))
757 def Match(pattern, s):
758 """Matches the string with the pattern, caching the compiled regexp."""
759 # The regexp compilation caching is inlined in both Match and Search for
760 # performance reasons; factoring it out into a separate function turns out
761 # to be noticeably expensive.
762 if pattern not in _regexp_compile_cache:
763 _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
764 return _regexp_compile_cache[pattern].match(s)
767 def ReplaceAll(pattern, rep, s):
768 """Replaces instances of pattern in a string with a replacement.
770 The compiled regex is kept in a cache shared by Match and Search.
772 Args:
773 pattern: regex pattern
774 rep: replacement text
775 s: search string
777 Returns:
778 string with replacements made (or original string if no replacements)
780 if pattern not in _regexp_compile_cache:
781 _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
782 return _regexp_compile_cache[pattern].sub(rep, s)
785 def Search(pattern, s):
786 """Searches the string for the pattern, caching the compiled regexp."""
787 if pattern not in _regexp_compile_cache:
788 _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
789 return _regexp_compile_cache[pattern].search(s)
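# Added illustration (a minimal sketch, not called by cpplint) of the cached
# helpers above:
def _example_regexp_helpers():
  assert Match(r'\d+', '42 items')             # anchored at the start
  assert not Match(r'\d+', 'items: 42')
  assert Search(r'\d+', 'items: 42')           # matches anywhere
  assert ReplaceAll(r'\s+', ' ', 'a   b') == 'a b'
  assert r'\d+' in _regexp_compile_cache       # compiled once, then reused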
792 def _IsSourceExtension(s):
793 """File extension (excluding dot) matches a source file extension."""
794 return s in GetNonHeaderExtensions()
797 class _IncludeState(object):
798 """Tracks line numbers for includes, and the order in which includes appear.
800 include_list contains a list of lists of (header, line number) pairs.
801 It's a list of lists rather than just one flat list to make it
802 easier to update across preprocessor boundaries.
804 Call CheckNextIncludeOrder() once for each header in the file, passing
805 in the type constants defined above. Calls in an illegal order will
806 raise an _IncludeError with an appropriate error message.
809 # self._section will move monotonically through this set. If it ever
810 # needs to move backwards, CheckNextIncludeOrder will raise an error.
811 _INITIAL_SECTION = 0
812 _MY_H_SECTION = 1
813 _C_SECTION = 2
814 _CPP_SECTION = 3
815 _OTHER_H_SECTION = 4
817 _TYPE_NAMES = {
818 _C_SYS_HEADER: 'C system header',
819 _CPP_SYS_HEADER: 'C++ system header',
820 _LIKELY_MY_HEADER: 'header this file implements',
821 _POSSIBLE_MY_HEADER: 'header this file may implement',
822 _OTHER_HEADER: 'other header',
824 _SECTION_NAMES = {
825 _INITIAL_SECTION: "... nothing. (This can't be an error.)",
826 _MY_H_SECTION: 'a header this file implements',
827 _C_SECTION: 'C system header',
828 _CPP_SECTION: 'C++ system header',
829 _OTHER_H_SECTION: 'other header',
832 def __init__(self):
833 self.include_list = [[]]
834 self._section = None
835 self._last_header = None
836 self.ResetSection('')
838 def FindHeader(self, header):
839 """Check if a header has already been included.
841 Args:
842 header: header to check.
843 Returns:
844 Line number of previous occurrence, or -1 if the header has not
845 been seen before.
847 for section_list in self.include_list:
848 for f in section_list:
849 if f[0] == header:
850 return f[1]
851 return -1
853 def ResetSection(self, directive):
854 """Reset section checking for preprocessor directive.
856 Args:
857 directive: preprocessor directive (e.g. "if", "else").
859 # The name of the current section.
860 self._section = self._INITIAL_SECTION
861 # The path of last found header.
862 self._last_header = ''
864 # Update list of includes. Note that we never pop from the
865 # include list.
866 if directive in ('if', 'ifdef', 'ifndef'):
867 self.include_list.append([])
868 elif directive in ('else', 'elif'):
869 self.include_list[-1] = []
871 def SetLastHeader(self, header_path):
872 self._last_header = header_path
874 def CanonicalizeAlphabeticalOrder(self, header_path):
875 """Returns a path canonicalized for alphabetical comparison.
877 - replaces "-" with "_" so they both cmp the same.
878 - removes '-inl' since we don't require them to be after the main header.
879 - lowercase everything, just in case.
881 Args:
882 header_path: Path to be canonicalized.
884 Returns:
885 Canonicalized path.
887 return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
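# Behaviour sketch (added for illustration): canonicalization makes '-' and
# '_' compare equally and drops an '-inl.h' suffix, e.g.
#
#   CanonicalizeAlphabeticalOrder('Foo-Bar-inl.h')  => 'foo_bar.h'
#   CanonicalizeAlphabeticalOrder('foo-bar.h')      => 'foo_bar.h'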
889 def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
890 """Check if a header is in alphabetical order with the previous header.
892 Args:
893 clean_lines: A CleansedLines instance containing the file.
894 linenum: The number of the line to check.
895 header_path: Canonicalized header to be checked.
897 Returns:
898 Returns true if the header is in alphabetical order.
900 # If previous section is different from current section, _last_header will
901 # be reset to empty string, so it's always less than current header.
903 # If previous line was a blank line, assume that the headers are
904 # intentionally sorted the way they are.
905 if (self._last_header > header_path and
906 Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
907 return False
908 return True
910 def CheckNextIncludeOrder(self, header_type):
911 """Returns a non-empty error message if the next header is out of order.
913 This function also updates the internal state to be ready to check
914 the next include.
916 Args:
917 header_type: One of the _XXX_HEADER constants defined above.
919 Returns:
920 The empty string if the header is in the right order, or an
921 error message describing what's wrong.
924 error_message = ('Found %s after %s' %
925 (self._TYPE_NAMES[header_type],
926 self._SECTION_NAMES[self._section]))
928 last_section = self._section
930 if header_type == _C_SYS_HEADER:
931 if self._section <= self._C_SECTION:
932 self._section = self._C_SECTION
933 else:
934 self._last_header = ''
935 return error_message
936 elif header_type == _CPP_SYS_HEADER:
937 if self._section <= self._CPP_SECTION:
938 self._section = self._CPP_SECTION
939 else:
940 self._last_header = ''
941 return error_message
942 elif header_type == _LIKELY_MY_HEADER:
943 if self._section <= self._MY_H_SECTION:
944 self._section = self._MY_H_SECTION
945 else:
946 self._section = self._OTHER_H_SECTION
947 elif header_type == _POSSIBLE_MY_HEADER:
948 if self._section <= self._MY_H_SECTION:
949 self._section = self._MY_H_SECTION
950 else:
951 # This will always be the fallback because we're not sure
952 # enough that the header is associated with this file.
953 self._section = self._OTHER_H_SECTION
954 else:
955 assert header_type == _OTHER_HEADER
956 self._section = self._OTHER_H_SECTION
958 if last_section != self._section:
959 self._last_header = ''
961 return ''
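# Added illustration (a minimal sketch, not called by cpplint): the expected
# progression is my-header, C system headers, C++ system headers, then other
# headers; moving backwards yields a "Found ... after ..." message.
def _example_include_order():
  state = _IncludeState()
  assert state.CheckNextIncludeOrder(_LIKELY_MY_HEADER) == ''
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) == ''
  assert state.CheckNextIncludeOrder(_CPP_SYS_HEADER) == ''
  assert state.CheckNextIncludeOrder(_OTHER_HEADER) == ''
  # A C system header after the other-headers section is out of order:
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) != ''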
964 class _CppLintState(object):
965 """Maintains module-wide state.."""
967 def __init__(self):
968 self.verbose_level = 1 # global setting.
969 self.error_count = 0 # global count of reported errors
970 # filters to apply when emitting error messages
971 self.filters = _DEFAULT_FILTERS[:]
972 # backup of filter list. Used to restore the state after each file.
973 self._filters_backup = self.filters[:]
974 self.counting = 'total' # In what way are we counting errors?
975 self.errors_by_category = {} # string to int dict storing error counts
977 # output format:
978 # "emacs" - format that emacs can parse (default)
979 # "eclipse" - format that eclipse can parse
980 # "vs7" - format that Microsoft Visual Studio 7 can parse
981 # "junit" - format that Jenkins, Bamboo, etc can parse
982 self.output_format = 'emacs'
984 # For JUnit output, save errors and failures until the end so that they
985 # can be written into the XML
986 self._junit_errors = []
987 self._junit_failures = []
989 def SetOutputFormat(self, output_format):
990 """Sets the output format for errors."""
991 self.output_format = output_format
993 def SetVerboseLevel(self, level):
994 """Sets the module's verbosity, and returns the previous setting."""
995 last_verbose_level = self.verbose_level
996 self.verbose_level = level
997 return last_verbose_level
999 def SetCountingStyle(self, counting_style):
1000 """Sets the module's counting options."""
1001 self.counting = counting_style
1003 def SetFilters(self, filters):
1004 """Sets the error-message filters.
1006 These filters are applied when deciding whether to emit a given
1007 error message.
1009 Args:
1010 filters: A string of comma-separated filters (eg "+whitespace/indent").
1011 Each filter should start with + or -; else we die.
1013 Raises:
1014 ValueError: The comma-separated filters did not all start with '+' or '-'.
1015 E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
1017 # Default filters always have less priority than the flag ones.
1018 self.filters = _DEFAULT_FILTERS[:]
1019 self.AddFilters(filters)
1021 def AddFilters(self, filters):
1022 """ Adds more filters to the existing list of error-message filters. """
1023 for filt in filters.split(','):
1024 clean_filt = filt.strip()
1025 if clean_filt:
1026 self.filters.append(clean_filt)
1027 for filt in self.filters:
1028 if not (filt.startswith('+') or filt.startswith('-')):
1029 raise ValueError('Every filter in --filters must start with + or -'
1030 ' (%s does not)' % filt)
1032 def BackupFilters(self):
1033 """ Saves the current filter list to backup storage."""
1034 self._filters_backup = self.filters[:]
1036 def RestoreFilters(self):
1037 """ Restores filters previously backed up."""
1038 self.filters = self._filters_backup[:]
1040 def ResetErrorCounts(self):
1041 """Sets the module's error statistic back to zero."""
1042 self.error_count = 0
1043 self.errors_by_category = {}
1045 def IncrementErrorCount(self, category):
1046 """Bumps the module's error statistic."""
1047 self.error_count += 1
1048 if self.counting in ('toplevel', 'detailed'):
1049 if self.counting != 'detailed':
1050 category = category.split('/')[0]
1051 if category not in self.errors_by_category:
1052 self.errors_by_category[category] = 0
1053 self.errors_by_category[category] += 1
1055 def PrintErrorCounts(self):
1056 """Print a summary of errors by category, and the total."""
1057 for category, count in sorted(iteritems(self.errors_by_category)):
1058 self.PrintInfo('Category \'%s\' errors found: %d\n' %
1059 (category, count))
1060 if self.error_count > 0:
1061 self.PrintInfo('Total errors found: %d\n' % self.error_count)
1063 def PrintInfo(self, message):
1064 if not _quiet and self.output_format != 'junit':
1065 sys.stderr.write(message)
1067 def PrintError(self, message):
1068 if self.output_format == 'junit':
1069 self._junit_errors.append(message)
1070 else:
1071 sys.stderr.write(message)
1073 def AddJUnitFailure(self, filename, linenum, message, category, confidence):
1074 self._junit_failures.append((filename, linenum, message, category,
1075 confidence))
1077 def FormatJUnitXML(self):
1078 num_errors = len(self._junit_errors)
1079 num_failures = len(self._junit_failures)
1081 testsuite = xml.etree.ElementTree.Element('testsuite')
1082 testsuite.attrib['name'] = 'cpplint'
1083 testsuite.attrib['errors'] = str(num_errors)
1084 testsuite.attrib['failures'] = str(num_failures)
1086 if num_errors == 0 and num_failures == 0:
1087 testsuite.attrib['tests'] = str(1)
1088 xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
1090 else:
1091 testsuite.attrib['tests'] = str(num_errors + num_failures)
1092 if num_errors > 0:
1093 testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
1094 testcase.attrib['name'] = 'errors'
1095 error = xml.etree.ElementTree.SubElement(testcase, 'error')
1096 error.text = '\n'.join(self._junit_errors)
1097 if num_failures > 0:
1098 # Group failures by file
1099 failed_file_order = []
1100 failures_by_file = {}
1101 for failure in self._junit_failures:
1102 failed_file = failure[0]
1103 if failed_file not in failed_file_order:
1104 failed_file_order.append(failed_file)
1105 failures_by_file[failed_file] = []
1106 failures_by_file[failed_file].append(failure)
1107 # Create a testcase for each file
1108 for failed_file in failed_file_order:
1109 failures = failures_by_file[failed_file]
1110 testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
1111 testcase.attrib['name'] = failed_file
1112 failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
1113 template = '{0}: {1} [{2}] [{3}]'
1114 texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
1115 failure.text = '\n'.join(texts)
1117 xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
1118 return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
1121 _cpplint_state = _CppLintState()
1124 def _OutputFormat():
1125 """Gets the module's output format."""
1126 return _cpplint_state.output_format
1129 def _SetOutputFormat(output_format):
1130 """Sets the module's output format."""
1131 _cpplint_state.SetOutputFormat(output_format)
1134 def _VerboseLevel():
1135 """Returns the module's verbosity setting."""
1136 return _cpplint_state.verbose_level
1139 def _SetVerboseLevel(level):
1140 """Sets the module's verbosity, and returns the previous setting."""
1141 return _cpplint_state.SetVerboseLevel(level)
1144 def _SetCountingStyle(level):
1145 """Sets the module's counting options."""
1146 _cpplint_state.SetCountingStyle(level)
1149 def _Filters():
1150 """Returns the module's list of output filters, as a list."""
1151 return _cpplint_state.filters
1154 def _SetFilters(filters):
1155 """Sets the module's error-message filters.
1157 These filters are applied when deciding whether to emit a given
1158 error message.
1160 Args:
1161 filters: A string of comma-separated filters (eg "whitespace/indent").
1162 Each filter should start with + or -; else we die.
1164 _cpplint_state.SetFilters(filters)
1166 def _AddFilters(filters):
1167 """Adds more filter overrides.
1169 Unlike _SetFilters, this function does not reset the current list of filters
1170 available.
1172 Args:
1173 filters: A string of comma-separated filters (eg "whitespace/indent").
1174 Each filter should start with + or -; else we die.
1176 _cpplint_state.AddFilters(filters)
1178 def _BackupFilters():
1179 """ Saves the current filter list to backup storage."""
1180 _cpplint_state.BackupFilters()
1182 def _RestoreFilters():
1183 """ Restores filters previously backed up."""
1184 _cpplint_state.RestoreFilters()
1186 class _FunctionState(object):
1187 """Tracks current function name and the number of lines in its body."""
1189 _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
1190 _TEST_TRIGGER = 400 # about 60% more than _NORMAL_TRIGGER.
1192 def __init__(self):
1193 self.in_a_function = False
1194 self.lines_in_function = 0
1195 self.current_function = ''
1197 def Begin(self, function_name):
1198 """Start analyzing function body.
1200 Args:
1201 function_name: The name of the function being tracked.
1203 self.in_a_function = True
1204 self.lines_in_function = 0
1205 self.current_function = function_name
1207 def Count(self):
1208 """Count line in current function body."""
1209 if self.in_a_function:
1210 self.lines_in_function += 1
1212 def Check(self, error, filename, linenum):
1213 """Report if too many lines in function body.
1215 Args:
1216 error: The function to call with any errors found.
1217 filename: The name of the current file.
1218 linenum: The number of the line to check.
1220 if not self.in_a_function:
1221 return
1223 if Match(r'T(EST|est)', self.current_function):
1224 base_trigger = self._TEST_TRIGGER
1225 else:
1226 base_trigger = self._NORMAL_TRIGGER
1227 trigger = base_trigger * 2**_VerboseLevel()
1229 if self.lines_in_function > trigger:
1230 error_level = int(math.log(self.lines_in_function / base_trigger, 2))
1231 # 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, 4000 => 4, 8000 => 5, ... (for the non-test base trigger of 250)
1232 if error_level > 5:
1233 error_level = 5
1234 error(filename, linenum, 'readability/fn_size', error_level,
1235 'Small and focused functions are preferred:'
1236 ' %s has %d non-comment lines'
1237 ' (error triggered by exceeding %d lines).' % (
1238 self.current_function, self.lines_in_function, trigger))
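# Worked example (added for illustration): with --v=0 a non-test function may
# have 250 * 2**0 = 250 non-comment lines. A 600-line function is then
# reported at error level int(log2(600 / 250)) = 1, a 2500-line function at
# int(log2(10)) = 3, and anything beyond 8000 lines is capped at level 5.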
1240 def End(self):
1241 """Stop analyzing function body."""
1242 self.in_a_function = False
1245 class _IncludeError(Exception):
1246 """Indicates a problem with the include order in a file."""
1247 pass
1250 class FileInfo(object):
1251 """Provides utility functions for filenames.
1253 FileInfo provides easy access to the components of a file's path
1254 relative to the project root.
1257 def __init__(self, filename):
1258 self._filename = filename
1260 def FullName(self):
1261 """Make Windows paths like Unix."""
1262 return os.path.abspath(self._filename).replace('\\', '/')
1264 def RepositoryName(self):
1265 r"""FullName after removing the local path to the repository.
1267 If we have a real absolute path name here we can try to do something smart:
1268 detecting the root of the checkout and truncating /path/to/checkout from
1269 the name so that we get header guards that don't include things like
1270 "C:\Documents and Settings\..." or "/home/username/..." in them and thus
1271 people on different computers who have checked the source out to different
1272 locations won't see bogus errors.
1274 fullname = self.FullName()
1276 if os.path.exists(fullname):
1277 project_dir = os.path.dirname(fullname)
1279 # If the user specified a repository path, it exists, and the file is
1280 # contained in it, use the specified repository path
1281 if _repository:
1282 repo = FileInfo(_repository).FullName()
1283 root_dir = project_dir
1284 while os.path.exists(root_dir):
1285 # allow case insensitive compare on Windows
1286 if os.path.normcase(root_dir) == os.path.normcase(repo):
1287 return os.path.relpath(fullname, root_dir).replace('\\', '/')
1288 one_up_dir = os.path.dirname(root_dir)
1289 if one_up_dir == root_dir:
1290 break
1291 root_dir = one_up_dir
1293 if os.path.exists(os.path.join(project_dir, ".svn")):
1294 # If there's a .svn file in the current directory, we recursively look
1295 # up the directory tree for the top of the SVN checkout
1296 root_dir = project_dir
1297 one_up_dir = os.path.dirname(root_dir)
1298 while os.path.exists(os.path.join(one_up_dir, ".svn")):
1299 root_dir = os.path.dirname(root_dir)
1300 one_up_dir = os.path.dirname(one_up_dir)
1302 prefix = os.path.commonprefix([root_dir, project_dir])
1303 return fullname[len(prefix) + 1:]
1305 # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
1306 # searching up from the current path.
1307 root_dir = current_dir = os.path.dirname(fullname)
1308 while current_dir != os.path.dirname(current_dir):
1309 if (os.path.exists(os.path.join(current_dir, ".git")) or
1310 os.path.exists(os.path.join(current_dir, ".hg")) or
1311 os.path.exists(os.path.join(current_dir, ".svn"))):
1312 root_dir = current_dir
1313 current_dir = os.path.dirname(current_dir)
1315 if (os.path.exists(os.path.join(root_dir, ".git")) or
1316 os.path.exists(os.path.join(root_dir, ".hg")) or
1317 os.path.exists(os.path.join(root_dir, ".svn"))):
1318 prefix = os.path.commonprefix([root_dir, project_dir])
1319 return fullname[len(prefix) + 1:]
1321 # Don't know what to do; header guard warnings may be wrong...
1322 return fullname
1324 def Split(self):
1325 """Splits the file into the directory, basename, and extension.
1327 For 'chrome/browser/browser.cc', Split() would
1328 return ('chrome/browser', 'browser', '.cc')
1330 Returns:
1331 A tuple of (directory, basename, extension).
1334 googlename = self.RepositoryName()
1335 project, rest = os.path.split(googlename)
1336 return (project,) + os.path.splitext(rest)
1338 def BaseName(self):
1339 """File base name - text after the final slash, before the final period."""
1340 return self.Split()[1]
1342 def Extension(self):
1343 """File extension - text following the final period, includes that period."""
1344 return self.Split()[2]
1346 def NoExtension(self):
1347 """File has no source file extension."""
1348 return '/'.join(self.Split()[0:2])
1350 def IsSource(self):
1351 """File has a source file extension."""
1352 return _IsSourceExtension(self.Extension()[1:])
1355 def _ShouldPrintError(category, confidence, linenum):
1356 """If confidence >= verbose, category passes filter and is not suppressed."""
1358 # There are three ways we might decide not to print an error message:
1359 # a "NOLINT(category)" comment appears in the source,
1360 # the verbosity level isn't high enough, or the filters filter it out.
1361 if IsErrorSuppressedByNolint(category, linenum):
1362 return False
1364 if confidence < _cpplint_state.verbose_level:
1365 return False
1367 is_filtered = False
1368 for one_filter in _Filters():
1369 if one_filter.startswith('-'):
1370 if category.startswith(one_filter[1:]):
1371 is_filtered = True
1372 elif one_filter.startswith('+'):
1373 if category.startswith(one_filter[1:]):
1374 is_filtered = False
1375 else:
1376 assert False # should have been checked for in SetFilter.
1377 if is_filtered:
1378 return False
1380 return True
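# Behaviour sketch (added for illustration) of the left-to-right filter pass
# above: with --filter=-whitespace,+whitespace/braces the loop first marks
# every 'whitespace/...' category as filtered and then un-filters only
# 'whitespace/braces', so
#
#   'whitespace/indent'  -> suppressed
#   'whitespace/braces'  -> printed
#   'runtime/int'        -> printed (matches no filter)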
1383 def Error(filename, linenum, category, confidence, message):
1384 """Logs the fact we've found a lint error.
1386 We log where the error was found, and also our confidence in the error,
1387 that is, how certain we are this is a legitimate style regression, and
1388 not a misidentification or a use that's sometimes justified.
1390 False positives can be suppressed by the use of
1391 "cpplint(category)" comments on the offending line. These are
1392 parsed into _error_suppressions.
1394 Args:
1395 filename: The name of the file containing the error.
1396 linenum: The number of the line containing the error.
1397 category: A string used to describe the "category" this bug
1398 falls under: "whitespace", say, or "runtime". Categories
1399 may have a hierarchy separated by slashes: "whitespace/indent".
1400 confidence: A number from 1-5 representing a confidence score for
1401 the error, with 5 meaning that we are certain of the problem,
1402 and 1 meaning that it could be a legitimate construct.
1403 message: The error message.
1405 if _ShouldPrintError(category, confidence, linenum):
1406 _cpplint_state.IncrementErrorCount(category)
1407 if _cpplint_state.output_format == 'vs7':
1408 _cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % (
1409 filename, linenum, message, category, confidence))
1410 elif _cpplint_state.output_format == 'eclipse':
1411 sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
1412 filename, linenum, message, category, confidence))
1413 elif _cpplint_state.output_format == 'junit':
1414 _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
1415 confidence)
1416 else:
1417 final_message = '%s:%s: %s [%s] [%d]\n' % (
1418 filename, linenum, message, category, confidence)
1419 sys.stderr.write(final_message)
1421 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
1422 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
1423 r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
1424 # Match a single C style comment on the same line.
1425 _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
1426 # Matches multi-line C style comments.
1427 # This RE is a little bit more complicated than one might expect, because we
1428 # also have to take care of the surrounding spaces so we can handle comments
1429 # inside statements better.
1430 # The current rule is: we only clear spaces from both sides when we're at the
1431 # end of the line. Otherwise, we try to remove spaces from the right side; if
1432 # that doesn't work, we try the left side, but only if there is a non-word
1433 # character on the right.
1434 _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
1435 r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
1436 _RE_PATTERN_C_COMMENTS + r'\s+|' +
1437 r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
1438 _RE_PATTERN_C_COMMENTS + r')')
1441 def IsCppString(line):
1442 """Does line terminate so, that the next symbol is in string constant.
1444 This function does not consider single-line nor multi-line comments.
1446 Args:
1447 line: is a partial line of code starting from the 0..n.
1449 Returns:
1450 True, if next character appended to 'line' is inside a
1451 string constant.
1454 line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
1455 return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
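# Added illustration (a minimal sketch, not called by cpplint) of the
# quote-counting heuristic above:
def _example_is_cpp_string():
  assert IsCppString('printf("unterminated')         # odd number of quotes
  assert not IsCppString('printf("done")')           # quotes are balanced
  assert not IsCppString('string s = "a\\"b"')       # escaped quote discounted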
1458 def CleanseRawStrings(raw_lines):
1459 """Removes C++11 raw strings from lines.
1461 Before:
1462 static const char kData[] = R"(
1463 multi-line string
1466 After:
1467 static const char kData[] = ""
1468 (replaced by blank line)
1471 Args:
1472 raw_lines: list of raw lines.
1474 Returns:
1475 list of lines with C++11 raw strings replaced by empty strings.
1478 delimiter = None
1479 lines_without_raw_strings = []
1480 for line in raw_lines:
1481 if delimiter:
1482 # Inside a raw string, look for the end
1483 end = line.find(delimiter)
1484 if end >= 0:
1485 # Found the end of the string, match leading space for this
1486 # line and resume copying the original lines, and also insert
1487 # a "" on the last line.
1488 leading_space = Match(r'^(\s*)\S', line)
1489 line = leading_space.group(1) + '""' + line[end + len(delimiter):]
1490 delimiter = None
1491 else:
1492 # Haven't found the end yet, append a blank line.
1493 line = '""'
1495 # Look for beginning of a raw string, and replace them with
1496 # empty strings. This is done in a loop to handle multiple raw
1497 # strings on the same line.
1498 while delimiter is None:
1499 # Look for beginning of a raw string.
1500 # See 2.14.15 [lex.string] for syntax.
1502 # Once we have matched a raw string, we check the prefix of the
1503 # line to make sure that the line is not part of a single line
1504 # comment. It's done this way because we remove raw strings
1505 # before removing comments as opposed to removing comments
1506 # before removing raw strings. This is because there are some
1507 # cpplint checks that requires the comments to be preserved, but
1508 # we don't want to check comments that are inside raw strings.
1509 matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
1510 if (matched and
1511 not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
1512 matched.group(1))):
1513 delimiter = ')' + matched.group(2) + '"'
1515 end = matched.group(3).find(delimiter)
1516 if end >= 0:
1517 # Raw string ended on same line
1518 line = (matched.group(1) + '""' +
1519 matched.group(3)[end + len(delimiter):])
1520 delimiter = None
1521 else:
1522 # Start of a multi-line raw string
1523 line = matched.group(1) + '""'
1524 else:
1525 break
1527 lines_without_raw_strings.append(line)
1529 # TODO(unknown): if delimiter is not None here, we might want to
1530 # emit a warning for unterminated string.
1531 return lines_without_raw_strings
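# Behaviour sketch (added for illustration): a multi-line raw string collapses
# to "" and its continuation lines become empty strings, e.g. the input lines
#
#   const char kData[] = R"delim(
#     line one
#   )delim";
#
# come back as
#
#   const char kData[] = ""
#   ""
#   "";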
1534 def FindNextMultiLineCommentStart(lines, lineix):
1535 """Find the beginning marker for a multiline comment."""
1536 while lineix < len(lines):
1537 if lines[lineix].strip().startswith('/*'):
1538 # Only return this marker if the comment goes beyond this line
1539 if lines[lineix].strip().find('*/', 2) < 0:
1540 return lineix
1541 lineix += 1
1542 return len(lines)
1545 def FindNextMultiLineCommentEnd(lines, lineix):
1546 """We are inside a comment, find the end marker."""
1547 while lineix < len(lines):
1548 if lines[lineix].strip().endswith('*/'):
1549 return lineix
1550 lineix += 1
1551 return len(lines)
1554 def RemoveMultiLineCommentsFromRange(lines, begin, end):
1555 """Clears a range of lines for multi-line comments."""
1556 # Having /**/ dummy comments makes the lines non-empty, so we will not get
1557 # unnecessary blank line warnings later in the code.
1558 for i in range(begin, end):
1559 lines[i] = '/**/'
1562 def RemoveMultiLineComments(filename, lines, error):
1563 """Removes multiline (c-style) comments from lines."""
1564 lineix = 0
1565 while lineix < len(lines):
1566 lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
1567 if lineix_begin >= len(lines):
1568 return
1569 lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
1570 if lineix_end >= len(lines):
1571 error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
1572 'Could not find end of multi-line comment')
1573 return
1574 RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
1575 lineix = lineix_end + 1
1578 def CleanseComments(line):
1579 """Removes //-comments and single-line C-style /* */ comments.
1581 Args:
1582 line: A line of C++ source.
1584 Returns:
1585 The line with single-line comments removed.
1587 commentpos = line.find('//')
1588 if commentpos != -1 and not IsCppString(line[:commentpos]):
1589 line = line[:commentpos].rstrip()
1590 # get rid of /* ... */
1591 return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
1594 class CleansedLines(object):
1595 """Holds 4 copies of all lines with different preprocessing applied to them.
1597 1) elided member contains lines without strings and comments.
1598 2) lines member contains lines without comments.
1599 3) raw_lines member contains all the lines without processing.
1600 4) lines_without_raw_strings member is the same as raw_lines, but with C++11 raw
1601 strings removed.
1602 All these members are of <type 'list'>, and of the same length.
1605 def __init__(self, lines):
1606 self.elided = []
1607 self.lines = []
1608 self.raw_lines = lines
1609 self.num_lines = len(lines)
1610 self.lines_without_raw_strings = CleanseRawStrings(lines)
1611 for linenum in range(len(self.lines_without_raw_strings)):
1612 self.lines.append(CleanseComments(
1613 self.lines_without_raw_strings[linenum]))
1614 elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
1615 self.elided.append(CleanseComments(elided))
1617 def NumLines(self):
1618 """Returns the number of lines represented."""
1619 return self.num_lines
1621 @staticmethod
1622 def _CollapseStrings(elided):
1623 """Collapses strings and chars on a line to simple "" or '' blocks.
1625 We nix strings first so we're not fooled by text like '"http://"'
1627 Args:
1628 elided: The line being processed.
1630 Returns:
1631 The line with collapsed strings.
1633 if _RE_PATTERN_INCLUDE.match(elided):
1634 return elided
1636 # Remove escaped characters first to make quote/single quote collapsing
1637 # basic. Things that look like escaped characters shouldn't occur
1638 # outside of strings and chars.
1639 elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
1641 # Replace quoted strings and digit separators. Both single quotes
1642 # and double quotes are processed in the same loop, otherwise
1643 # nested quotes wouldn't work.
1644 collapsed = ''
1645 while True:
1646 # Find the first quote character
1647 match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
1648 if not match:
1649 collapsed += elided
1650 break
1651 head, quote, tail = match.groups()
1653 if quote == '"':
1654 # Collapse double quoted strings
1655 second_quote = tail.find('"')
1656 if second_quote >= 0:
1657 collapsed += head + '""'
1658 elided = tail[second_quote + 1:]
1659 else:
1660 # Unmatched double quote, don't bother processing the rest
1661 # of the line since this is probably a multiline string.
1662 collapsed += elided
1663 break
1664 else:
1665 # Found single quote, check nearby text to eliminate digit separators.
1667 # There is no special handling for floating point here, because
1668 # the integer/fractional/exponent parts would all be parsed
1669 # correctly as long as there are digits on both sides of the
1670 # separator. So we are fine as long as we don't see something
1671 # like "0.'3" (gcc 4.9.0 will not allow this literal).
1672 if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
1673 match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
1674 collapsed += head + match_literal.group(1).replace("'", '')
1675 elided = match_literal.group(2)
1676 else:
1677 second_quote = tail.find('\'')
1678 if second_quote >= 0:
1679 collapsed += head + "''"
1680 elided = tail[second_quote + 1:]
1681 else:
1682 # Unmatched single quote
1683 collapsed += elided
1684 break
1686 return collapsed
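# Behaviour sketch (added for illustration): string and character literals
# collapse to empty quotes and C++14 digit separators disappear, so later
# checks see a plain token, e.g.
#
#   _CollapseStrings('s = "http://example.com";')  => 's = "";'
#   _CollapseStrings("mask = 0xFF'FF;")            => "mask = 0xFFFF;"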
1689 def FindEndOfExpressionInLine(line, startpos, stack):
1690 """Find the position just after the end of current parenthesized expression.
1692 Args:
1693 line: a CleansedLines line.
1694 startpos: start searching at this position.
1695 stack: nesting stack at startpos.
1697 Returns:
1698 On finding matching end: (index just after matching end, None)
1699 On finding an unclosed expression: (-1, None)
1700 Otherwise: (-1, new stack at end of this line)
1702 for i in xrange(startpos, len(line)):
1703 char = line[i]
1704 if char in '([{':
1705 # Found start of parenthesized expression, push to expression stack
1706 stack.append(char)
1707 elif char == '<':
1708 # Found potential start of template argument list
1709 if i > 0 and line[i - 1] == '<':
1710 # Left shift operator
1711 if stack and stack[-1] == '<':
1712 stack.pop()
1713 if not stack:
1714 return (-1, None)
1715 elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
1716 # operator<, don't add to stack
1717 continue
1718 else:
1719 # Tentative start of template argument list
1720 stack.append('<')
1721 elif char in ')]}':
1722 # Found end of parenthesized expression.
1724 # If we are currently expecting a matching '>', the pending '<'
1725 # must have been an operator. Remove them from expression stack.
1726 while stack and stack[-1] == '<':
1727 stack.pop()
1728 if not stack:
1729 return (-1, None)
1730 if ((stack[-1] == '(' and char == ')') or
1731 (stack[-1] == '[' and char == ']') or
1732 (stack[-1] == '{' and char == '}')):
1733 stack.pop()
1734 if not stack:
1735 return (i + 1, None)
1736 else:
1737 # Mismatched parentheses
1738 return (-1, None)
1739 elif char == '>':
1740 # Found potential end of template argument list.
1742 # Ignore "->" and operator functions
1743 if (i > 0 and
1744 (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
1745 continue
1747 # Pop the stack if there is a matching '<'. Otherwise, ignore
1748 # this '>' since it must be an operator.
1749 if stack:
1750 if stack[-1] == '<':
1751 stack.pop()
1752 if not stack:
1753 return (i + 1, None)
1754 elif char == ';':
1755 # Found something that looks like the end of a statement. If we are
1756 # currently expecting a '>', the matching '<' must have been an operator,
1757 # since a template argument list should not contain statements.
1758 while stack and stack[-1] == '<':
1759 stack.pop()
1760 if not stack:
1761 return (-1, None)
1763 # Did not find end of expression or unbalanced parentheses on this line
1764 return (-1, stack)
1767 def CloseExpression(clean_lines, linenum, pos):
1768 """If input points to ( or { or [ or <, finds the position that closes it.
1770 If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
1771 linenum/pos that correspond to the closing of the expression.
1773 TODO(unknown): cpplint spends a fair bit of time matching parentheses.
1774 Ideally we would want to index all opening and closing parentheses once
1775 and have CloseExpression be just a simple lookup, but due to preprocessor
1776 tricks, this is not so easy.
1778 Args:
1779 clean_lines: A CleansedLines instance containing the file.
1780 linenum: The number of the line to check.
1781 pos: A position on the line.
1783 Returns:
1784 A tuple (line, linenum, pos) pointer *past* the closing brace, or
1785 (line, len(lines), -1) if we never find a close. Note we ignore
1786 strings and comments when matching; and the line we return is the
1787 'cleansed' line at linenum.
1790 line = clean_lines.elided[linenum]
1791 if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
1792 return (line, clean_lines.NumLines(), -1)
1794 # Check first line
1795 (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
1796 if end_pos > -1:
1797 return (line, linenum, end_pos)
1799 # Continue scanning forward
1800 while stack and linenum < clean_lines.NumLines() - 1:
1801 linenum += 1
1802 line = clean_lines.elided[linenum]
1803 (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
1804 if end_pos > -1:
1805 return (line, linenum, end_pos)
1807 # Did not find end of expression before end of file, give up
1808 return (line, clean_lines.NumLines(), -1)
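# Illustrative sketch (not part of cpplint itself): for an elided line such as
#   foo(bar(a, b), c);
# calling CloseExpression with pos at the first '(' returns the same line and
# linenum, and the index just past the matching final ')', i.e. the position
# of the ';'.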
1811 def FindStartOfExpressionInLine(line, endpos, stack):
1812 """Find position at the matching start of current expression.
1814 This is almost the reverse of FindEndOfExpressionInLine, but note
1815 that the input position and returned position differ by 1.
1817 Args:
1818 line: a CleansedLines line.
1819 endpos: start searching at this position.
1820 stack: nesting stack at endpos.
1822 Returns:
1823 On finding matching start: (index at matching start, None)
1824 On finding an unclosed expression: (-1, None)
1825 Otherwise: (-1, new stack at beginning of this line)
1827 i = endpos
1828 while i >= 0:
1829 char = line[i]
1830 if char in ')]}':
1831 # Found end of expression, push to expression stack
1832 stack.append(char)
1833 elif char == '>':
1834 # Found potential end of template argument list.
1836 # Ignore it if it's a "->" or ">=" or "operator>"
1837 if (i > 0 and
1838 (line[i - 1] == '-' or
1839 Match(r'\s>=\s', line[i - 1:]) or
1840 Search(r'\boperator\s*$', line[0:i]))):
1841 i -= 1
1842 else:
1843 stack.append('>')
1844 elif char == '<':
1845 # Found potential start of template argument list
1846 if i > 0 and line[i - 1] == '<':
1847 # Left shift operator
1848 i -= 1
1849 else:
1850 # If there is a matching '>', we can pop the expression stack.
1851 # Otherwise, ignore this '<' since it must be an operator.
1852 if stack and stack[-1] == '>':
1853 stack.pop()
1854 if not stack:
1855 return (i, None)
1856 elif char in '([{':
1857 # Found start of expression.
1859 # If there are any unmatched '>' on the stack, they must be
1860 # operators. Remove those.
1861 while stack and stack[-1] == '>':
1862 stack.pop()
1863 if not stack:
1864 return (-1, None)
1865 if ((char == '(' and stack[-1] == ')') or
1866 (char == '[' and stack[-1] == ']') or
1867 (char == '{' and stack[-1] == '}')):
1868 stack.pop()
1869 if not stack:
1870 return (i, None)
1871 else:
1872 # Mismatched parentheses
1873 return (-1, None)
1874 elif char == ';':
1875 # Found something that looks like the end of a statement. If we are
1876 # currently expecting a '<', the matching '>' must have been an operator,
1877 # since a template argument list should not contain statements.
1878 while stack and stack[-1] == '>':
1879 stack.pop()
1880 if not stack:
1881 return (-1, None)
1883 i -= 1
1885 return (-1, stack)
1888 def ReverseCloseExpression(clean_lines, linenum, pos):
1889 """If input points to ) or } or ] or >, finds the position that opens it.
1891 If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
1892 linenum/pos that correspond to the opening of the expression.
1894 Args:
1895 clean_lines: A CleansedLines instance containing the file.
1896 linenum: The number of the line to check.
1897 pos: A position on the line.
1899 Returns:
1900 A tuple (line, linenum, pos) pointer *at* the opening brace, or
1901 (line, 0, -1) if we never find the matching opening brace. Note
1902 we ignore strings and comments when matching; and the line we
1903 return is the 'cleansed' line at linenum.
1905 line = clean_lines.elided[linenum]
1906 if line[pos] not in ')}]>':
1907 return (line, 0, -1)
1909 # Check last line
1910 (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
1911 if start_pos > -1:
1912 return (line, linenum, start_pos)
1914 # Continue scanning backward
1915 while stack and linenum > 0:
1916 linenum -= 1
1917 line = clean_lines.elided[linenum]
1918 (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
1919 if start_pos > -1:
1920 return (line, linenum, start_pos)
1922 # Did not find start of expression before beginning of file, give up
1923 return (line, 0, -1)
1926 def CheckForCopyright(filename, lines, error):
1927 """Logs an error if no Copyright message appears at the top of the file."""
1929 # We'll say it should occur by line 10. Don't forget there's a
1930 # dummy line at the front.
1931 for line in range(1, min(len(lines), 11)):
1932 if re.search(r'Copyright', lines[line], re.I): break
1933 else: # means no copyright line was found
1934 error(filename, 0, 'legal/copyright', 5,
1935 'No copyright message found. '
1936 'You should have a line: "Copyright [year] <Copyright Owner>"')
1939 def GetIndentLevel(line):
1940 """Return the number of leading spaces in line.
1942 Args:
1943 line: A string to check.
1945 Returns:
1946 An integer count of leading spaces, possibly zero.
1948 indent = Match(r'^( *)\S', line)
1949 if indent:
1950 return len(indent.group(1))
1951 else:
1952 return 0
1955 def GetHeaderGuardCPPVariable(filename):
1956 """Returns the CPP variable that should be used as a header guard.
1958 Args:
1959 filename: The name of a C++ header file.
1961 Returns:
1962 The CPP variable that should be used as a header guard in the
1963 named file.
1967 # Restore the original filename in case cpplint is invoked from Emacs's
1968 # flymake.
1969 filename = re.sub(r'_flymake\.h$', '.h', filename)
1970 filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
1971 # Replace 'c++' with 'cpp'.
1972 filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
1974 fileinfo = FileInfo(filename)
1975 file_path_from_root = fileinfo.RepositoryName()
1976 if _root:
1977 suffix = os.sep
1978 # On Windows, using the directory separator will leave us with a
1979 # "bogus escape" error unless we properly escape it in the regex.
1980 if suffix == '\\':
1981 suffix += '\\'
1982 file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root)
1983 return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
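# Illustrative sketch (hypothetical path, not from the source): for a header
# at 'foo/bar/baz.h' relative to the repository root (with no --root set),
# the function above yields 'FOO_BAR_BAZ_H_': non-alphanumeric characters
# become '_', the result is upper-cased, and a trailing '_' is appended.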
1986 def CheckForHeaderGuard(filename, clean_lines, error):
1987 """Checks that the file contains a header guard.
1989 Logs an error if no #ifndef header guard is present. For other
1990 headers, checks that the full pathname is used.
1992 Args:
1993 filename: The name of the C++ header file.
1994 clean_lines: A CleansedLines instance containing the file.
1995 error: The function to call with any errors found.
1998 # Don't check for header guards if there are error suppression
1999 # comments somewhere in this file.
2001 # Because this is silencing a warning for a nonexistent line, we
2002 # only support the very specific NOLINT(build/header_guard) syntax,
2003 # and not the general NOLINT or NOLINT(*) syntax.
2004 raw_lines = clean_lines.lines_without_raw_strings
2005 for i in raw_lines:
2006 if Search(r'//\s*NOLINT\(build/header_guard\)', i):
2007 return
2009 # Allow pragma once instead of header guards
2010 for i in raw_lines:
2011 if Search(r'^\s*#pragma\s+once', i):
2012 return
2014 cppvar = GetHeaderGuardCPPVariable(filename)
2016 ifndef = ''
2017 ifndef_linenum = 0
2018 define = ''
2019 endif = ''
2020 endif_linenum = 0
2021 for linenum, line in enumerate(raw_lines):
2022 linesplit = line.split()
2023 if len(linesplit) >= 2:
2024 # find the first occurrence of #ifndef and #define, save arg
2025 if not ifndef and linesplit[0] == '#ifndef':
2026 # set ifndef to the header guard presented on the #ifndef line.
2027 ifndef = linesplit[1]
2028 ifndef_linenum = linenum
2029 if not define and linesplit[0] == '#define':
2030 define = linesplit[1]
2031 # find the last occurrence of #endif, save entire line
2032 if line.startswith('#endif'):
2033 endif = line
2034 endif_linenum = linenum
2036 if not ifndef or not define or ifndef != define:
2037 error(filename, 0, 'build/header_guard', 5,
2038 'No #ifndef header guard found, suggested CPP variable is: %s' %
2039 cppvar)
2040 return
2042 # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
2043 # for backward compatibility.
2044 if ifndef != cppvar:
2045 error_level = 0
2046 if ifndef != cppvar + '_':
2047 error_level = 5
2049 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
2050 error)
2051 error(filename, ifndef_linenum, 'build/header_guard', error_level,
2052 '#ifndef header guard has wrong style, please use: %s' % cppvar)
2054 # Check for "//" comments on endif line.
2055 ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
2056 error)
2057 match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
2058 if match:
2059 if match.group(1) == '_':
2060 # Issue low severity warning for deprecated double trailing underscore
2061 error(filename, endif_linenum, 'build/header_guard', 0,
2062 '#endif line should be "#endif // %s"' % cppvar)
2063 return
2065 # Didn't find the corresponding "//" comment. If this file does not
2066 # contain any "//" comments at all, it could be that the compiler
2067 # only wants "/**/" comments, look for those instead.
2068 no_single_line_comments = True
2069 for i in xrange(1, len(raw_lines) - 1):
2070 line = raw_lines[i]
2071 if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
2072 no_single_line_comments = False
2073 break
2075 if no_single_line_comments:
2076 match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
2077 if match:
2078 if match.group(1) == '_':
2079 # Low severity warning for double trailing underscore
2080 error(filename, endif_linenum, 'build/header_guard', 0,
2081 '#endif line should be "#endif /* %s */"' % cppvar)
2082 return
2084 # Didn't find anything
2085 error(filename, endif_linenum, 'build/header_guard', 5,
2086 '#endif line should be "#endif // %s"' % cppvar)
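# Illustrative sketch (hypothetical header, not from the source): a file
# whose guard variable is FOO_BAR_BAZ_H_ passes this check with
#   #ifndef FOO_BAR_BAZ_H_
#   #define FOO_BAR_BAZ_H_
#   ...
#   #endif  // FOO_BAR_BAZ_H_
# or, alternatively, with a single '#pragma once' line.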
2089 def CheckHeaderFileIncluded(filename, include_state, error):
2090 """Logs an error if a source file does not include its header."""
2092 # Do not check test files
2093 fileinfo = FileInfo(filename)
2094 if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
2095 return
2097 for ext in GetHeaderExtensions():
2098 basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
2099 headerfile = basefilename + '.' + ext
2100 if not os.path.exists(headerfile):
2101 continue
2102 headername = FileInfo(headerfile).RepositoryName()
2103 first_include = None
2104 for section_list in include_state.include_list:
2105 for f in section_list:
2106 if headername in f[0] or f[0] in headername:
2107 return
2108 if not first_include:
2109 first_include = f[1]
2111 error(filename, first_include, 'build/include', 5,
2112 '%s should include its header file %s' % (fileinfo.RepositoryName(),
2113 headername))
2116 def CheckForBadCharacters(filename, lines, error):
2117 """Logs an error for each line containing bad characters.
2119 Two kinds of bad characters:
2121 1. Unicode replacement characters: These indicate that either the file
2122 contained invalid UTF-8 (likely) or Unicode replacement characters (which
2123 it shouldn't). Note that it's possible for this to throw off line
2124 numbering if the invalid UTF-8 occurred adjacent to a newline.
2126 2. NUL bytes. These are problematic for some tools.
2128 Args:
2129 filename: The name of the current file.
2130 lines: An array of strings, each representing a line of the file.
2131 error: The function to call with any errors found.
2133 for linenum, line in enumerate(lines):
2134 if unicode_escape_decode('\ufffd') in line:
2135 error(filename, linenum, 'readability/utf8', 5,
2136 'Line contains invalid UTF-8 (or Unicode replacement character).')
2137 if '\0' in line:
2138 error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
2141 def CheckForNewlineAtEOF(filename, lines, error):
2142 """Logs an error if there is no newline char at the end of the file.
2144 Args:
2145 filename: The name of the current file.
2146 lines: An array of strings, each representing a line of the file.
2147 error: The function to call with any errors found.
2150 # The array lines() was created by adding two newlines to the
2151 # original file (go figure), then splitting on \n.
2152 # To verify that the file ends in \n, we just have to make sure the
2153 # last-but-two element of lines() exists and is empty.
2154 if len(lines) < 3 or lines[-2]:
2155 error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
2156 'Could not find a newline character at the end of the file.')
2159 def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
2160 """Logs an error if we see /* ... */ or "..." that extend past one line.
2162 /* ... */ comments are legit inside macros, for one line.
2163 Otherwise, we prefer // comments, so it's ok to warn about the
2164 other. Likewise, it's ok for strings to extend across multiple
2165 lines, as long as a line continuation character (backslash)
2166 terminates each line. Although not currently prohibited by the C++
2167 style guide, it's ugly and unnecessary. We don't do well with either
2168 in this lint program, so we warn about both.
2170 Args:
2171 filename: The name of the current file.
2172 clean_lines: A CleansedLines instance containing the file.
2173 linenum: The number of the line to check.
2174 error: The function to call with any errors found.
2176 line = clean_lines.elided[linenum]
2178 # Remove all \\ (escaped backslashes) from the line. They are OK, and the
2179 # second (escaped) slash may trigger later \" detection erroneously.
2180 line = line.replace('\\\\', '')
2182 if line.count('/*') > line.count('*/'):
2183 error(filename, linenum, 'readability/multiline_comment', 5,
2184 'Complex multi-line /*...*/-style comment found. '
2185 'Lint may give bogus warnings. '
2186 'Consider replacing these with //-style comments, '
2187 'with #if 0...#endif, '
2188 'or with more clearly structured multi-line comments.')
2190 if (line.count('"') - line.count('\\"')) % 2:
2191 error(filename, linenum, 'readability/multiline_string', 5,
2192 'Multi-line string ("...") found. This lint script doesn\'t '
2193 'do well with such strings, and may give bogus warnings. '
2194 'Use C++11 raw strings or concatenation instead.')
2197 # (non-threadsafe name, thread-safe alternative, validation pattern)
2199 # The validation pattern is used to eliminate false positives such as:
2200 # _rand(); // false positive due to substring match.
2201 # ->rand(); // some member function rand().
2202 # ACMRandom rand(seed); // some variable named rand.
2203 # ISAACRandom rand(); // another variable named rand.
2205 # Basically we require the return value of these functions to be used
2206 # in some expression context on the same line by matching on some
2207 # operator before the function name. This eliminates constructors and
2208 # member function calls.
2209 _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
2210 _THREADING_LIST = (
2211 ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
2212 ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
2213 ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
2214 ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
2215 ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
2216 ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
2217 ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
2218 ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
2219 ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
2220 ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
2221 ('strtok(', 'strtok_r(',
2222 _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
2223 ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
2227 def CheckPosixThreading(filename, clean_lines, linenum, error):
2228 """Checks for calls to thread-unsafe functions.
2230 Much code was originally written without consideration for
2231 multi-threading. Engineers also rely on their old experience; many
2232 learned POSIX before the threading extensions were added. These checks
2233 guide engineers toward the thread-safe functions (when using
2234 POSIX directly).
2236 Args:
2237 filename: The name of the current file.
2238 clean_lines: A CleansedLines instance containing the file.
2239 linenum: The number of the line to check.
2240 error: The function to call with any errors found.
2242 line = clean_lines.elided[linenum]
2243 for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
2244 # Additional pattern matching check to confirm that this is the
2245 # function we are looking for
2246 if Search(pattern, line):
2247 error(filename, linenum, 'runtime/threadsafe_fn', 2,
2248 'Consider using ' + multithread_safe_func +
2249 '...) instead of ' + single_thread_func +
2250 '...) for improved thread safety.')
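# Illustrative sketch (not part of cpplint itself): a line such as
#   seed = rand();
# matches _UNSAFE_FUNC_PREFIX (the '=' before 'rand(') and is flagged with a
# suggestion to use rand_r(...), while
#   ACMRandom rand(seed);
# has no operator before the name and is left alone.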
2253 def CheckVlogArguments(filename, clean_lines, linenum, error):
2254 """Checks that VLOG() is only used for defining a logging level.
2256 For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
2257 VLOG(FATAL) are not.
2259 Args:
2260 filename: The name of the current file.
2261 clean_lines: A CleansedLines instance containing the file.
2262 linenum: The number of the line to check.
2263 error: The function to call with any errors found.
2265 line = clean_lines.elided[linenum]
2266 if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
2267 error(filename, linenum, 'runtime/vlog', 5,
2268 'VLOG() should be used with numeric verbosity level. '
2269 'Use LOG() if you want symbolic severity levels.')
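# Illustrative sketch (not part of cpplint itself):
#   VLOG(2) << "details";      // OK: numeric verbosity level
#   VLOG(ERROR) << "oops";     // flagged: use LOG(ERROR) instead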
2271 # Matches invalid increment: *count++, which moves pointer instead of
2272 # incrementing a value.
2273 _RE_PATTERN_INVALID_INCREMENT = re.compile(
2274 r'^\s*\*\w+(\+\+|--);')
2277 def CheckInvalidIncrement(filename, clean_lines, linenum, error):
2278 """Checks for invalid increment *count++.
2280 For example, the following function:
2281 void increment_counter(int* count) {
2282 *count++;
2283 }
2284 is invalid because it effectively does count++ (moving the pointer) and
2285 should be replaced with ++*count, (*count)++ or *count += 1.
2287 Args:
2288 filename: The name of the current file.
2289 clean_lines: A CleansedLines instance containing the file.
2290 linenum: The number of the line to check.
2291 error: The function to call with any errors found.
2293 line = clean_lines.elided[linenum]
2294 if _RE_PATTERN_INVALID_INCREMENT.match(line):
2295 error(filename, linenum, 'runtime/invalid_increment', 5,
2296 'Changing pointer instead of value (or unused value of operator*).')
2299 def IsMacroDefinition(clean_lines, linenum):
2300 if Search(r'^#define', clean_lines[linenum]):
2301 return True
2303 if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
2304 return True
2306 return False
2309 def IsForwardClassDeclaration(clean_lines, linenum):
2310 return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
2313 class _BlockInfo(object):
2314 """Stores information about a generic block of code."""
2316 def __init__(self, linenum, seen_open_brace):
2317 self.starting_linenum = linenum
2318 self.seen_open_brace = seen_open_brace
2319 self.open_parentheses = 0
2320 self.inline_asm = _NO_ASM
2321 self.check_namespace_indentation = False
2323 def CheckBegin(self, filename, clean_lines, linenum, error):
2324 """Run checks that applies to text up to the opening brace.
2326 This is mostly for checking the text after the class identifier
2327 and the "{", usually where the base class is specified. For other
2328 blocks, there isn't much to check, so we always pass.
2330 Args:
2331 filename: The name of the current file.
2332 clean_lines: A CleansedLines instance containing the file.
2333 linenum: The number of the line to check.
2334 error: The function to call with any errors found.
2336 pass
2338 def CheckEnd(self, filename, clean_lines, linenum, error):
2339 """Run checks that applies to text after the closing brace.
2341 This is mostly used for checking end of namespace comments.
2343 Args:
2344 filename: The name of the current file.
2345 clean_lines: A CleansedLines instance containing the file.
2346 linenum: The number of the line to check.
2347 error: The function to call with any errors found.
2349 pass
2351 def IsBlockInfo(self):
2352 """Returns true if this block is a _BlockInfo.
2354 This is convenient for verifying that an object is an instance of
2355 a _BlockInfo, but not an instance of any of the derived classes.
2357 Returns:
2358 True for this class, False for derived classes.
2360 return self.__class__ == _BlockInfo
2363 class _ExternCInfo(_BlockInfo):
2364 """Stores information about an 'extern "C"' block."""
2366 def __init__(self, linenum):
2367 _BlockInfo.__init__(self, linenum, True)
2370 class _ClassInfo(_BlockInfo):
2371 """Stores information about a class."""
2373 def __init__(self, name, class_or_struct, clean_lines, linenum):
2374 _BlockInfo.__init__(self, linenum, False)
2375 self.name = name
2376 self.is_derived = False
2377 self.check_namespace_indentation = True
2378 if class_or_struct == 'struct':
2379 self.access = 'public'
2380 self.is_struct = True
2381 else:
2382 self.access = 'private'
2383 self.is_struct = False
2385 # Remember initial indentation level for this class. Using raw_lines here
2386 # instead of elided to account for leading comments.
2387 self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
2389 # Try to find the end of the class. This will be confused by things like:
2390 # class A {
2391 # } *x = { ...
2393 # But it's still good enough for CheckSectionSpacing.
2394 self.last_line = 0
2395 depth = 0
2396 for i in range(linenum, clean_lines.NumLines()):
2397 line = clean_lines.elided[i]
2398 depth += line.count('{') - line.count('}')
2399 if not depth:
2400 self.last_line = i
2401 break
2403 def CheckBegin(self, filename, clean_lines, linenum, error):
2404 # Look for a bare ':'
2405 if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
2406 self.is_derived = True
2408 def CheckEnd(self, filename, clean_lines, linenum, error):
2409 # If there is a DISALLOW macro, it should appear near the end of
2410 # the class.
2411 seen_last_thing_in_class = False
2412 for i in xrange(linenum - 1, self.starting_linenum, -1):
2413 match = Search(
2414 r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
2415 self.name + r'\)',
2416 clean_lines.elided[i])
2417 if match:
2418 if seen_last_thing_in_class:
2419 error(filename, i, 'readability/constructors', 3,
2420 match.group(1) + ' should be the last thing in the class')
2421 break
2423 if not Match(r'^\s*$', clean_lines.elided[i]):
2424 seen_last_thing_in_class = True
2426 # Check that closing brace is aligned with beginning of the class.
2427 # Only do this if the closing brace is indented by only whitespaces.
2428 # This means we will not check single-line class definitions.
2429 indent = Match(r'^( *)\}', clean_lines.elided[linenum])
2430 if indent and len(indent.group(1)) != self.class_indent:
2431 if self.is_struct:
2432 parent = 'struct ' + self.name
2433 else:
2434 parent = 'class ' + self.name
2435 error(filename, linenum, 'whitespace/indent', 3,
2436 'Closing brace should be aligned with beginning of %s' % parent)
2439 class _NamespaceInfo(_BlockInfo):
2440 """Stores information about a namespace."""
2442 def __init__(self, name, linenum):
2443 _BlockInfo.__init__(self, linenum, False)
2444 self.name = name or ''
2445 self.check_namespace_indentation = True
2447 def CheckEnd(self, filename, clean_lines, linenum, error):
2448 """Check end of namespace comments."""
2449 line = clean_lines.raw_lines[linenum]
2451 # Check how many lines are enclosed in this namespace. Don't issue
2452 # warning for missing namespace comments if there aren't enough
2453 # lines. However, do apply checks if there is already an end of
2454 # namespace comment and it's incorrect.
2456 # TODO(unknown): We always want to check end of namespace comments
2457 # if a namespace is large, but sometimes we also want to apply the
2458 # check if a short namespace contained nontrivial things (something
2459 # other than forward declarations). There is currently no logic on
2460 # deciding what these nontrivial things are, so this check is
2461 # triggered by namespace size only, which works most of the time.
2462 if (linenum - self.starting_linenum < 10
2463 and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
2464 return
2466 # Look for matching comment at end of namespace.
2468 # Note that we accept C style "/* */" comments for terminating
2469 # namespaces, so that code that terminates namespaces inside
2470 # preprocessor macros can be cpplint-clean.
2472 # We also accept stuff like "// end of namespace <name>." with the
2473 # period at the end.
2475 # Besides these, we don't accept anything else, otherwise we might
2476 # get false negatives when existing comment is a substring of the
2477 # expected namespace.
2478 if self.name:
2479 # Named namespace
2480 if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
2481 re.escape(self.name) + r'[\*/\.\\\s]*$'),
2482 line):
2483 error(filename, linenum, 'readability/namespace', 5,
2484 'Namespace should be terminated with "// namespace %s"' %
2485 self.name)
2486 else:
2487 # Anonymous namespace
2488 if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
2489 # If "// namespace anonymous" or "// anonymous namespace (more text)",
2490 # mention "// anonymous namespace" as an acceptable form
2491 if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
2492 error(filename, linenum, 'readability/namespace', 5,
2493 'Anonymous namespace should be terminated with "// namespace"'
2494 ' or "// anonymous namespace"')
2495 else:
2496 error(filename, linenum, 'readability/namespace', 5,
2497 'Anonymous namespace should be terminated with "// namespace"')
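# Illustrative sketch (not part of cpplint itself): for 'namespace foo' the
# accepted terminators include
#   }  // namespace foo
#   }  /* namespace foo */
# and for an anonymous namespace
#   }  // namespace
#   }  // anonymous namespace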
2500 class _PreprocessorInfo(object):
2501 """Stores checkpoints of nesting stacks when #if/#else is seen."""
2503 def __init__(self, stack_before_if):
2504 # The entire nesting stack before #if
2505 self.stack_before_if = stack_before_if
2507 # The entire nesting stack up to #else
2508 self.stack_before_else = []
2510 # Whether we have already seen #else or #elif
2511 self.seen_else = False
2514 class NestingState(object):
2515 """Holds states related to parsing braces."""
2517 def __init__(self):
2518 # Stack for tracking all braces. An object is pushed whenever we
2519 # see a "{", and popped when we see a "}". Only 3 types of
2520 # objects are possible:
2521 # - _ClassInfo: a class or struct.
2522 # - _NamespaceInfo: a namespace.
2523 # - _BlockInfo: some other type of block.
2524 self.stack = []
2526 # Top of the previous stack before each Update().
2528 # Because the nesting_stack is updated at the end of each line, we
2529 # had to do some convoluted checks to find out what the current
2530 # scope is at the beginning of the line. This check is simplified by
2531 # saving the previous top of the nesting stack.
2533 # We could save the full stack, but we only need the top. Copying
2534 # the full nesting stack would slow down cpplint by ~10%.
2535 self.previous_stack_top = []
2537 # Stack of _PreprocessorInfo objects.
2538 self.pp_stack = []
2540 def SeenOpenBrace(self):
2541 """Check if we have seen the opening brace for the innermost block.
2543 Returns:
2544 True if we have seen the opening brace, False if the innermost
2545 block is still expecting an opening brace.
2547 return (not self.stack) or self.stack[-1].seen_open_brace
2549 def InNamespaceBody(self):
2550 """Check if we are currently one level inside a namespace body.
2552 Returns:
2553 True if top of the stack is a namespace block, False otherwise.
2555 return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
2557 def InExternC(self):
2558 """Check if we are currently one level inside an 'extern "C"' block.
2560 Returns:
2561 True if top of the stack is an extern block, False otherwise.
2563 return self.stack and isinstance(self.stack[-1], _ExternCInfo)
2565 def InClassDeclaration(self):
2566 """Check if we are currently one level inside a class or struct declaration.
2568 Returns:
2569 True if top of the stack is a class/struct, False otherwise.
2571 return self.stack and isinstance(self.stack[-1], _ClassInfo)
2573 def InAsmBlock(self):
2574 """Check if we are currently one level inside an inline ASM block.
2576 Returns:
2577 True if the top of the stack is a block containing inline ASM.
2579 return self.stack and self.stack[-1].inline_asm != _NO_ASM
2581 def InTemplateArgumentList(self, clean_lines, linenum, pos):
2582 """Check if current position is inside template argument list.
2584 Args:
2585 clean_lines: A CleansedLines instance containing the file.
2586 linenum: The number of the line to check.
2587 pos: position just after the suspected template argument.
2588 Returns:
2589 True if (linenum, pos) is inside template arguments.
2591 while linenum < clean_lines.NumLines():
2592 # Find the earliest character that might indicate a template argument
2593 line = clean_lines.elided[linenum]
2594 match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
2595 if not match:
2596 linenum += 1
2597 pos = 0
2598 continue
2599 token = match.group(1)
2600 pos += len(match.group(0))
2602 # These things do not look like template argument list:
2603 # class Suspect {
2604 # class Suspect x; }
2605 if token in ('{', '}', ';'): return False
2607 # These things look like template argument list:
2608 # template <class Suspect>
2609 # template <class Suspect = default_value>
2610 # template <class Suspect[]>
2611 # template <class Suspect...>
2612 if token in ('>', '=', '[', ']', '.'): return True
2614 # Check if token is an unmatched '<'.
2615 # If not, move on to the next character.
2616 if token != '<':
2617 pos += 1
2618 if pos >= len(line):
2619 linenum += 1
2620 pos = 0
2621 continue
2623 # We can't be sure if we just find a single '<', and need to
2624 # find the matching '>'.
2625 (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
2626 if end_pos < 0:
2627 # Not sure if template argument list or syntax error in file
2628 return False
2629 linenum = end_line
2630 pos = end_pos
2631 return False
2633 def UpdatePreprocessor(self, line):
2634 """Update preprocessor stack.
2636 We need to handle preprocessors due to classes like this:
2637 #ifdef SWIG
2638 struct ResultDetailsPageElementExtensionPoint {
2639 #else
2640 struct ResultDetailsPageElementExtensionPoint : public Extension {
2641 #endif
2643 We make the following assumptions (good enough for most files):
2644 - Preprocessor condition evaluates to true from #if up to first
2645 #else/#elif/#endif.
2647 - Preprocessor condition evaluates to false from #else/#elif up
2648 to #endif. We still perform lint checks on these lines, but
2649 these do not affect nesting stack.
2651 Args:
2652 line: current line to check.
2654 if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
2655 # Beginning of #if block, save the nesting stack here. The saved
2656 # stack will allow us to restore the parsing state in the #else case.
2657 self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
2658 elif Match(r'^\s*#\s*(else|elif)\b', line):
2659 # Beginning of #else block
2660 if self.pp_stack:
2661 if not self.pp_stack[-1].seen_else:
2662 # This is the first #else or #elif block. Remember the
2663 # whole nesting stack up to this point. This is what we
2664 # keep after the #endif.
2665 self.pp_stack[-1].seen_else = True
2666 self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
2668 # Restore the stack to how it was before the #if
2669 self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
2670 else:
2671 # TODO(unknown): unexpected #else, issue warning?
2672 pass
2673 elif Match(r'^\s*#\s*endif\b', line):
2674 # End of #if or #else blocks.
2675 if self.pp_stack:
2676 # If we saw an #else, we will need to restore the nesting
2677 # stack to its former state before the #else, otherwise we
2678 # will just continue from where we left off.
2679 if self.pp_stack[-1].seen_else:
2680 # Here we can just use a shallow copy since we are the last
2681 # reference to it.
2682 self.stack = self.pp_stack[-1].stack_before_else
2683 # Drop the corresponding #if
2684 self.pp_stack.pop()
2685 else:
2686 # TODO(unknown): unexpected #endif, issue warning?
2687 pass
2689 # TODO(unknown): Update() is too long, but we will refactor later.
2690 def Update(self, filename, clean_lines, linenum, error):
2691 """Update nesting state with current line.
2693 Args:
2694 filename: The name of the current file.
2695 clean_lines: A CleansedLines instance containing the file.
2696 linenum: The number of the line to check.
2697 error: The function to call with any errors found.
2699 line = clean_lines.elided[linenum]
2701 # Remember top of the previous nesting stack.
2703 # The stack is always pushed/popped and not modified in place, so
2704 # we can just do a shallow copy instead of copy.deepcopy. Using
2705 # deepcopy would slow down cpplint by ~28%.
2706 if self.stack:
2707 self.previous_stack_top = self.stack[-1]
2708 else:
2709 self.previous_stack_top = None
2711 # Update pp_stack
2712 self.UpdatePreprocessor(line)
2714 # Count parentheses. This is to avoid adding struct arguments to
2715 # the nesting stack.
2716 if self.stack:
2717 inner_block = self.stack[-1]
2718 depth_change = line.count('(') - line.count(')')
2719 inner_block.open_parentheses += depth_change
2721 # Also check if we are starting or ending an inline assembly block.
2722 if inner_block.inline_asm in (_NO_ASM, _END_ASM):
2723 if (depth_change != 0 and
2724 inner_block.open_parentheses == 1 and
2725 _MATCH_ASM.match(line)):
2726 # Enter assembly block
2727 inner_block.inline_asm = _INSIDE_ASM
2728 else:
2729 # Not entering assembly block. If previous line was _END_ASM,
2730 # we will now shift to _NO_ASM state.
2731 inner_block.inline_asm = _NO_ASM
2732 elif (inner_block.inline_asm == _INSIDE_ASM and
2733 inner_block.open_parentheses == 0):
2734 # Exit assembly block
2735 inner_block.inline_asm = _END_ASM
2737 # Consume namespace declaration at the beginning of the line. Do
2738 # this in a loop so that we catch same line declarations like this:
2739 # namespace proto2 { namespace bridge { class MessageSet; } }
2740 while True:
2741 # Match start of namespace. The "\b\s*" below catches namespace
2742 # declarations even if they aren't followed by whitespace; this
2743 # is so that we don't confuse our namespace checker. The
2744 # missing spaces will be flagged by CheckSpacing.
2745 namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
2746 if not namespace_decl_match:
2747 break
2749 new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
2750 self.stack.append(new_namespace)
2752 line = namespace_decl_match.group(2)
2753 if line.find('{') != -1:
2754 new_namespace.seen_open_brace = True
2755 line = line[line.find('{') + 1:]
2757 # Look for a class declaration in whatever is left of the line
2758 # after parsing namespaces. The regexp accounts for decorated classes
2759 # such as in:
2760 # class LOCKABLE API Object {
2761 # };
2762 class_decl_match = Match(
2763 r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
2764 r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
2765 r'(.*)$', line)
2766 if (class_decl_match and
2767 (not self.stack or self.stack[-1].open_parentheses == 0)):
2768 # We do not want to accept classes that are actually template arguments:
2769 # template <class Ignore1,
2770 # class Ignore2 = Default<Args>,
2771 # template <Args> class Ignore3>
2772 # void Function() {};
2774 # To avoid template argument cases, we scan forward and look for
2775 # an unmatched '>'. If we see one, assume we are inside a
2776 # template argument list.
2777 end_declaration = len(class_decl_match.group(1))
2778 if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
2779 self.stack.append(_ClassInfo(
2780 class_decl_match.group(3), class_decl_match.group(2),
2781 clean_lines, linenum))
2782 line = class_decl_match.group(4)
2784 # If we have not yet seen the opening brace for the innermost block,
2785 # run checks here.
2786 if not self.SeenOpenBrace():
2787 self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
2789 # Update access control if we are inside a class/struct
2790 if self.stack and isinstance(self.stack[-1], _ClassInfo):
2791 classinfo = self.stack[-1]
2792 access_match = Match(
2793 r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
2794 r':(?:[^:]|$)',
2795 line)
2796 if access_match:
2797 classinfo.access = access_match.group(2)
2799 # Check that access keywords are indented +1 space. Skip this
2800 # check if the keywords are not preceded by whitespaces.
2801 indent = access_match.group(1)
2802 if (len(indent) != classinfo.class_indent + 1 and
2803 Match(r'^\s*$', indent)):
2804 if classinfo.is_struct:
2805 parent = 'struct ' + classinfo.name
2806 else:
2807 parent = 'class ' + classinfo.name
2808 slots = ''
2809 if access_match.group(3):
2810 slots = access_match.group(3)
2811 error(filename, linenum, 'whitespace/indent', 3,
2812 '%s%s: should be indented +1 space inside %s' % (
2813 access_match.group(2), slots, parent))
2815 # Consume braces or semicolons from what's left of the line
2816 while True:
2817 # Match first brace, semicolon, or closed parenthesis.
2818 matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
2819 if not matched:
2820 break
2822 token = matched.group(1)
2823 if token == '{':
2824 # If the namespace or class hasn't seen an opening brace yet, mark
2825 # namespace/class head as complete. Push a new block onto the
2826 # stack otherwise.
2827 if not self.SeenOpenBrace():
2828 self.stack[-1].seen_open_brace = True
2829 elif Match(r'^extern\s*"[^"]*"\s*\{', line):
2830 self.stack.append(_ExternCInfo(linenum))
2831 else:
2832 self.stack.append(_BlockInfo(linenum, True))
2833 if _MATCH_ASM.match(line):
2834 self.stack[-1].inline_asm = _BLOCK_ASM
2836 elif token == ';' or token == ')':
2837 # If we haven't seen an opening brace yet, but we already saw
2838 # a semicolon, this is probably a forward declaration. Pop
2839 # the stack for these.
2841 # Similarly, if we haven't seen an opening brace yet, but we
2842 # already saw a closing parenthesis, then these are probably
2843 # function arguments with extra "class" or "struct" keywords.
2844 # Also pop the stack for these.
2845 if not self.SeenOpenBrace():
2846 self.stack.pop()
2847 else: # token == '}'
2848 # Perform end of block checks and pop the stack.
2849 if self.stack:
2850 self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
2851 self.stack.pop()
2852 line = matched.group(2)
2854 def InnermostClass(self):
2855 """Get class info on the top of the stack.
2857 Returns:
2858 A _ClassInfo object if we are inside a class, or None otherwise.
2860 for i in range(len(self.stack), 0, -1):
2861 classinfo = self.stack[i - 1]
2862 if isinstance(classinfo, _ClassInfo):
2863 return classinfo
2864 return None
2866 def CheckCompletedBlocks(self, filename, error):
2867 """Checks that all classes and namespaces have been completely parsed.
2869 Call this when all lines in a file have been processed.
2870 Args:
2871 filename: The name of the current file.
2872 error: The function to call with any errors found.
2874 # Note: This test can result in false positives if #ifdef constructs
2875 # get in the way of brace matching. See the testBuildClass test in
2876 # cpplint_unittest.py for an example of this.
2877 for obj in self.stack:
2878 if isinstance(obj, _ClassInfo):
2879 error(filename, obj.starting_linenum, 'build/class', 5,
2880 'Failed to find complete declaration of class %s' %
2881 obj.name)
2882 elif isinstance(obj, _NamespaceInfo):
2883 error(filename, obj.starting_linenum, 'build/namespaces', 5,
2884 'Failed to find complete declaration of namespace %s' %
2885 obj.name)
2888 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
2889 nesting_state, error):
2890 r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
2892 Complain about several constructs which gcc-2 accepts, but which are
2893 not standard C++. Warning about these in lint is one way to ease the
2894 transition to new compilers.
2895 - put storage class first (e.g. "static const" instead of "const static").
2896 - "%lld" instead of %qd" in printf-type functions.
2897 - "%1$d" is non-standard in printf-type functions.
2898 - "\%" is an undefined character escape sequence.
2899 - text after #endif is not allowed.
2900 - invalid inner-style forward declaration.
2901 - >? and <? operators, and their >?= and <?= cousins.
2903 Additionally, check for constructor/destructor style violations and reference
2904 members, as it is very convenient to do so while checking for
2905 gcc-2 compliance.
2907 Args:
2908 filename: The name of the current file.
2909 clean_lines: A CleansedLines instance containing the file.
2910 linenum: The number of the line to check.
2911 nesting_state: A NestingState instance which maintains information about
2912 the current stack of nested blocks being parsed.
2913 error: A callable to which errors are reported, which takes 4 arguments:
2914 filename, line number, error level, and message
2917 # Remove comments from the line, but leave in strings for now.
2918 line = clean_lines.lines[linenum]
2920 if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
2921 error(filename, linenum, 'runtime/printf_format', 3,
2922 '%q in format strings is deprecated. Use %ll instead.')
2924 if Search(r'printf\s*\(.*".*%\d+\$', line):
2925 error(filename, linenum, 'runtime/printf_format', 2,
2926 '%N$ formats are unconventional. Try rewriting to avoid them.')
2928 # Remove escaped backslashes before looking for undefined escapes.
2929 line = line.replace('\\\\', '')
2931 if Search(r'("|\').*\\(%|\[|\(|{)', line):
2932 error(filename, linenum, 'build/printf_format', 3,
2933 '%, [, (, and { are undefined character escapes. Unescape them.')
2935 # For the rest, work with both comments and strings removed.
2936 line = clean_lines.elided[linenum]
2938 if Search(r'\b(const|volatile|void|char|short|int|long'
2939 r'|float|double|signed|unsigned'
2940 r'|schar|u?int8|u?int16|u?int32|u?int64)'
2941 r'\s+(register|static|extern|typedef)\b',
2942 line):
2943 error(filename, linenum, 'build/storage_class', 5,
2944 'Storage-class specifier (static, extern, typedef, etc) should be '
2945 'at the beginning of the declaration.')
2947 if Match(r'\s*#\s*endif\s*[^/\s]+', line):
2948 error(filename, linenum, 'build/endif_comment', 5,
2949 'Uncommented text after #endif is non-standard. Use a comment.')
2951 if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
2952 error(filename, linenum, 'build/forward_decl', 5,
2953 'Inner-style forward declarations are invalid. Remove this line.')
2955 if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
2956 line):
2957 error(filename, linenum, 'build/deprecated', 3,
2958 '>? and <? (max and min) operators are non-standard and deprecated.')
2960 if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
2961 # TODO(unknown): Could it be expanded safely to arbitrary references,
2962 # without triggering too many false positives? The first
2963 # attempt triggered 5 warnings for mostly benign code in the regtest, hence
2964 # the restriction.
2965 # Here's the original regexp, for the reference:
2966 # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
2967 # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
2968 error(filename, linenum, 'runtime/member_string_references', 2,
2969 'const string& members are dangerous. It is much better to use '
2970 'alternatives, such as pointers or simple constants.')
2972 # Everything else in this function operates on class declarations.
2973 # Return early if the top of the nesting stack is not a class, or if
2974 # the class head is not completed yet.
2975 classinfo = nesting_state.InnermostClass()
2976 if not classinfo or not classinfo.seen_open_brace:
2977 return
2979 # The class may have been declared with namespace or classname qualifiers.
2980 # The constructor and destructor will not have those qualifiers.
2981 base_classname = classinfo.name.split('::')[-1]
2983 # Look for single-argument constructors that aren't marked explicit.
2984 # Technically a valid construct, but against style.
2985 explicit_constructor_match = Match(
2986 r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
2987 r'\(((?:[^()]|\([^()]*\))*)\)'
2988 % re.escape(base_classname),
2989 line)
2991 if explicit_constructor_match:
2992 is_marked_explicit = explicit_constructor_match.group(1)
2994 if not explicit_constructor_match.group(2):
2995 constructor_args = []
2996 else:
2997 constructor_args = explicit_constructor_match.group(2).split(',')
2999 # collapse arguments so that commas in template parameter lists and function
3000 # argument parameter lists don't split arguments in two
3001 i = 0
3002 while i < len(constructor_args):
3003 constructor_arg = constructor_args[i]
3004 while (constructor_arg.count('<') > constructor_arg.count('>') or
3005 constructor_arg.count('(') > constructor_arg.count(')')):
3006 constructor_arg += ',' + constructor_args[i + 1]
3007 del constructor_args[i + 1]
3008 constructor_args[i] = constructor_arg
3009 i += 1
3011 variadic_args = [arg for arg in constructor_args if '&&...' in arg]
3012 defaulted_args = [arg for arg in constructor_args if '=' in arg]
3013 noarg_constructor = (not constructor_args or # empty arg list
3014 # 'void' arg specifier
3015 (len(constructor_args) == 1 and
3016 constructor_args[0].strip() == 'void'))
3017 onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
3018 not noarg_constructor) or
3019 # all but at most one arg defaulted
3020 (len(constructor_args) >= 1 and
3021 not noarg_constructor and
3022 len(defaulted_args) >= len(constructor_args) - 1) or
3023 # variadic arguments with zero or one argument
3024 (len(constructor_args) <= 2 and
3025 len(variadic_args) >= 1))
3026 initializer_list_constructor = bool(
3027 onearg_constructor and
3028 Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
3029 copy_constructor = bool(
3030 onearg_constructor and
3031 Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
3032 % re.escape(base_classname), constructor_args[0].strip()))
3034 if (not is_marked_explicit and
3035 onearg_constructor and
3036 not initializer_list_constructor and
3037 not copy_constructor):
3038 if defaulted_args or variadic_args:
3039 error(filename, linenum, 'runtime/explicit', 5,
3040 'Constructors callable with one argument '
3041 'should be marked explicit.')
3042 else:
3043 error(filename, linenum, 'runtime/explicit', 5,
3044 'Single-parameter constructors should be marked explicit.')
3045 elif is_marked_explicit and not onearg_constructor:
3046 if noarg_constructor:
3047 error(filename, linenum, 'runtime/explicit', 5,
3048 'Zero-parameter constructors should not be marked explicit.')
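# Illustrative sketch (hypothetical class, not from the source): inside
#   class Foo { Foo(int x); };
# the single-argument constructor is flagged unless written as
# 'explicit Foo(int x);', while a copy constructor 'Foo(const Foo& other);'
# and a std::initializer_list constructor are exempt.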
3051 def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
3052 """Checks for the correctness of various spacing around function calls.
3054 Args:
3055 filename: The name of the current file.
3056 clean_lines: A CleansedLines instance containing the file.
3057 linenum: The number of the line to check.
3058 error: The function to call with any errors found.
3060 line = clean_lines.elided[linenum]
3062 # Since function calls often occur inside if/for/while/switch
3063 # expressions - which have their own, more liberal conventions - we
3064 # first see if we should be looking inside such an expression for a
3065 # function call, to which we can apply more strict standards.
3066 fncall = line # if there's no control flow construct, look at whole line
3067 for pattern in (r'\bif\s*\((.*)\)\s*{',
3068 r'\bfor\s*\((.*)\)\s*{',
3069 r'\bwhile\s*\((.*)\)\s*[{;]',
3070 r'\bswitch\s*\((.*)\)\s*{'):
3071 match = Search(pattern, line)
3072 if match:
3073 fncall = match.group(1) # look inside the parens for function calls
3074 break
3076 # Except in if/for/while/switch, there should never be space
3077 # immediately inside parens (e.g. "f( 3, 4 )"). We make an exception
3078 # for nested parens ( (a+b) + c ). Likewise, there should never be
3079 # a space before a ( when it's a function argument. I assume it's a
3080 # function argument when the char before the whitespace is legal in
3081 # a function name (alnum + _) and we're not starting a macro. Also ignore
3082 # pointers and references to arrays and functions because they're too tricky:
3083 # we use a very simple way to recognize these:
3084 # " (something)(maybe-something)" or
3085 # " (something)(maybe-something," or
3086 # " (something)[something]"
3087 # Note that we assume the contents of [] to be short enough that
3088 # they'll never need to wrap.
3089 if ( # Ignore control structures.
3090 not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
3091 fncall) and
3092 # Ignore pointers/references to functions.
3093 not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
3094 # Ignore pointers/references to arrays.
3095 not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
3096 if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
3097 error(filename, linenum, 'whitespace/parens', 4,
3098 'Extra space after ( in function call')
3099 elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
3100 error(filename, linenum, 'whitespace/parens', 2,
3101 'Extra space after (')
3102 if (Search(r'\w\s+\(', fncall) and
3103 not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
3104 not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
3105 not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
3106 not Search(r'\bcase\s+\(', fncall)):
3107 # TODO(unknown): Space after an operator function seems to be a common
3108 # error; silence those for now by restricting them to highest verbosity.
3109 if Search(r'\boperator_*\b', line):
3110 error(filename, linenum, 'whitespace/parens', 0,
3111 'Extra space before ( in function call')
3112 else:
3113 error(filename, linenum, 'whitespace/parens', 4,
3114 'Extra space before ( in function call')
3115 # If the ) is followed only by a newline or a { + newline, assume it's
3116 # part of a control statement (if/while/etc), and don't complain
3117 if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
3118 # If the closing parenthesis is preceded by only whitespaces,
3119 # try to give a more descriptive error message.
3120 if Search(r'^\s+\)', fncall):
3121 error(filename, linenum, 'whitespace/parens', 2,
3122 'Closing ) should be moved to the previous line')
3123 else:
3124 error(filename, linenum, 'whitespace/parens', 2,
3125 'Extra space before )')
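# Illustrative sketch (not part of cpplint itself):
#   Foo( bar);   // flagged: extra space after ( in function call
#   Foo (bar);   // flagged: extra space before ( in function call
#   Foo(bar );   // flagged: extra space before )
#   if (x) {}    // not flagged: control structures are exempt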
3128 def IsBlankLine(line):
3129 """Returns true if the given line is blank.
3131 We consider a line to be blank if the line is empty or consists of
3132 only whitespace.
3134 Args:
3135 line: A line of a string.
3137 Returns:
3138 True, if the given line is blank.
3140 return not line or line.isspace()
3143 def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
3144 error):
3145 is_namespace_indent_item = (
3146 len(nesting_state.stack) > 1 and
3147 nesting_state.stack[-1].check_namespace_indentation and
3148 isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
3149 nesting_state.previous_stack_top == nesting_state.stack[-2])
3151 if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
3152 clean_lines.elided, line):
3153 CheckItemIndentationInNamespace(filename, clean_lines.elided,
3154 line, error)
3157 def CheckForFunctionLengths(filename, clean_lines, linenum,
3158 function_state, error):
3159 """Reports for long function bodies.
3161 For an overview why this is done, see:
3162 https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
3164 Uses a simplistic algorithm assuming other style guidelines
3165 (especially spacing) are followed.
3166 Only checks unindented functions, so class members are unchecked.
3167 Trivial bodies are unchecked, so constructors with huge initializer lists
3168 may be missed.
3169 Blank/comment lines are not counted so as to avoid encouraging the removal
3170 of vertical space and comments just to get through a lint check.
3171 NOLINT *on the last line of a function* disables this check.
3173 Args:
3174 filename: The name of the current file.
3175 clean_lines: A CleansedLines instance containing the file.
3176 linenum: The number of the line to check.
3177 function_state: Current function name and lines in body so far.
3178 error: The function to call with any errors found.
3180 lines = clean_lines.lines
3181 line = lines[linenum]
3182 joined_line = ''
3184 starting_func = False
3185 regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
3186 match_result = Match(regexp, line)
3187 if match_result:
3188 # If the name is all caps and underscores, figure it's a macro and
3189 # ignore it, unless it's TEST or TEST_F.
3190 function_name = match_result.group(1).split()[-1]
3191 if function_name == 'TEST' or function_name == 'TEST_F' or (
3192 not Match(r'[A-Z_]+$', function_name)):
3193 starting_func = True
3195 if starting_func:
3196 body_found = False
3197 for start_linenum in range(linenum, clean_lines.NumLines()):
3198 start_line = lines[start_linenum]
3199 joined_line += ' ' + start_line.lstrip()
3200 if Search(r'(;|})', start_line): # Declarations and trivial functions
3201 body_found = True
3202 break # ... ignore
3203 elif Search(r'{', start_line):
3204 body_found = True
3205 function = Search(r'((\w|:)*)\(', line).group(1)
3206 if Match(r'TEST', function): # Handle TEST... macros
3207 parameter_regexp = Search(r'(\(.*\))', joined_line)
3208 if parameter_regexp: # Ignore bad syntax
3209 function += parameter_regexp.group(1)
3210 else:
3211 function += '()'
3212 function_state.Begin(function)
3213 break
3214 if not body_found:
3215 # No body for the function (or evidence of a non-function) was found.
3216 error(filename, linenum, 'readability/fn_size', 5,
3217 'Lint failed to find start of function body.')
3218 elif Match(r'^\}\s*$', line): # function end
3219 function_state.Check(error, filename, linenum)
3220 function_state.End()
3221 elif not Match(r'^\s*$', line):
3222 function_state.Count() # Count non-blank/non-comment lines.
3225 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
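# Illustrative sketch (editorial addition, not part of cpplint): what
# _RE_PATTERN_TODO captures for a typical TODO comment; 'my_username' is just
# a placeholder.
def _ExampleTodoPattern():
  m = _RE_PATTERN_TODO.match('// TODO(my_username): clean this up')
  assert m.group(1) == ' '              # whitespace between // and TODO
  assert m.group(2) == '(my_username)'  # the optional (username) part
  assert m.group(3) == ' '              # whitespace after the colon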
3228 def CheckComment(line, filename, linenum, next_line_start, error):
3229 """Checks for common mistakes in comments.
3231 Args:
3232 line: The line in question.
3233 filename: The name of the current file.
3234 linenum: The number of the line to check.
3235 next_line_start: The first non-whitespace column of the next line.
3236 error: The function to call with any errors found.
3238 commentpos = line.find('//')
3239 if commentpos != -1:
3240 # Check if the // may be in quotes. If so, ignore it
3241 if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
3242 # Allow one space for new scopes, two spaces otherwise:
3243 if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
3244 ((commentpos >= 1 and
3245 line[commentpos-1] not in string.whitespace) or
3246 (commentpos >= 2 and
3247 line[commentpos-2] not in string.whitespace))):
3248 error(filename, linenum, 'whitespace/comments', 2,
3249 'At least two spaces is best between code and comments')
3251 # Checks for common mistakes in TODO comments.
3252 comment = line[commentpos:]
3253 match = _RE_PATTERN_TODO.match(comment)
3254 if match:
3255 # One whitespace is correct; zero whitespace is handled elsewhere.
3256 leading_whitespace = match.group(1)
3257 if len(leading_whitespace) > 1:
3258 error(filename, linenum, 'whitespace/todo', 2,
3259 'Too many spaces before TODO')
3261 username = match.group(2)
3262 if not username:
3263 error(filename, linenum, 'readability/todo', 2,
3264 'Missing username in TODO; it should look like '
3265 '"// TODO(my_username): Stuff."')
3267 middle_whitespace = match.group(3)
3268 # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
3269 if middle_whitespace != ' ' and middle_whitespace != '':
3270 error(filename, linenum, 'whitespace/todo', 2,
3271 'TODO(my_username) should be followed by a space')
3273 # If the comment contains an alphanumeric character, there
3274 # should be a space somewhere between it and the // unless
3275 # it's a /// or //! Doxygen comment.
3276 if (Match(r'//[^ ]*\w', comment) and
3277 not Match(r'(///|//\!)(\s+|$)', comment)):
3278 error(filename, linenum, 'whitespace/comments', 4,
3279 'Should have a space between // and comment')
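# Illustrative sketch (editorial addition, not part of cpplint): the spacing
# test above boils down to inspecting the two characters before '//'; this
# hypothetical line has only one space in between and would be flagged.
def _ExampleCommentSpacing():
  line = 'int x = 0; // too close'
  commentpos = line.find('//')
  assert line[commentpos - 1] in string.whitespace      # one space ...
  assert line[commentpos - 2] not in string.whitespace  # ... then ';', flagged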
3282 def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
3283 """Checks for improper use of DISALLOW* macros.
3285 Args:
3286 filename: The name of the current file.
3287 clean_lines: A CleansedLines instance containing the file.
3288 linenum: The number of the line to check.
3289 nesting_state: A NestingState instance which maintains information about
3290 the current stack of nested blocks being parsed.
3291 error: The function to call with any errors found.
3293 line = clean_lines.elided[linenum] # get rid of comments and strings
3295 matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
3296 r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
3297 if not matched:
3298 return
3299 if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
3300 if nesting_state.stack[-1].access != 'private':
3301 error(filename, linenum, 'readability/constructors', 3,
3302 '%s must be in the private: section' % matched.group(1))
3304 else:
3305 # Found DISALLOW* macro outside a class declaration, or perhaps it
3306 # was used inside a function when it should have been part of the
3307 # class declaration. We could issue a warning here, but it
3308 # probably resulted in a compiler error already.
3309 pass
3312 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
3313 """Checks for the correctness of various spacing issues in the code.
3315 Things we check for: spaces around operators, spaces after
3316 if/for/while/switch, no spaces around parens in function calls, two
3317 spaces between code and comment, don't start a block with a blank
3318 line, don't end a function with a blank line, don't add a blank line
3319 after public/protected/private, don't have too many blank lines in a row.
3321 Args:
3322 filename: The name of the current file.
3323 clean_lines: A CleansedLines instance containing the file.
3324 linenum: The number of the line to check.
3325 nesting_state: A NestingState instance which maintains information about
3326 the current stack of nested blocks being parsed.
3327 error: The function to call with any errors found.
3330 # Don't use "elided" lines here, otherwise we can't check commented lines.
3331 # Don't want to use "raw" either, because we don't want to check inside C++11
3332 # raw strings.
3333 raw = clean_lines.lines_without_raw_strings
3334 line = raw[linenum]
3336 # Before nixing comments, check if the line is blank for no good
3337 # reason. This includes the first line after a block is opened, and
3338 # blank lines at the end of a function (i.e., right before a line like '}').
3340 # Skip all the blank line checks if we are immediately inside a
3341 # namespace body. In other words, don't issue blank line warnings
3342 # for this block:
3343 # namespace {
3347 # A warning about missing end of namespace comments will be issued instead.
3349 # Also skip blank line checks for 'extern "C"' blocks, which are formatted
3350 # like namespaces.
3351 if (IsBlankLine(line) and
3352 not nesting_state.InNamespaceBody() and
3353 not nesting_state.InExternC()):
3354 elided = clean_lines.elided
3355 prev_line = elided[linenum - 1]
3356 prevbrace = prev_line.rfind('{')
3357 # TODO(unknown): Don't complain if line before blank line, and line after,
3358 # both start with alnums and are indented the same amount.
3359 # This ignores whitespace at the start of a namespace block
3360 # because those are not usually indented.
3361 if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
3362 # OK, we have a blank line at the start of a code block. Before we
3363 # complain, we check if it is an exception to the rule: The previous
3364 # non-empty line has the parameters of a function header that are indented
3365 # 4 spaces (because they did not fit in an 80-column line when placed on
3366 # the same line as the function name). We also check for the case where
3367 # the previous line is indented 6 spaces, which may happen when the
3368 # initializers of a constructor do not fit into an 80-column line.
3369 exception = False
3370 if Match(r' {6}\w', prev_line): # Initializer list?
3371 # We are looking for the opening column of initializer list, which
3372 # should be indented 4 spaces to cause 6 space indentation afterwards.
3373 search_position = linenum-2
3374 while (search_position >= 0
3375 and Match(r' {6}\w', elided[search_position])):
3376 search_position -= 1
3377 exception = (search_position >= 0
3378 and elided[search_position][:5] == ' :')
3379 else:
3380 # Search for the function arguments or an initializer list. We use a
3381 # simple heuristic here: If the line is indented 4 spaces; and we have a
3382 # closing paren, without the opening paren, followed by an opening brace
3383 # or colon (for initializer lists) we assume that it is the last line of
3384 # a function header. If we have a colon indented 4 spaces, it is an
3385 # initializer list.
3386 exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
3387 prev_line)
3388 or Match(r' {4}:', prev_line))
3390 if not exception:
3391 error(filename, linenum, 'whitespace/blank_line', 2,
3392 'Redundant blank line at the start of a code block '
3393 'should be deleted.')
3394 # Ignore blank lines at the end of a block in a long if-else
3395 # chain, like this:
3396 # if (condition1) {
3397 # // Something followed by a blank line
3399 # } else if (condition2) {
3400 # // Something else
3402 if linenum + 1 < clean_lines.NumLines():
3403 next_line = raw[linenum + 1]
3404 if (next_line
3405 and Match(r'\s*}', next_line)
3406 and next_line.find('} else ') == -1):
3407 error(filename, linenum, 'whitespace/blank_line', 3,
3408 'Redundant blank line at the end of a code block '
3409 'should be deleted.')
3411 matched = Match(r'\s*(public|protected|private):', prev_line)
3412 if matched:
3413 error(filename, linenum, 'whitespace/blank_line', 3,
3414 'Do not leave a blank line after "%s:"' % matched.group(1))
3416 # Next, check comments
3417 next_line_start = 0
3418 if linenum + 1 < clean_lines.NumLines():
3419 next_line = raw[linenum + 1]
3420 next_line_start = len(next_line) - len(next_line.lstrip())
3421 CheckComment(line, filename, linenum, next_line_start, error)
3423 # get rid of comments and strings
3424 line = clean_lines.elided[linenum]
3426 # You shouldn't have spaces before your brackets, except maybe after
3427 # 'delete []' or 'return []() {};'
3428 if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
3429 error(filename, linenum, 'whitespace/braces', 5,
3430 'Extra space before [')
3432 # In range-based for, we wanted spaces before and after the colon, but
3433 # not around "::" tokens that might appear.
3434 if (Search(r'for *\(.*[^:]:[^: ]', line) or
3435 Search(r'for *\(.*[^: ]:[^:]', line)):
3436 error(filename, linenum, 'whitespace/forcolon', 2,
3437 'Missing space around colon in range-based for loop')
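# Illustrative sketch (editorial addition, not part of cpplint): the
# range-based for patterns above applied to two hypothetical loops; only the
# one without a space before ':' matches and would be flagged.
def _ExampleRangeForColon():
  assert Search(r'for *\(.*[^: ]:[^:]', 'for (auto x: xs) {')
  assert not Search(r'for *\(.*[^:]:[^: ]', 'for (auto x : xs) {')
  assert not Search(r'for *\(.*[^: ]:[^:]', 'for (auto x : xs) {')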
3440 def CheckOperatorSpacing(filename, clean_lines, linenum, error):
3441 """Checks for horizontal spacing around operators.
3443 Args:
3444 filename: The name of the current file.
3445 clean_lines: A CleansedLines instance containing the file.
3446 linenum: The number of the line to check.
3447 error: The function to call with any errors found.
3449 line = clean_lines.elided[linenum]
3451 # Don't try to do spacing checks for operator methods. Do this by
3452 # replacing the troublesome characters with something else,
3453 # preserving column position for all other characters.
3455 # The replacement is done repeatedly to avoid false positives from
3456 # operators that call operators.
3457 while True:
3458 match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
3459 if match:
3460 line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
3461 else:
3462 break
3464 # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
3465 # Otherwise not. Note we only check for non-spaces on *both* sides;
3466 # sometimes people put non-spaces on one side when aligning ='s among
3467 # many lines (not that this is behavior that I approve of...)
3468 if ((Search(r'[\w.]=', line) or
3469 Search(r'=[\w.]', line))
3470 and not Search(r'\b(if|while|for) ', line)
3471 # Operators taken from [lex.operators] in C++11 standard.
3472 and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
3473 and not Search(r'operator=', line)):
3474 error(filename, linenum, 'whitespace/operators', 4,
3475 'Missing spaces around =')
3477 # It's ok not to have spaces around binary operators like + - * /, but if
3478 # there's too little whitespace, we get concerned. It's hard to tell,
3479 # though, so we punt on this one for now. TODO.
3481 # You should always have whitespace around binary operators.
3483 # Check <= and >= first to avoid false positives with < and >, then
3484 # check non-include lines for spacing around < and >.
3486 # If the operator is followed by a comma, assume it's being used in a
3487 # macro context and don't do any checks. This avoids false
3488 # positives.
3490 # Note that && is not included here. This is because there are too
3491 # many false positives due to RValue references.
3492 match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
3493 if match:
3494 error(filename, linenum, 'whitespace/operators', 3,
3495 'Missing spaces around %s' % match.group(1))
3496 elif not Match(r'#.*include', line):
3497 # Look for < that is not surrounded by spaces. This is only
3498 # triggered if both sides are missing spaces, even though
3499 # technically we should flag if at least one side is missing a
3500 # space. This is done to avoid some false positives with shifts.
3501 match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
3502 if match:
3503 (_, _, end_pos) = CloseExpression(
3504 clean_lines, linenum, len(match.group(1)))
3505 if end_pos <= -1:
3506 error(filename, linenum, 'whitespace/operators', 3,
3507 'Missing spaces around <')
3509 # Look for > that is not surrounded by spaces. Similar to the
3510 # above, we only trigger if both sides are missing spaces to avoid
3511 # false positives with shifts.
3512 match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
3513 if match:
3514 (_, _, start_pos) = ReverseCloseExpression(
3515 clean_lines, linenum, len(match.group(1)))
3516 if start_pos <= -1:
3517 error(filename, linenum, 'whitespace/operators', 3,
3518 'Missing spaces around >')
3520 # We allow no-spaces around << when used like this: 10<<20, but
3521 # not otherwise (particularly, not when used as streams)
3523 # We also allow operators following an opening parenthesis, since
3524 # those tend to be macros that deal with operators.
3525 match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
3526 if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
3527 not (match.group(1) == 'operator' and match.group(2) == ';')):
3528 error(filename, linenum, 'whitespace/operators', 3,
3529 'Missing spaces around <<')
3531 # We allow no-spaces around >> for almost anything. This is because
3532 # C++11 allows ">>" to close nested templates, which accounts for
3533 # most cases when ">>" is not followed by a space.
3535 # We still warn on ">>" followed by an alphabetic character, because that is
3536 # likely due to ">>" being used for right shifts, e.g.:
3537 # value >> alpha
3539 # When ">>" is used to close templates, the alphanumeric letter that
3540 # follows would be part of an identifier, and there should still be
3541 # a space separating the template type and the identifier.
3542 # type<type<type>> alpha
3543 match = Search(r'>>[a-zA-Z_]', line)
3544 if match:
3545 error(filename, linenum, 'whitespace/operators', 3,
3546 'Missing spaces around >>')
3548 # There shouldn't be space around unary operators
3549 match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
3550 if match:
3551 error(filename, linenum, 'whitespace/operators', 4,
3552 'Extra space for operator %s' % match.group(1))
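# Illustrative sketch (editorial addition, not part of cpplint): the '<<'
# heuristic above flags a hypothetical stream-style use with no spaces, but
# leaves the digits-only shift alone.
def _ExampleShiftSpacing():
  pattern = r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])'
  m = Search(pattern, 'std::cout<<value;')
  assert m and not (m.group(1).isdigit() and m.group(2).isdigit())  # flagged
  m = Search(pattern, 'enum { kBit = 1<<20 };')
  assert m and m.group(1).isdigit() and m.group(2).isdigit()        # allowed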
3555 def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
3556 """Checks for horizontal spacing around parentheses.
3558 Args:
3559 filename: The name of the current file.
3560 clean_lines: A CleansedLines instance containing the file.
3561 linenum: The number of the line to check.
3562 error: The function to call with any errors found.
3564 line = clean_lines.elided[linenum]
3566 # No spaces after an if, while, switch, or for
3567 match = Search(r' (if\(|for\(|while\(|switch\()', line)
3568 if match:
3569 error(filename, linenum, 'whitespace/parens', 5,
3570 'Missing space before ( in %s' % match.group(1))
3572 # For if/for/while/switch, the left and right parens should be
3573 # consistent about how many spaces are inside the parens, and
3574 # there should be either zero or one space inside the parens.
3575 # We don't want: "if ( foo)" or "if ( foo )".
3576 # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
3577 match = Search(r'\b(if|for|while|switch)\s*'
3578 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
3579 line)
3580 if match:
3581 if len(match.group(2)) != len(match.group(4)):
3582 if not (match.group(3) == ';' and
3583 len(match.group(2)) == 1 + len(match.group(4)) or
3584 not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
3585 error(filename, linenum, 'whitespace/parens', 5,
3586 'Mismatching spaces inside () in %s' % match.group(1))
3587 if len(match.group(2)) not in [0, 1]:
3588 error(filename, linenum, 'whitespace/parens', 5,
3589 'Should have zero or one spaces inside ( and ) in %s' %
3590 match.group(1))
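# Illustrative sketch (editorial addition, not part of cpplint): hypothetical
# conditions that trip the two checks above -- a missing space before '('
# and mismatched spaces just inside the parentheses.
def _ExampleControlParenSpacing():
  assert Search(r' (if\(|for\(|while\(|switch\()', '  if(x > 0) {')
  m = Search(r'\b(if|for|while|switch)\s*'
             r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
             'if ( x > 0) {')
  assert m and len(m.group(2)) != len(m.group(4))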
3593 def CheckCommaSpacing(filename, clean_lines, linenum, error):
3594 """Checks for horizontal spacing near commas and semicolons.
3596 Args:
3597 filename: The name of the current file.
3598 clean_lines: A CleansedLines instance containing the file.
3599 linenum: The number of the line to check.
3600 error: The function to call with any errors found.
3602 raw = clean_lines.lines_without_raw_strings
3603 line = clean_lines.elided[linenum]
3605 # You should always have a space after a comma (either as fn arg or operator)
3607 # This does not apply when the non-space character following the
3608 # comma is another comma, since the only time when that happens is
3609 # for empty macro arguments.
3611 # We run this check in two passes: first pass on elided lines to
3612 # verify that lines contain missing whitespaces, second pass on raw
3613 # lines to confirm that those missing whitespaces are not due to
3614 # elided comments.
3615 if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
3616 Search(r',[^,\s]', raw[linenum])):
3617 error(filename, linenum, 'whitespace/comma', 3,
3618 'Missing space after ,')
3620 # You should always have a space after a semicolon
3621 # except for few corner cases
3622 # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
3623 # space after ;
3624 if Search(r';[^\s};\\)/]', line):
3625 error(filename, linenum, 'whitespace/semicolon', 3,
3626 'Missing space after ;')
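# Illustrative sketch (editorial addition, not part of cpplint): the comma
# check above on two hypothetical lines; 'operator,(' is rewritten to 'F('
# first, so only the genuine missing space is reported.
def _ExampleCommaSpacing():
  comma_pattern = r',[^,\s]'
  assert Search(comma_pattern,
                ReplaceAll(r'\boperator\s*,\s*\(', 'F(', 'f(a,b);'))
  assert not Search(comma_pattern,
                    ReplaceAll(r'\boperator\s*,\s*\(', 'F(', 'void operator,(int);'))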
3629 def _IsType(clean_lines, nesting_state, expr):
3630 """Check if expression looks like a type name, returns true if so.
3632 Args:
3633 clean_lines: A CleansedLines instance containing the file.
3634 nesting_state: A NestingState instance which maintains information about
3635 the current stack of nested blocks being parsed.
3636 expr: The expression to check.
3637 Returns:
3638 True, if token looks like a type.
3640 # Keep only the last token in the expression
3641 last_word = Match(r'^.*(\b\S+)$', expr)
3642 if last_word:
3643 token = last_word.group(1)
3644 else:
3645 token = expr
3647 # Match native types and stdint types
3648 if _TYPES.match(token):
3649 return True
3651 # Try a bit harder to match templated types. Walk up the nesting
3652 # stack until we find something that resembles a typename
3653 # declaration for what we are looking for.
3654 typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
3655 r'\b')
3656 block_index = len(nesting_state.stack) - 1
3657 while block_index >= 0:
3658 if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
3659 return False
3661 # Found where the opening brace is. We want to scan from this
3662 # line up to the beginning of the function, minus a few lines.
3663 # template <typename Type1, // stop scanning here
3664 # ...>
3665 # class C
3666 # : public ... { // start scanning here
3667 last_line = nesting_state.stack[block_index].starting_linenum
3669 next_block_start = 0
3670 if block_index > 0:
3671 next_block_start = nesting_state.stack[block_index - 1].starting_linenum
3672 first_line = last_line
3673 while first_line >= next_block_start:
3674 if clean_lines.elided[first_line].find('template') >= 0:
3675 break
3676 first_line -= 1
3677 if first_line < next_block_start:
3678 # Didn't find any "template" keyword before reaching the next block,
3679 # there are probably no template things to check for this block
3680 block_index -= 1
3681 continue
3683 # Look for typename in the specified range
3684 for i in xrange(first_line, last_line + 1, 1):
3685 if Search(typename_pattern, clean_lines.elided[i]):
3686 return True
3687 block_index -= 1
3689 return False
3692 def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
3693 """Checks for horizontal spacing near commas.
3695 Args:
3696 filename: The name of the current file.
3697 clean_lines: A CleansedLines instance containing the file.
3698 linenum: The number of the line to check.
3699 nesting_state: A NestingState instance which maintains information about
3700 the current stack of nested blocks being parsed.
3701 error: The function to call with any errors found.
3703 line = clean_lines.elided[linenum]
3705 # Except after an opening paren, or after another opening brace (in case of
3706 # an initializer list, for instance), you should have spaces before your
3707 # braces when they are delimiting blocks, classes, namespaces etc.
3708 # And since you should never have braces at the beginning of a line,
3709 # this is an easy test. Except that braces used for initialization don't
3710 # follow the same rule; we often don't want spaces before those.
3711 match = Match(r'^(.*[^ ({>]){', line)
3713 if match:
3714 # Try a bit harder to check for brace initialization. This
3715 # happens in one of the following forms:
3716 # Constructor() : initializer_list_{} { ... }
3717 # Constructor{}.MemberFunction()
3718 # Type variable{};
3719 # FunctionCall(type{}, ...);
3720 # LastArgument(..., type{});
3721 # LOG(INFO) << type{} << " ...";
3722 # map_of_type[{...}] = ...;
3723 # ternary = expr ? new type{} : nullptr;
3724 # OuterTemplate<InnerTemplateConstructor<Type>{}>
3726 # We check for the character following the closing brace, and
3727 # silence the warning if it's one of those listed above, i.e.
3728 # "{.;,)<>]:".
3730 # To account for nested initializer lists, we allow any number of
3731 # closing braces up to "{;,)<". We can't simply silence the
3732 # warning on first sight of closing brace, because that would
3733 # cause false negatives for things that are not initializer lists.
3734 # Silence this: But not this:
3735 # Outer{ if (...) {
3736 # Inner{...} if (...){ // Missing space before {
3737 # }; }
3739 # There is a false negative with this approach if people inserted
3740 # spurious semicolons, e.g. "if (cond){};", but we will catch the
3741 # spurious semicolon with a separate check.
3742 leading_text = match.group(1)
3743 (endline, endlinenum, endpos) = CloseExpression(
3744 clean_lines, linenum, len(match.group(1)))
3745 trailing_text = ''
3746 if endpos > -1:
3747 trailing_text = endline[endpos:]
3748 for offset in xrange(endlinenum + 1,
3749 min(endlinenum + 3, clean_lines.NumLines() - 1)):
3750 trailing_text += clean_lines.elided[offset]
3751 # We also suppress warnings for `uint64_t{expression}` etc., as the style
3752 # guide recommends brace initialization for integral types to avoid
3753 # overflow/truncation.
3754 if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
3755 and not _IsType(clean_lines, nesting_state, leading_text)):
3756 error(filename, linenum, 'whitespace/braces', 5,
3757 'Missing space before {')
3759 # Make sure '} else {' has spaces.
3760 if Search(r'}else', line):
3761 error(filename, linenum, 'whitespace/braces', 5,
3762 'Missing space before else')
3764 # You shouldn't have a space before a semicolon at the end of the line.
3765 # There's a special case for "for" since the style guide allows space before
3766 # the semicolon there.
3767 if Search(r':\s*;\s*$', line):
3768 error(filename, linenum, 'whitespace/semicolon', 5,
3769 'Semicolon defining empty statement. Use {} instead.')
3770 elif Search(r'^\s*;\s*$', line):
3771 error(filename, linenum, 'whitespace/semicolon', 5,
3772 'Line contains only semicolon. If this should be an empty statement, '
3773 'use {} instead.')
3774 elif (Search(r'\s+;\s*$', line) and
3775 not Search(r'\bfor\b', line)):
3776 error(filename, linenum, 'whitespace/semicolon', 5,
3777 'Extra space before last semicolon. If this should be an empty '
3778 'statement, use {} instead.')
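# Illustrative sketch (editorial addition, not part of cpplint): the brace
# patterns above on hypothetical input; 'if (x){' matches and, absent brace
# initialization, would be flagged, while 'if (x) {' is fine and '}else'
# triggers the missing-space-before-else warning.
def _ExampleBraceSpacing():
  assert Match(r'^(.*[^ ({>]){', 'if (x){')
  assert not Match(r'^(.*[^ ({>]){', 'if (x) {')
  assert Search(r'}else', '}else {')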
3781 def IsDecltype(clean_lines, linenum, column):
3782 """Check if the token ending on (linenum, column) is decltype().
3784 Args:
3785 clean_lines: A CleansedLines instance containing the file.
3786 linenum: the number of the line to check.
3787 column: end column of the token to check.
3788 Returns:
3789 True if this token is decltype() expression, False otherwise.
3791 (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
3792 if start_col < 0:
3793 return False
3794 if Search(r'\bdecltype\s*$', text[0:start_col]):
3795 return True
3796 return False
3798 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
3799 """Checks for additional blank line issues related to sections.
3801 Currently the only thing checked here is blank line before protected/private.
3803 Args:
3804 filename: The name of the current file.
3805 clean_lines: A CleansedLines instance containing the file.
3806 class_info: A _ClassInfo objects.
3807 linenum: The number of the line to check.
3808 error: The function to call with any errors found.
3810 # Skip checks if the class is small, where small means 25 lines or less.
3811 # 25 lines seems like a good cutoff since that's the usual height of
3812 # terminals, and any class that can't fit in one screen can't really
3813 # be considered "small".
3815 # Also skip checks if we are on the first line. This accounts for
3816 # classes that look like
3817 # class Foo { public: ... };
3819 # If we didn't find the end of the class, last_line would be zero,
3820 # and the check will be skipped by the first condition.
3821 if (class_info.last_line - class_info.starting_linenum <= 24 or
3822 linenum <= class_info.starting_linenum):
3823 return
3825 matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
3826 if matched:
3827 # Issue warning if the line before public/protected/private was
3828 # not a blank line, but don't do this if the previous line contains
3829 # "class" or "struct". This can happen two ways:
3830 # - We are at the beginning of the class.
3831 # - We are forward-declaring an inner class that is semantically
3832 # private, but needed to be public for implementation reasons.
3833 # Also ignores cases where the previous line ends with a backslash as can be
3834 # common when defining classes in C macros.
3835 prev_line = clean_lines.lines[linenum - 1]
3836 if (not IsBlankLine(prev_line) and
3837 not Search(r'\b(class|struct)\b', prev_line) and
3838 not Search(r'\\$', prev_line)):
3839 # Try a bit harder to find the beginning of the class. This is to
3840 # account for multi-line base-specifier lists, e.g.:
3841 # class Derived
3842 # : public Base {
3843 end_class_head = class_info.starting_linenum
3844 for i in range(class_info.starting_linenum, linenum):
3845 if Search(r'\{\s*$', clean_lines.lines[i]):
3846 end_class_head = i
3847 break
3848 if end_class_head < linenum - 1:
3849 error(filename, linenum, 'whitespace/blank_line', 3,
3850 '"%s:" should be preceded by a blank line' % matched.group(1))
3853 def GetPreviousNonBlankLine(clean_lines, linenum):
3854 """Return the most recent non-blank line and its line number.
3856 Args:
3857 clean_lines: A CleansedLines instance containing the file contents.
3858 linenum: The number of the line to check.
3860 Returns:
3861 A tuple with two elements. The first element is the contents of the last
3862 non-blank line before the current line, or the empty string if this is the
3863 first non-blank line. The second is the line number of that line, or -1
3864 if this is the first non-blank line.
3867 prevlinenum = linenum - 1
3868 while prevlinenum >= 0:
3869 prevline = clean_lines.elided[prevlinenum]
3870 if not IsBlankLine(prevline): # if not a blank line...
3871 return (prevline, prevlinenum)
3872 prevlinenum -= 1
3873 return ('', -1)
3876 def CheckBraces(filename, clean_lines, linenum, error):
3877 """Looks for misplaced braces (e.g. at the end of line).
3879 Args:
3880 filename: The name of the current file.
3881 clean_lines: A CleansedLines instance containing the file.
3882 linenum: The number of the line to check.
3883 error: The function to call with any errors found.
3886 line = clean_lines.elided[linenum] # get rid of comments and strings
3888 if Match(r'\s*{\s*$', line):
3889 # We allow an open brace to start a line in the case where someone is using
3890 # braces in a block to explicitly create a new scope, which is commonly used
3891 # to control the lifetime of stack-allocated variables. Braces are also
3892 # used for brace initializers inside function calls. We don't detect this
3893 # perfectly: we just don't complain if the last non-whitespace character on
3894 # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
3895 # previous line starts a preprocessor block. We also allow a brace on the
3896 # following line if it is part of an array initialization and would not fit
3897 # within the 80 character limit of the preceding line.
3898 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3899 if (not Search(r'[,;:}{(]\s*$', prevline) and
3900 not Match(r'\s*#', prevline) and
3901 not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
3902 error(filename, linenum, 'whitespace/braces', 4,
3903 '{ should almost always be at the end of the previous line')
3905 # An else clause should be on the same line as the preceding closing brace.
3906 if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
3907 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3908 if Match(r'\s*}\s*$', prevline):
3909 error(filename, linenum, 'whitespace/newline', 4,
3910 'An else should appear on the same line as the preceding }')
3912 # If braces come on one side of an else, they should be on both.
3913 # However, we have to worry about "else if" that spans multiple lines!
3914 if Search(r'else if\s*\(', line): # could be multi-line if
3915 brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
3916 # find the ( after the if
3917 pos = line.find('else if')
3918 pos = line.find('(', pos)
3919 if pos > 0:
3920 (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
3921 brace_on_right = endline[endpos:].find('{') != -1
3922 if brace_on_left != brace_on_right: # must be brace after if
3923 error(filename, linenum, 'readability/braces', 5,
3924 'If an else has a brace on one side, it should have it on both')
3925 elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
3926 error(filename, linenum, 'readability/braces', 5,
3927 'If an else has a brace on one side, it should have it on both')
3929 # Likewise, an else should never have the else clause on the same line
3930 if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
3931 error(filename, linenum, 'whitespace/newline', 4,
3932 'Else clause should never be on same line as else (use 2 lines)')
3934 # In the same way, a do/while should never be on one line
3935 if Match(r'\s*do [^\s{]', line):
3936 error(filename, linenum, 'whitespace/newline', 4,
3937 'do/while clauses should not be on a single line')
3939 # Check single-line if/else bodies. The style guide says 'curly braces are not
3940 # required for single-line statements'. We additionally allow multi-line
3941 # single statements, but we reject anything with more than one semicolon in
3942 # it. This means that the first semicolon after the if should be at the end of
3943 # its line, and the line after that should have an indent level equal to or
3944 # lower than the if. We also check for ambiguous if/else nesting without
3945 # braces.
3946 if_else_match = Search(r'\b(if\s*\(|else\b)', line)
3947 if if_else_match and not Match(r'\s*#', line):
3948 if_indent = GetIndentLevel(line)
3949 endline, endlinenum, endpos = line, linenum, if_else_match.end()
3950 if_match = Search(r'\bif\s*\(', line)
3951 if if_match:
3952 # This could be a multiline if condition, so find the end first.
3953 pos = if_match.end() - 1
3954 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
3955 # Check for an opening brace, either directly after the if or on the next
3956 # line. If found, this isn't a single-statement conditional.
3957 if (not Match(r'\s*{', endline[endpos:])
3958 and not (Match(r'\s*$', endline[endpos:])
3959 and endlinenum < (len(clean_lines.elided) - 1)
3960 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
3961 while (endlinenum < len(clean_lines.elided)
3962 and ';' not in clean_lines.elided[endlinenum][endpos:]):
3963 endlinenum += 1
3964 endpos = 0
3965 if endlinenum < len(clean_lines.elided):
3966 endline = clean_lines.elided[endlinenum]
3967 # We allow a mix of whitespace and closing braces (e.g. for one-liner
3968 # methods) and a single \ after the semicolon (for macros)
3969 endpos = endline.find(';')
3970 if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
3971 # Semicolon isn't the last character, there's something trailing.
3972 # Output a warning if the semicolon is not contained inside
3973 # a lambda expression.
3974 if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
3975 endline):
3976 error(filename, linenum, 'readability/braces', 4,
3977 'If/else bodies with multiple statements require braces')
3978 elif endlinenum < len(clean_lines.elided) - 1:
3979 # Make sure the next line is dedented
3980 next_line = clean_lines.elided[endlinenum + 1]
3981 next_indent = GetIndentLevel(next_line)
3982 # With ambiguous nested if statements, this will error out on the
3983 # if that *doesn't* match the else, regardless of whether it's the
3984 # inner one or outer one.
3985 if (if_match and Match(r'\s*else\b', next_line)
3986 and next_indent != if_indent):
3987 error(filename, linenum, 'readability/braces', 4,
3988 'Else clause should be indented at the same level as if. '
3989 'Ambiguous nested if/else chains require braces.')
3990 elif next_indent > if_indent:
3991 error(filename, linenum, 'readability/braces', 4,
3992 'If/else bodies with multiple statements require braces')
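# Illustrative sketch (editorial addition, not part of cpplint): hypothetical
# lines matching the else-placement patterns above -- an 'else' opening its
# own line after a lone '}', and a brace on only one side of an else.
def _ExampleElsePlacement():
  assert Match(r'\s*else\b\s*(?:if\b|\{|$)', '  else {')     # current line
  assert Match(r'\s*}\s*$', '  }')                           # previous line
  assert Search(r'}\s*else[^{]*$', '} else return;')         # one-sided brace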
3995 def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
3996 """Looks for redundant trailing semicolon.
3998 Args:
3999 filename: The name of the current file.
4000 clean_lines: A CleansedLines instance containing the file.
4001 linenum: The number of the line to check.
4002 error: The function to call with any errors found.
4005 line = clean_lines.elided[linenum]
4007 # Block bodies should not be followed by a semicolon. Due to C++11
4008 # brace initialization, there are more places where semicolons are
4009 # required than not, so we use a whitelist approach to check these
4010 # rather than a blacklist. These are the places where "};" should
4011 # be replaced by just "}":
4012 # 1. Some flavor of block following closing parenthesis:
4013 # for (;;) {};
4014 # while (...) {};
4015 # switch (...) {};
4016 # Function(...) {};
4017 # if (...) {};
4018 # if (...) else if (...) {};
4020 # 2. else block:
4021 # if (...) else {};
4023 # 3. const member function:
4024 # Function(...) const {};
4026 # 4. Block following some statement:
4027 # x = 42;
4028 # {};
4030 # 5. Block at the beginning of a function:
4031 # Function(...) {
4032 # {};
4035 # Note that naively checking for the preceding "{" will also match
4036 # braces inside multi-dimensional arrays, but this is fine since
4037 # that expression will not contain semicolons.
4039 # 6. Block following another block:
4040 # while (true) {}
4041 # {};
4043 # 7. End of namespaces:
4044 # namespace {};
4046 # These semicolons seem far more common than other kinds of
4047 # redundant semicolons, possibly due to people converting classes
4048 # to namespaces. For now we do not warn for this case.
4050 # Try matching case 1 first.
4051 match = Match(r'^(.*\)\s*)\{', line)
4052 if match:
4053 # Matched closing parenthesis (case 1). Check the token before the
4054 # matching opening parenthesis, and don't warn if it looks like a
4055 # macro. This avoids these false positives:
4056 # - macro that defines a base class
4057 # - multi-line macro that defines a base class
4058 # - macro that defines the whole class-head
4060 # But we still issue warnings for macros that we know are safe to
4061 # warn, specifically:
4062 # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
4063 # - TYPED_TEST
4064 # - INTERFACE_DEF
4065 # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
4067 # We implement a whitelist of safe macros instead of a blacklist of
4068 # unsafe macros, even though the latter appears less frequently in
4069 # google code and would have been easier to implement. This is because
4070 # the downside for getting the whitelist wrong means some extra
4071 # semicolons, while the downside for getting the blacklist wrong
4072 # would result in compile errors.
4074 # In addition to macros, we also don't want to warn on
4075 # - Compound literals
4076 # - Lambdas
4077 # - alignas specifier with anonymous structs
4078 # - decltype
4079 closing_brace_pos = match.group(1).rfind(')')
4080 opening_parenthesis = ReverseCloseExpression(
4081 clean_lines, linenum, closing_brace_pos)
4082 if opening_parenthesis[2] > -1:
4083 line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
4084 macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
4085 func = Match(r'^(.*\])\s*$', line_prefix)
4086 if ((macro and
4087 macro.group(1) not in (
4088 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
4089 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
4090 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
4091 (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
4092 Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
4093 Search(r'\bdecltype$', line_prefix) or
4094 Search(r'\breturn\s*$', line_prefix) or
4095 Search(r'\s+=\s*$', line_prefix)):
4096 match = None
4097 if (match and
4098 opening_parenthesis[1] > 1 and
4099 Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
4100 # Multi-line lambda-expression
4101 match = None
4103 else:
4104 # Try matching cases 2-3.
4105 match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
4106 if not match:
4107 # Try matching cases 4-6. These are always matched on separate lines.
4109 # Note that we can't simply concatenate the previous line to the
4110 # current line and do a single match, otherwise we may output
4111 # duplicate warnings for the blank line case:
4112 # if (cond) {
4113 # // blank line
4115 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
4116 if prevline and Search(r'[;{}]\s*$', prevline):
4117 match = Match(r'^(\s*)\{', line)
4119 # Check matching closing brace
4120 if match:
4121 (endline, endlinenum, endpos) = CloseExpression(
4122 clean_lines, linenum, len(match.group(1)))
4123 if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
4124 # Current {} pair is eligible for semicolon check, and we have found
4125 # the redundant semicolon, output warning here.
4127 # Note: because we are scanning forward for opening braces, and
4128 # outputting warnings for the matching closing brace, if there are
4129 # nested blocks with trailing semicolons, we will get the error
4130 # messages in reversed order.
4132 # We need to check the line forward for NOLINT
4133 raw_lines = clean_lines.raw_lines
4134 ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
4135 error)
4136 ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
4137 error)
4139 error(filename, endlinenum, 'readability/braces', 4,
4140 "You don't need a ; after a }")
4143 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
4144 """Look for empty loop/conditional body with only a single semicolon.
4146 Args:
4147 filename: The name of the current file.
4148 clean_lines: A CleansedLines instance containing the file.
4149 linenum: The number of the line to check.
4150 error: The function to call with any errors found.
4153 # Search for loop keywords at the beginning of the line. Because only
4154 # whitespace is allowed before the keywords, this will also ignore most
4155 # do-while loops, since those lines should start with a closing brace.
4157 # We also check "if" blocks here, since an empty conditional block
4158 # is likely an error.
4159 line = clean_lines.elided[linenum]
4160 matched = Match(r'\s*(for|while|if)\s*\(', line)
4161 if matched:
4162 # Find the end of the conditional expression.
4163 (end_line, end_linenum, end_pos) = CloseExpression(
4164 clean_lines, linenum, line.find('('))
4166 # Output warning if what follows the condition expression is a semicolon.
4167 # No warning for all other cases, including whitespace or newline, since we
4168 # have a separate check for semicolons preceded by whitespace.
4169 if end_pos >= 0 and Match(r';', end_line[end_pos:]):
4170 if matched.group(1) == 'if':
4171 error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
4172 'Empty conditional bodies should use {}')
4173 else:
4174 error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
4175 'Empty loop bodies should use {} or continue')
4177 # Check for if statements that have completely empty bodies (no comments)
4178 # and no else clauses.
4179 if end_pos >= 0 and matched.group(1) == 'if':
4180 # Find the position of the opening { for the if statement.
4181 # Return without logging an error if it has no brackets.
4182 opening_linenum = end_linenum
4183 opening_line_fragment = end_line[end_pos:]
4184 # Loop until EOF or find anything that's not whitespace or opening {.
4185 while not Search(r'^\s*\{', opening_line_fragment):
4186 if Search(r'^(?!\s*$)', opening_line_fragment):
4187 # Conditional has no brackets.
4188 return
4189 opening_linenum += 1
4190 if opening_linenum == len(clean_lines.elided):
4191 # Couldn't find conditional's opening { or any code before EOF.
4192 return
4193 opening_line_fragment = clean_lines.elided[opening_linenum]
4194 # Set opening_line (opening_line_fragment may not be entire opening line).
4195 opening_line = clean_lines.elided[opening_linenum]
4197 # Find the position of the closing }.
4198 opening_pos = opening_line_fragment.find('{')
4199 if opening_linenum == end_linenum:
4200 # We need to make opening_pos relative to the start of the entire line.
4201 opening_pos += end_pos
4202 (closing_line, closing_linenum, closing_pos) = CloseExpression(
4203 clean_lines, opening_linenum, opening_pos)
4204 if closing_pos < 0:
4205 return
4207 # Now construct the body of the conditional. This consists of the portion
4208 # of the opening line after the {, all lines until the closing line,
4209 # and the portion of the closing line before the }.
4210 if (clean_lines.raw_lines[opening_linenum] !=
4211 CleanseComments(clean_lines.raw_lines[opening_linenum])):
4212 # Opening line ends with a comment, so conditional isn't empty.
4213 return
4214 if closing_linenum > opening_linenum:
4215 # Opening line after the {. Ignore comments here since we checked above.
4216 bodylist = list(opening_line[opening_pos+1:])
4217 # All lines until closing line, excluding closing line, with comments.
4218 bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
4219 # Closing line before the }. Won't (and can't) have comments.
4220 bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
4221 body = '\n'.join(bodylist)
4222 else:
4223 # If statement has brackets and fits on a single line.
4224 body = opening_line[opening_pos+1:closing_pos-1]
4226 # Check if the body is empty
4227 if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
4228 return
4229 # The body is empty. Now make sure there's not an else clause.
4230 current_linenum = closing_linenum
4231 current_line_fragment = closing_line[closing_pos:]
4232 # Loop until EOF or find anything that's not whitespace or else clause.
4233 while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
4234 if Search(r'^(?=\s*else)', current_line_fragment):
4235 # Found an else clause, so don't log an error.
4236 return
4237 current_linenum += 1
4238 if current_linenum == len(clean_lines.elided):
4239 break
4240 current_line_fragment = clean_lines.elided[current_linenum]
4242 # The body is empty and there's no else clause until EOF or other code.
4243 error(filename, end_linenum, 'whitespace/empty_if_body', 4,
4244 ('If statement had no body and no else clause'))
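# Illustrative sketch (editorial addition, not part of cpplint): the keyword
# match above on a hypothetical empty loop; the ';' right after the closing
# parenthesis is what CloseExpression finds and reports as an empty body.
def _ExampleEmptyBody():
  matched = Match(r'\s*(for|while|if)\s*\(', 'while (Poll());')
  assert matched and matched.group(1) == 'while'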
4247 def FindCheckMacro(line):
4248 """Find a replaceable CHECK-like macro.
4250 Args:
4251 line: line to search on.
4252 Returns:
4253 (macro name, start position), or (None, -1) if no replaceable
4254 macro is found.
4256 for macro in _CHECK_MACROS:
4257 i = line.find(macro)
4258 if i >= 0:
4259 # Find opening parenthesis. Do a regular expression match here
4260 # to make sure that we are matching the expected CHECK macro, as
4261 # opposed to some other macro that happens to contain the CHECK
4262 # substring.
4263 matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
4264 if not matched:
4265 continue
4266 return (macro, len(matched.group(1)))
4267 return (None, -1)
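# Illustrative sketch (editorial addition, not part of cpplint): assuming
# plain 'CHECK' is listed in the module-level _CHECK_MACROS (as in upstream
# cpplint), FindCheckMacro returns the macro name and the index of its '('.
def _ExampleFindCheckMacro():
  macro, start = FindCheckMacro('  CHECK(a == b);')
  assert macro == 'CHECK' and start == len('  CHECK')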
4270 def CheckCheck(filename, clean_lines, linenum, error):
4271 """Checks the use of CHECK and EXPECT macros.
4273 Args:
4274 filename: The name of the current file.
4275 clean_lines: A CleansedLines instance containing the file.
4276 linenum: The number of the line to check.
4277 error: The function to call with any errors found.
4280 # Decide the set of replacement macros that should be suggested
4281 lines = clean_lines.elided
4282 (check_macro, start_pos) = FindCheckMacro(lines[linenum])
4283 if not check_macro:
4284 return
4286 # Find end of the boolean expression by matching parentheses
4287 (last_line, end_line, end_pos) = CloseExpression(
4288 clean_lines, linenum, start_pos)
4289 if end_pos < 0:
4290 return
4292 # If the check macro is followed by something other than a
4293 # semicolon, assume users will log their own custom error messages
4294 # and don't suggest any replacements.
4295 if not Match(r'\s*;', last_line[end_pos:]):
4296 return
4298 if linenum == end_line:
4299 expression = lines[linenum][start_pos + 1:end_pos - 1]
4300 else:
4301 expression = lines[linenum][start_pos + 1:]
4302 for i in xrange(linenum + 1, end_line):
4303 expression += lines[i]
4304 expression += last_line[0:end_pos - 1]
4306 # Parse expression so that we can take parentheses into account.
4307 # This avoids false positives for inputs like "CHECK((a < 4) == b)",
4308 # which is not replaceable by CHECK_LE.
4309 lhs = ''
4310 rhs = ''
4311 operator = None
4312 while expression:
4313 matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
4314 r'==|!=|>=|>|<=|<|\()(.*)$', expression)
4315 if matched:
4316 token = matched.group(1)
4317 if token == '(':
4318 # Parenthesized operand
4319 expression = matched.group(2)
4320 (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
4321 if end < 0:
4322 return # Unmatched parenthesis
4323 lhs += '(' + expression[0:end]
4324 expression = expression[end:]
4325 elif token in ('&&', '||'):
4326 # Logical and/or operators. This means the expression
4327 # contains more than one term, for example:
4328 # CHECK(42 < a && a < b);
4330 # These are not replaceable with CHECK_LE, so bail out early.
4331 return
4332 elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
4333 # Non-relational operator
4334 lhs += token
4335 expression = matched.group(2)
4336 else:
4337 # Relational operator
4338 operator = token
4339 rhs = matched.group(2)
4340 break
4341 else:
4342 # Unparenthesized operand. Instead of appending to lhs one character
4343 # at a time, we do another regular expression match to consume several
4344 # characters at once if possible. Trivial benchmark shows that this
4345 # is more efficient when the operands are longer than a single
4346 # character, which is generally the case.
4347 matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
4348 if not matched:
4349 matched = Match(r'^(\s*\S)(.*)$', expression)
4350 if not matched:
4351 break
4352 lhs += matched.group(1)
4353 expression = matched.group(2)
4355 # Only apply checks if we got all parts of the boolean expression
4356 if not (lhs and operator and rhs):
4357 return
4359 # Check that rhs do not contain logical operators. We already know
4360 # that lhs is fine since the loop above parses out && and ||.
4361 if rhs.find('&&') > -1 or rhs.find('||') > -1:
4362 return
4364 # At least one of the operands must be a constant literal. This is
4365 # to avoid suggesting replacements for unprintable things like
4366 # CHECK(variable != iterator)
4368 # The following pattern matches decimal, hex integers, strings, and
4369 # characters (in that order).
4370 lhs = lhs.strip()
4371 rhs = rhs.strip()
4372 match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
4373 if Match(match_constant, lhs) or Match(match_constant, rhs):
4374 # Note: since we know both lhs and rhs, we can provide a more
4375 # descriptive error message like:
4376 # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
4377 # Instead of:
4378 # Consider using CHECK_EQ instead of CHECK(a == b)
4380 # We are still keeping the less descriptive message because if lhs
4381 # or rhs gets long, the error message might become unreadable.
4382 error(filename, linenum, 'readability/check', 2,
4383 'Consider using %s instead of %s(a %s b)' % (
4384 _CHECK_REPLACEMENT[check_macro][operator],
4385 check_macro, operator))
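# Illustrative sketch (editorial addition, not part of cpplint): the
# literal-operand pattern used above; '42' counts as a constant while a plain
# identifier does not, so only CHECK(x == 42)-style expressions get a
# CHECK_EQ-style suggestion.
def _ExampleCheckConstant():
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  assert Match(match_constant, '42')
  assert not Match(match_constant, 'iterator')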
4388 def CheckAltTokens(filename, clean_lines, linenum, error):
4389 """Check alternative keywords being used in boolean expressions.
4391 Args:
4392 filename: The name of the current file.
4393 clean_lines: A CleansedLines instance containing the file.
4394 linenum: The number of the line to check.
4395 error: The function to call with any errors found.
4397 line = clean_lines.elided[linenum]
4399 # Avoid preprocessor lines
4400 if Match(r'^\s*#', line):
4401 return
4403 # Last ditch effort to avoid multi-line comments. This will not help
4404 # if the comment started before the current line or ended after the
4405 # current line, but it catches most of the false positives. At least,
4406 # it provides a way to workaround this warning for people who use
4407 # multi-line comments in preprocessor macros.
4409 # TODO(unknown): remove this once cpplint has better support for
4410 # multi-line comments.
4411 if line.find('/*') >= 0 or line.find('*/') >= 0:
4412 return
4414 for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
4415 error(filename, linenum, 'readability/alt_tokens', 2,
4416 'Use operator %s instead of %s' % (
4417 _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
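# Illustrative sketch (editorial addition, not part of cpplint): assuming the
# module-level _ALT_TOKEN_REPLACEMENT table maps 'and' to '&&' (as in
# upstream cpplint), a hypothetical 'if (a and b)' would be reported with
# that replacement.
def _ExampleAltTokens():
  matches = list(_ALT_TOKEN_REPLACEMENT_PATTERN.finditer('if (a and b) {'))
  assert matches and _ALT_TOKEN_REPLACEMENT[matches[0].group(1)] == '&&'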
4420 def GetLineWidth(line):
4421 """Determines the width of the line in column positions.
4423 Args:
4424 line: A string, which may be a Unicode string.
4426 Returns:
4427 The width of the line in column positions, accounting for Unicode
4428 combining characters and wide characters.
4430 if isinstance(line, unicode):
4431 width = 0
4432 for uc in unicodedata.normalize('NFC', line):
4433 if unicodedata.east_asian_width(uc) in ('W', 'F'):
4434 width += 2
4435 elif not unicodedata.combining(uc):
4436 width += 1
4437 return width
4438 else:
4439 return len(line)
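# Illustrative sketch (editorial addition, not part of cpplint): GetLineWidth
# on hypothetical input under the Python 2 interpreter this script targets
# (note the 'unicode' check above); a fullwidth CJK character counts as two
# columns while ASCII counts as one.
def _ExampleGetLineWidth():
  assert GetLineWidth('int x;') == 6
  assert GetLineWidth(u'\u4e00') == 2  # one fullwidth character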
4442 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
4443 error):
4444 """Checks rules from the 'C++ style rules' section of cppguide.html.
4446 Most of these rules are hard to test (naming, comment style), but we
4447 do what we can. In particular we check for 2-space indents, line lengths,
4448 tab usage, spaces inside code, etc.
4450 Args:
4451 filename: The name of the current file.
4452 clean_lines: A CleansedLines instance containing the file.
4453 linenum: The number of the line to check.
4454 file_extension: The extension (without the dot) of the filename.
4455 nesting_state: A NestingState instance which maintains information about
4456 the current stack of nested blocks being parsed.
4457 error: The function to call with any errors found.
4460 # Don't use "elided" lines here, otherwise we can't check commented lines.
4461 # Don't want to use "raw" either, because we don't want to check inside C++11
4462 # raw strings.
4463 raw_lines = clean_lines.lines_without_raw_strings
4464 line = raw_lines[linenum]
4465 prev = raw_lines[linenum - 1] if linenum > 0 else ''
4467 if line.find('\t') != -1:
4468 error(filename, linenum, 'whitespace/tab', 1,
4469 'Tab found; better to use spaces')
4471 # One or three blank spaces at the beginning of the line is weird; it's
4472 # hard to reconcile that with 2-space indents.
4473 # NOTE: here are the conditions Rob Pike used for his tests. Mine aren't
4474 # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
4475 # if(RLENGTH > 20) complain = 0;
4476 # if(match($0, " +(error|private|public|protected):")) complain = 0;
4477 # if(match(prev, "&& *$")) complain = 0;
4478 # if(match(prev, "\\|\\| *$")) complain = 0;
4479 # if(match(prev, "[\",=><] *$")) complain = 0;
4480 # if(match($0, " <<")) complain = 0;
4481 # if(match(prev, " +for \\(")) complain = 0;
4482 # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
4483 scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
4484 classinfo = nesting_state.InnermostClass()
4485 initial_spaces = 0
4486 cleansed_line = clean_lines.elided[linenum]
4487 while initial_spaces < len(line) and line[initial_spaces] == ' ':
4488 initial_spaces += 1
4489 # There are certain situations in which we allow one space, notably for
4490 # section labels, and also lines containing multi-line raw strings.
4491 # We also don't check for lines that look like continuation lines
4492 # (of lines ending in double quotes, commas, equals, or angle brackets)
4493 # because the rules for how to indent those are non-trivial.
4494 if (not Search(r'[",=><] *$', prev) and
4495 (initial_spaces == 1 or initial_spaces == 3) and
4496 not Match(scope_or_label_pattern, cleansed_line) and
4497 not (clean_lines.raw_lines[linenum] != line and
4498 Match(r'^\s*""', line))):
4499 error(filename, linenum, 'whitespace/indent', 3,
4500 'Weird number of spaces at line-start. '
4501 'Are you using a 2-space indent?')
4503 if line and line[-1].isspace():
4504 error(filename, linenum, 'whitespace/end_of_line', 4,
4505 'Line ends in whitespace. Consider deleting these extra spaces.')
4507 # Check if the line is a header guard.
4508 is_header_guard = False
4509 if file_extension in GetHeaderExtensions():
4510 cppvar = GetHeaderGuardCPPVariable(filename)
4511 if (line.startswith('#ifndef %s' % cppvar) or
4512 line.startswith('#define %s' % cppvar) or
4513 line.startswith('#endif // %s' % cppvar)):
4514 is_header_guard = True
4515 # #include lines and header guards can be long, since there's no clean way to
4516 # split them.
4518 # URLs can be long too. It's possible to split these, but it makes them
4519 # harder to cut&paste.
4521 # The "$Id:...$" comment may also get very long without it being the
4522 # developer's fault.
4524 # Doxygen documentation copying can get pretty long when using an overloaded
4525 # function declaration
4526 if (not line.startswith('#include') and not is_header_guard and
4527 not Match(r'^\s*//.*http(s?)://\S*$', line) and
4528 not Match(r'^\s*//\s*[^\s]*$', line) and
4529 not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
4530 not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
4531 line_width = GetLineWidth(line)
4532 if line_width > _line_length:
4533 error(filename, linenum, 'whitespace/line_length', 2,
4534 'Lines should be <= %i characters long' % _line_length)
4536 if (cleansed_line.count(';') > 1 and
4537 # allow simple single line lambdas
4538 not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
4539 line) and
4540 # for loops are allowed two ;'s (and may run over two lines).
4541 cleansed_line.find('for') == -1 and
4542 (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
4543 GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
4544 # It's ok to have many commands in a switch case that fits in 1 line
4545 not ((cleansed_line.find('case ') != -1 or
4546 cleansed_line.find('default:') != -1) and
4547 cleansed_line.find('break;') != -1)):
4548 error(filename, linenum, 'whitespace/newline', 0,
4549 'More than one command on the same line')
4551 # Some more style checks
4552 CheckBraces(filename, clean_lines, linenum, error)
4553 CheckTrailingSemicolon(filename, clean_lines, linenum, error)
4554 CheckEmptyBlockBody(filename, clean_lines, linenum, error)
4555 CheckAccess(filename, clean_lines, linenum, nesting_state, error)
4556 CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
4557 CheckOperatorSpacing(filename, clean_lines, linenum, error)
4558 CheckParenthesisSpacing(filename, clean_lines, linenum, error)
4559 CheckCommaSpacing(filename, clean_lines, linenum, error)
4560 CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
4561 CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
4562 CheckCheck(filename, clean_lines, linenum, error)
4563 CheckAltTokens(filename, clean_lines, linenum, error)
4564 classinfo = nesting_state.InnermostClass()
4565 if classinfo:
4566 CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
4569 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
4570 # Matches the first component of a filename delimited by -s and _s. That is:
4571 # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
4572 # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
4573 # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
4574 # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
4575 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
4578 def _DropCommonSuffixes(filename):
4579 """Drops common suffixes like _test.cc or -inl.h from filename.
4581 For example:
4582 >>> _DropCommonSuffixes('foo/foo-inl.h')
4583 'foo/foo'
4584 >>> _DropCommonSuffixes('foo/bar/foo.cc')
4585 'foo/bar/foo'
4586 >>> _DropCommonSuffixes('foo/foo_internal.h')
4587 'foo/foo'
4588 >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
4589 'foo/foo_unusualinternal'
4591 Args:
4592 filename: The input filename.
4594 Returns:
4595 The filename with the common suffix removed.
4596 """
4597 for suffix in itertools.chain(
4598 ('%s.%s' % (test_suffix.lstrip('_'), ext)
4599 for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
4600 ('%s.%s' % (suffix, ext)
4601 for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
4602 if (filename.endswith(suffix) and len(filename) > len(suffix) and
4603 filename[-len(suffix) - 1] in ('-', '_')):
4604 return filename[:-len(suffix) - 1]
4605 return os.path.splitext(filename)[0]
4608 def _ClassifyInclude(fileinfo, include, is_system):
4609 """Figures out what kind of header 'include' is.
4611 Args:
4612 fileinfo: The current file cpplint is running over. A FileInfo instance.
4613 include: The path to a #included file.
4614 is_system: True if the #include used <> rather than "".
4616 Returns:
4617 One of the _XXX_HEADER constants.
4619 For example:
4620 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
4621 _C_SYS_HEADER
4622 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
4623 _CPP_SYS_HEADER
4624 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
4625 _LIKELY_MY_HEADER
4626 >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
4627 ... 'bar/foo_other_ext.h', False)
4628 _POSSIBLE_MY_HEADER
4629 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
4630 _OTHER_HEADER
4631 """
4632 # This is a list of all standard c++ header files, except
4633 # those already checked for above.
4634 is_cpp_h = include in _CPP_HEADERS
4636 # Headers with C++ extensions shouldn't be considered C system headers
4637 if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']:
4638 is_system = False
4640 if is_system:
4641 if is_cpp_h:
4642 return _CPP_SYS_HEADER
4643 else:
4644 return _C_SYS_HEADER
4646 # If the target file and the include we're checking share a
4647 # basename when we drop common extensions, and the include
4648 # lives in . , then it's likely to be owned by the target file.
4649 target_dir, target_base = (
4650 os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
4651 include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
4652 target_dir_pub = os.path.normpath(target_dir + '/../public')
4653 target_dir_pub = target_dir_pub.replace('\\', '/')
4654 if target_base == include_base and (
4655 include_dir == target_dir or
4656 include_dir == target_dir_pub):
4657 return _LIKELY_MY_HEADER
4659 # If the target and include share some initial basename
4660 # component, it's possible the target is implementing the
4661 # include, so it's allowed to be first, but we'll never
4662 # complain if it's not there.
4663 target_first_component = _RE_FIRST_COMPONENT.match(target_base)
4664 include_first_component = _RE_FIRST_COMPONENT.match(include_base)
4665 if (target_first_component and include_first_component and
4666 target_first_component.group(0) ==
4667 include_first_component.group(0)):
4668 return _POSSIBLE_MY_HEADER
4670 return _OTHER_HEADER
4674 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
4675 """Check rules that are applicable to #include lines.
4677 Strings on #include lines are NOT removed from the elided line, to make
4678 certain tasks easier. However, to prevent false positives, checks
4679 applicable to #include lines in CheckLanguage must be put here.
4681 Args:
4682 filename: The name of the current file.
4683 clean_lines: A CleansedLines instance containing the file.
4684 linenum: The number of the line to check.
4685 include_state: An _IncludeState instance in which the headers are inserted.
4686 error: The function to call with any errors found.
4687 """
4688 fileinfo = FileInfo(filename)
4689 line = clean_lines.lines[linenum]
4691 # "include" should use the new style "foo/bar.h" instead of just "bar.h"
4692 # Only do this check if the included header follows google naming
4693 # conventions. If not, assume that it's a 3rd party API that
4694 # requires special include conventions.
4696 # We also make an exception for Lua headers, which follow google
4697 # naming convention but not the include convention.
4698 match = Match(r'#include\s*"([^/]+\.h)"', line)
4699 if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
4700 error(filename, linenum, 'build/include_subdir', 4,
4701 'Include the directory when naming .h files')
4703 # we shouldn't include a file more than once. actually, there are a
4704 # handful of instances where doing so is okay, but in general it's
4705 # not.
4706 match = _RE_PATTERN_INCLUDE.search(line)
4707 if match:
4708 include = match.group(2)
4709 is_system = (match.group(1) == '<')
4710 duplicate_line = include_state.FindHeader(include)
4711 if duplicate_line >= 0:
4712 error(filename, linenum, 'build/include', 4,
4713 '"%s" already included at %s:%s' %
4714 (include, filename, duplicate_line))
4715 return
4717 for extension in GetNonHeaderExtensions():
4718 if (include.endswith('.' + extension) and
4719 os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
4720 error(filename, linenum, 'build/include', 4,
4721 'Do not include .' + extension + ' files from other packages')
4722 return
4724 if not _THIRD_PARTY_HEADERS_PATTERN.match(include):
4725 include_state.include_list[-1].append((include, linenum))
4727 # We want to ensure that headers appear in the right order:
4728 # 1) for foo.cc, foo.h (preferred location)
4729 # 2) c system files
4730 # 3) cpp system files
4731 # 4) for foo.cc, foo.h (deprecated location)
4732 # 5) other google headers
4734 # We classify each include statement as one of those 5 types
4735 # using a number of techniques. The include_state object keeps
4736 # track of the highest type seen, and complains if we see a
4737 # lower type after that.
4738 error_message = include_state.CheckNextIncludeOrder(
4739 _ClassifyInclude(fileinfo, include, is_system))
4740 if error_message:
4741 error(filename, linenum, 'build/include_order', 4,
4742 '%s. Should be: %s.h, c system, c++ system, other.' %
4743 (error_message, fileinfo.BaseName()))
4744 canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
4745 if not include_state.IsInAlphabeticalOrder(
4746 clean_lines, linenum, canonical_include):
4747 error(filename, linenum, 'build/include_alpha', 4,
4748 'Include "%s" not in alphabetical order' % include)
4749 include_state.SetLastHeader(canonical_include)
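# A minimal sketch (illustrative only; the real logic lives in _IncludeState
# elsewhere in this file) of the "highest section seen so far" idea behind
# CheckNextIncludeOrder: each include maps to a rank, and a rank lower than the
# running maximum means the include is out of order. Names are assumptions.
_DEMO_INCLUDE_RANK = {'my_header': 1, 'c_system': 2, 'cpp_system': 3, 'other': 5}

def _DemoIncludeOrderOk(sections):
  highest_seen = 0
  for section in sections:
    rank = _DEMO_INCLUDE_RANK[section]
    if rank < highest_seen:
      return False               # saw a lower-ranked section after a higher one
    highest_seen = max(highest_seen, rank)
  return True

# _DemoIncludeOrderOk(['my_header', 'c_system', 'cpp_system', 'other']) -> True
# _DemoIncludeOrderOk(['cpp_system', 'c_system']) -> False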
4753 def _GetTextInside(text, start_pattern):
4754 r"""Retrieves all the text between matching open and close parentheses.
4756 Given a string of lines and a regular expression string, retrieve all the text
4757 following the expression and between opening punctuation symbols like
4758 (, [, or {, and the matching close-punctuation symbol. This properly handles
4759 nested occurrences of the punctuation, so for text like
4760 printf(a(), b(c()));
4761 a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
4762 start_pattern must match a string that ends with an opening punctuation symbol.
4764 Args:
4765 text: The text to extract from. Its comments and strings must be elided.
4766 It can be a single line or span multiple lines.
4767 start_pattern: The regexp string indicating where to start extracting
4768 the text.
4769 Returns:
4770 The extracted text.
4771 None if either the opening string or ending punctuation could not be found.
4772 """
4773 # TODO(unknown): Audit cpplint.py to see what places could be profitably
4774 # rewritten to use _GetTextInside (and use inferior regexp matching today).
4776 # Map each opening punctuation to its matching close-punctuation.
4777 matching_punctuation = {'(': ')', '{': '}', '[': ']'}
4778 closing_punctuation = set(itervalues(matching_punctuation))
4780 # Find the position to start extracting text.
4781 match = re.search(start_pattern, text, re.M)
4782 if not match: # start_pattern not found in text.
4783 return None
4784 start_position = match.end(0)
4786 assert start_position > 0, (
4787 'start_pattern must end with an opening punctuation.')
4788 assert text[start_position - 1] in matching_punctuation, (
4789 'start_pattern must end with an opening punctuation.')
4790 # Stack of closing punctuations we expect to have in text after position.
4791 punctuation_stack = [matching_punctuation[text[start_position - 1]]]
4792 position = start_position
4793 while punctuation_stack and position < len(text):
4794 if text[position] == punctuation_stack[-1]:
4795 punctuation_stack.pop()
4796 elif text[position] in closing_punctuation:
4797 # A closing punctuation without matching opening punctuations.
4798 return None
4799 elif text[position] in matching_punctuation:
4800 punctuation_stack.append(matching_punctuation[text[position]])
4801 position += 1
4802 if punctuation_stack:
4803 # Opening punctuations left without matching close-punctuations.
4804 return None
4805 # punctuations match.
4806 return text[start_position:position - 1]
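# A standalone sketch (illustrative only, not used by cpplint) of the same
# stack-based bracket matching that _GetTextInside performs above.
def _TextInsideDemo(text, start_pattern):
  matching = {'(': ')', '[': ']', '{': '}'}
  match = re.search(start_pattern, text)
  if not match or not match.end() or text[match.end() - 1] not in matching:
    return None
  stack = [matching[text[match.end() - 1]]]
  pos = match.end()
  while stack and pos < len(text):
    char = text[pos]
    if char == stack[-1]:
      stack.pop()                       # matched the expected closer
    elif char in matching.values():
      return None                       # unbalanced closer
    elif char in matching:
      stack.append(matching[char])      # nested opener
    pos += 1
  return None if stack else text[match.end():pos - 1]

# _TextInsideDemo('printf(a(), b(c()));', r'printf\(') == 'a(), b(c())'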
4809 # Patterns for matching call-by-reference parameters.
4811 # Supports nested templates up to 2 levels deep using this messy pattern:
4812 # < (?: < (?: < [^<>]*
4813 # >
4814 # | [^<>] )*
4815 # >
4816 # | [^<>] )*
4817 # >
4818 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
4819 _RE_PATTERN_TYPE = (
4820 r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
4821 r'(?:\w|'
4822 r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
4823 r'::)+')
4824 # A call-by-reference parameter ends with '& identifier'.
4825 _RE_PATTERN_REF_PARAM = re.compile(
4826 r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
4827 r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
4828 # A call-by-const-reference parameter either ends with 'const& identifier'
4829 # or looks like 'const type& identifier' when 'type' is atomic.
4830 _RE_PATTERN_CONST_REF_PARAM = (
4831 r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
4832 r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
4833 # Stream types.
4834 _RE_PATTERN_REF_STREAM_PARAM = (
4835 r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
4838 def CheckLanguage(filename, clean_lines, linenum, file_extension,
4839 include_state, nesting_state, error):
4840 """Checks rules from the 'C++ language rules' section of cppguide.html.
4842 Some of these rules are hard to test (function overloading, using
4843 uint32 inappropriately), but we do the best we can.
4845 Args:
4846 filename: The name of the current file.
4847 clean_lines: A CleansedLines instance containing the file.
4848 linenum: The number of the line to check.
4849 file_extension: The extension (without the dot) of the filename.
4850 include_state: An _IncludeState instance in which the headers are inserted.
4851 nesting_state: A NestingState instance which maintains information about
4852 the current stack of nested blocks being parsed.
4853 error: The function to call with any errors found.
4854 """
4855 # If the line is empty or consists entirely of a comment, no need to
4856 # check it.
4857 line = clean_lines.elided[linenum]
4858 if not line:
4859 return
4861 match = _RE_PATTERN_INCLUDE.search(line)
4862 if match:
4863 CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
4864 return
4866 # Reset include state across preprocessor directives. This is meant
4867 # to silence warnings for conditional includes.
4868 match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
4869 if match:
4870 include_state.ResetSection(match.group(1))
4873 # Perform other checks now that we are sure that this is not an include line
4874 CheckCasts(filename, clean_lines, linenum, error)
4875 CheckGlobalStatic(filename, clean_lines, linenum, error)
4876 CheckPrintf(filename, clean_lines, linenum, error)
4878 if file_extension in GetHeaderExtensions():
4879 # TODO(unknown): check that 1-arg constructors are explicit.
4880 # How to tell it's a constructor?
4881 # (handled in CheckForNonStandardConstructs for now)
4882 # TODO(unknown): check that classes declare or disable copy/assign
4883 # (level 1 error)
4884 pass
4886 # Check if people are using the verboten C basic types. The only exception
4887 # we regularly allow is "unsigned short port" for port.
4888 if Search(r'\bshort port\b', line):
4889 if not Search(r'\bunsigned short port\b', line):
4890 error(filename, linenum, 'runtime/int', 4,
4891 'Use "unsigned short" for ports, not "short"')
4892 else:
4893 match = Search(r'\b(short|long(?! +double)|long long)\b', line)
4894 if match:
4895 error(filename, linenum, 'runtime/int', 4,
4896 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
4898 # Check if some verboten operator overloading is going on
4899 # TODO(unknown): catch out-of-line unary operator&:
4900 # class X {};
4901 # int operator&(const X& x) { return 42; } // unary operator&
4902 # The trick is it's hard to tell apart from binary operator&:
4903 # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
4904 if Search(r'\boperator\s*&\s*\(\s*\)', line):
4905 error(filename, linenum, 'runtime/operator', 4,
4906 'Unary operator& is dangerous. Do not use it.')
4908 # Check for suspicious usage of "if" like
4909 # } if (a == b) {
4910 if Search(r'\}\s*if\s*\(', line):
4911 error(filename, linenum, 'readability/braces', 4,
4912 'Did you mean "else if"? If not, start a new line for "if".')
4914 # Check for potential format string bugs like printf(foo).
4915 # We constrain the pattern not to pick things like DocidForPrintf(foo).
4916 # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
4917 # TODO(unknown): Catch the following case. Need to change the calling
4918 # convention of the whole function to process multiple line to handle it.
4919 # printf(
4920 # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
4921 printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
4922 if printf_args:
4923 match = Match(r'([\w.\->()]+)$', printf_args)
4924 if match and match.group(1) != '__VA_ARGS__':
4925 function_name = re.search(r'\b((?:string)?printf)\s*\(',
4926 line, re.I).group(1)
4927 error(filename, linenum, 'runtime/printf', 4,
4928 'Potential format string bug. Do %s("%%s", %s) instead.'
4929 % (function_name, match.group(1)))
4931 # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
4932 match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
4933 if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
4934 error(filename, linenum, 'runtime/memset', 4,
4935 'Did you mean "memset(%s, 0, %s)"?'
4936 % (match.group(1), match.group(2)))
4938 if Search(r'\busing namespace\b', line):
4939 if Search(r'\bliterals\b', line):
4940 error(filename, linenum, 'build/namespaces_literals', 5,
4941 'Do not use namespace using-directives. '
4942 'Use using-declarations instead.')
4943 else:
4944 error(filename, linenum, 'build/namespaces', 5,
4945 'Do not use namespace using-directives. '
4946 'Use using-declarations instead.')
4948 # Detect variable-length arrays.
4949 match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
4950 if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
4951 match.group(3).find(']') == -1):
4952 # Split the size using space and arithmetic operators as delimiters.
4953 # If any of the resulting tokens are not compile time constants then
4954 # report the error.
4955 tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
4956 is_const = True
4957 skip_next = False
4958 for tok in tokens:
4959 if skip_next:
4960 skip_next = False
4961 continue
4963 if Search(r'sizeof\(.+\)', tok): continue
4964 if Search(r'arraysize\(\w+\)', tok): continue
4966 tok = tok.lstrip('(')
4967 tok = tok.rstrip(')')
4968 if not tok: continue
4969 if Match(r'\d+', tok): continue
4970 if Match(r'0[xX][0-9a-fA-F]+', tok): continue
4971 if Match(r'k[A-Z0-9]\w*', tok): continue
4972 if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
4973 if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
4974 # A catch all for tricky sizeof cases, including 'sizeof expression',
4975 # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'; these
4976 # require skipping the next token because we split on ' ' and '*'.
4977 if tok.startswith('sizeof'):
4978 skip_next = True
4979 continue
4980 is_const = False
4981 break
4982 if not is_const:
4983 error(filename, linenum, 'runtime/arrays', 1,
4984 'Do not use variable-length arrays. Use an appropriately named '
4985 "('k' followed by CamelCase) compile-time constant for the size.")
4987 # Check for use of unnamed namespaces in header files. Registration
4988 # macros are typically OK, so we allow use of "namespace {" on lines
4989 # that end with backslashes.
4990 if (file_extension in GetHeaderExtensions()
4991 and Search(r'\bnamespace\s*{', line)
4992 and line[-1] != '\\'):
4993 error(filename, linenum, 'build/namespaces', 4,
4994 'Do not use unnamed namespaces in header files. See '
4995 'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
4996 ' for more information.')
4999 def CheckGlobalStatic(filename, clean_lines, linenum, error):
5000 """Check for unsafe global or static objects.
5002 Args:
5003 filename: The name of the current file.
5004 clean_lines: A CleansedLines instance containing the file.
5005 linenum: The number of the line to check.
5006 error: The function to call with any errors found.
5007 """
5008 line = clean_lines.elided[linenum]
5010 # Match two lines at a time to support multiline declarations
5011 if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
5012 line += clean_lines.elided[linenum + 1].strip()
5014 # Check for people declaring static/global STL strings at the top level.
5015 # This is dangerous because the C++ language does not guarantee that
5016 # globals with constructors are initialized before the first access, and
5017 # also because globals can be destroyed when some threads are still running.
5018 # TODO(unknown): Generalize this to also find static unique_ptr instances.
5019 # TODO(unknown): File bugs for clang-tidy to find these.
5020 match = Match(
5021 r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
5022 r'([a-zA-Z0-9_:]+)\b(.*)',
5023 line)
5025 # Remove false positives:
5026 # - String pointers (as opposed to values).
5027 # string *pointer
5028 # const string *pointer
5029 # string const *pointer
5030 # string *const pointer
5032 # - Functions and template specializations.
5033 # string Function<Type>(...
5034 # string Class<Type>::Method(...
5036 # - Operators. These are matched separately because operator names
5037 # cross non-word boundaries, and trying to match both operators
5038 # and functions at the same time would decrease accuracy of
5039 # matching identifiers.
5040 # string Class::operator*()
5041 if (match and
5042 not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
5043 not Search(r'\boperator\W', line) and
5044 not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
5045 if Search(r'\bconst\b', line):
5046 error(filename, linenum, 'runtime/string', 4,
5047 'For a static/global string constant, use a C style string '
5048 'instead: "%schar%s %s[]".' %
5049 (match.group(1), match.group(2) or '', match.group(3)))
5050 else:
5051 error(filename, linenum, 'runtime/string', 4,
5052 'Static/global string variables are not permitted.')
5054 if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
5055 Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
5056 error(filename, linenum, 'runtime/init', 4,
5057 'You seem to be initializing a member variable with itself.')
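# Illustrative declarations (not from cpplint itself) of what the global/static
# string check above reports:
#   const string kGreeting = "hello";   // error: use 'const char kGreeting[]' instead
#   static string current_name;         // error: static/global string variables not permitted
#   static string* cached_name;         // not flagged: pointer, not a string object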
5060 def CheckPrintf(filename, clean_lines, linenum, error):
5061 """Check for printf related issues.
5063 Args:
5064 filename: The name of the current file.
5065 clean_lines: A CleansedLines instance containing the file.
5066 linenum: The number of the line to check.
5067 error: The function to call with any errors found.
5068 """
5069 line = clean_lines.elided[linenum]
5071 # When snprintf is used, the second argument shouldn't be a literal.
5072 match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
5073 if match and match.group(2) != '0':
5074 # If 2nd arg is zero, snprintf is used to calculate size.
5075 error(filename, linenum, 'runtime/printf', 3,
5076 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
5077 'to snprintf.' % (match.group(1), match.group(2)))
5079 # Check if some verboten C functions are being used.
5080 if Search(r'\bsprintf\s*\(', line):
5081 error(filename, linenum, 'runtime/printf', 5,
5082 'Never use sprintf. Use snprintf instead.')
5083 match = Search(r'\b(strcpy|strcat)\s*\(', line)
5084 if match:
5085 error(filename, linenum, 'runtime/printf', 4,
5086 'Almost always, snprintf is better than %s' % match.group(1))
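# Illustrative calls (not from cpplint itself) showing what CheckPrintf reports:
#   snprintf(buf, 10, "%s", s);            // flagged: prefer sizeof(buf) to a literal size
#   snprintf(buf, sizeof(buf), "%s", s);   // OK
#   sprintf(buf, "%s", s);                 // flagged: never use sprintf
#   strcpy(dest, src);                     // flagged: snprintf is almost always better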
5089 def IsDerivedFunction(clean_lines, linenum):
5090 """Check if current line contains an inherited function.
5092 Args:
5093 clean_lines: A CleansedLines instance containing the file.
5094 linenum: The number of the line to check.
5095 Returns:
5096 True if current line contains a function with "override"
5097 virt-specifier.
5098 """
5099 # Scan back a few lines for start of current function
5100 for i in xrange(linenum, max(-1, linenum - 10), -1):
5101 match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
5102 if match:
5103 # Look for "override" after the matching closing parenthesis
5104 line, _, closing_paren = CloseExpression(
5105 clean_lines, i, len(match.group(1)))
5106 return (closing_paren >= 0 and
5107 Search(r'\boverride\b', line[closing_paren:]))
5108 return False
5111 def IsOutOfLineMethodDefinition(clean_lines, linenum):
5112 """Check if current line contains an out-of-line method definition.
5114 Args:
5115 clean_lines: A CleansedLines instance containing the file.
5116 linenum: The number of the line to check.
5117 Returns:
5118 True if current line contains an out-of-line method definition.
5119 """
5120 # Scan back a few lines for start of current function
5121 for i in xrange(linenum, max(-1, linenum - 10), -1):
5122 if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
5123 return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
5124 return False
5127 def IsInitializerList(clean_lines, linenum):
5128 """Check if current line is inside constructor initializer list.
5130 Args:
5131 clean_lines: A CleansedLines instance containing the file.
5132 linenum: The number of the line to check.
5133 Returns:
5134 True if current line appears to be inside constructor initializer
5135 list, False otherwise.
5136 """
5137 for i in xrange(linenum, 1, -1):
5138 line = clean_lines.elided[i]
5139 if i == linenum:
5140 remove_function_body = Match(r'^(.*)\{\s*$', line)
5141 if remove_function_body:
5142 line = remove_function_body.group(1)
5144 if Search(r'\s:\s*\w+[({]', line):
5145 # A lone colon tends to indicate the start of a constructor
5146 # initializer list. It could also be a ternary operator, which
5147 # also tends to appear in constructor initializer lists as
5148 # opposed to parameter lists.
5149 return True
5150 if Search(r'\}\s*,\s*$', line):
5151 # A closing brace followed by a comma is probably the end of a
5152 # brace-initialized member in constructor initializer list.
5153 return True
5154 if Search(r'[{};]\s*$', line):
5155 # Found one of the following:
5156 # - A closing brace or semicolon, probably the end of the previous
5157 # function.
5158 # - An opening brace, probably the start of current class or namespace.
5160 # Current line is probably not inside an initializer list since
5161 # we saw one of those things without seeing the starting colon.
5162 return False
5164 # Got to the beginning of the file without seeing the start of
5165 # constructor initializer list.
5166 return False
5169 def CheckForNonConstReference(filename, clean_lines, linenum,
5170 nesting_state, error):
5171 """Check for non-const references.
5173 Separate from CheckLanguage since it scans backwards from current
5174 line, instead of scanning forward.
5176 Args:
5177 filename: The name of the current file.
5178 clean_lines: A CleansedLines instance containing the file.
5179 linenum: The number of the line to check.
5180 nesting_state: A NestingState instance which maintains information about
5181 the current stack of nested blocks being parsed.
5182 error: The function to call with any errors found.
5183 """
5184 # Do nothing if there is no '&' on current line.
5185 line = clean_lines.elided[linenum]
5186 if '&' not in line:
5187 return
5189 # If a function is inherited, current function doesn't have much of
5190 # a choice, so any non-const references should not be blamed on
5191 # derived function.
5192 if IsDerivedFunction(clean_lines, linenum):
5193 return
5195 # Don't warn on out-of-line method definitions, as we would warn on the
5196 # in-line declaration, if it isn't marked with 'override'.
5197 if IsOutOfLineMethodDefinition(clean_lines, linenum):
5198 return
5200 # Long type names may be broken across multiple lines, usually in one
5201 # of these forms:
5202 # LongType
5203 # ::LongTypeContinued &identifier
5204 # LongType::
5205 # LongTypeContinued &identifier
5206 # LongType<
5207 # ...>::LongTypeContinued &identifier
5209 # If we detected a type split across two lines, join the previous
5210 # line to current line so that we can match const references
5211 # accordingly.
5213 # Note that this only scans back one line, since scanning back
5214 # arbitrary number of lines would be expensive. If you have a type
5215 # that spans more than 2 lines, please use a typedef.
5216 if linenum > 1:
5217 previous = None
5218 if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
5219 # previous_line\n + ::current_line
5220 previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
5221 clean_lines.elided[linenum - 1])
5222 elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
5223 # previous_line::\n + current_line
5224 previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
5225 clean_lines.elided[linenum - 1])
5226 if previous:
5227 line = previous.group(1) + line.lstrip()
5228 else:
5229 # Check for templated parameter that is split across multiple lines
5230 endpos = line.rfind('>')
5231 if endpos > -1:
5232 (_, startline, startpos) = ReverseCloseExpression(
5233 clean_lines, linenum, endpos)
5234 if startpos > -1 and startline < linenum:
5235 # Found the matching < on an earlier line, collect all
5236 # pieces up to current line.
5237 line = ''
5238 for i in xrange(startline, linenum + 1):
5239 line += clean_lines.elided[i].strip()
5241 # Check for non-const references in function parameters. A single '&' may
5242 # be found in the following places:
5243 # inside expression: binary & for bitwise AND
5244 # inside expression: unary & for taking the address of something
5245 # inside declarators: reference parameter
5246 # We will exclude the first two cases by checking that we are not inside a
5247 # function body, including one that was just introduced by a trailing '{'.
5248 # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
5249 if (nesting_state.previous_stack_top and
5250 not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
5251 isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
5252 # Not at toplevel, not within a class, and not within a namespace
5253 return
5255 # Avoid initializer lists. We only need to scan back from the
5256 # current line for something that starts with ':'.
5258 # We don't need to check the current line, since the '&' would
5259 # appear inside the second set of parentheses on the current line as
5260 # opposed to the first set.
5261 if linenum > 0:
5262 for i in xrange(linenum - 1, max(0, linenum - 10), -1):
5263 previous_line = clean_lines.elided[i]
5264 if not Search(r'[),]\s*$', previous_line):
5265 break
5266 if Match(r'^\s*:\s+\S', previous_line):
5267 return
5269 # Avoid preprocessors
5270 if Search(r'\\\s*$', line):
5271 return
5273 # Avoid constructor initializer lists
5274 if IsInitializerList(clean_lines, linenum):
5275 return
5277 # We allow non-const references in a few standard places, like functions
5278 # called "swap()" or iostream operators like "<<" or ">>". Do not check
5279 # those function parameters.
5281 # We also accept & in static_assert, which looks like a function but
5282 # it's actually a declaration expression.
5283 whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
5284 r'operator\s*[<>][<>]|'
5285 r'static_assert|COMPILE_ASSERT'
5286 r')\s*\(')
5287 if Search(whitelisted_functions, line):
5288 return
5289 elif not Search(r'\S+\([^)]*$', line):
5290 # Don't see a whitelisted function on this line. Actually we
5291 # didn't see any function name on this line, so this is likely a
5292 # multi-line parameter list. Try a bit harder to catch this case.
5293 for i in xrange(2):
5294 if (linenum > i and
5295 Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
5296 return
5298 decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
5299 for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
5300 if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
5301 not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
5302 error(filename, linenum, 'runtime/references', 2,
5303 'Is this a non-const reference? '
5304 'If so, make const or use a pointer: ' +
5305 ReplaceAll(' *<', '<', parameter))
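# Illustrative parameter lists (not from cpplint itself) for the reference
# check above:
#   void Parse(const string& input);   // OK: const reference
#   void Parse(string* output);        // OK: pointer for in/out parameters
#   void Parse(string& output);        // flagged: non-const reference parameter
#   void swap(Buffer& a, Buffer& b);   // OK: swap() is explicitly whitelisted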
5308 def CheckCasts(filename, clean_lines, linenum, error):
5309 """Various cast related checks.
5311 Args:
5312 filename: The name of the current file.
5313 clean_lines: A CleansedLines instance containing the file.
5314 linenum: The number of the line to check.
5315 error: The function to call with any errors found.
5316 """
5317 line = clean_lines.elided[linenum]
5319 # Check to see if they're using a conversion function cast.
5320 # I just try to capture the most common basic types, though there are more.
5321 # Parameterless conversion functions, such as bool(), are allowed as they are
5322 # probably a member operator declaration or default constructor.
5323 match = Search(
5324 r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
5325 r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
5326 r'(\([^)].*)', line)
5327 expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
5328 if match and not expecting_function:
5329 matched_type = match.group(2)
5331 # matched_new_or_template is used to silence two false positives:
5332 # - New operators
5333 # - Template arguments with function types
5335 # For template arguments, we match on types immediately following
5336 # an opening bracket without any spaces. This is a fast way to
5337 # silence the common case where the function type is the first
5338 # template argument. False negative with less-than comparison is
5339 # avoided because those operators are usually followed by a space.
5341 # function<double(double)> // bracket + no space = false positive
5342 # value < double(42) // bracket + space = true positive
5343 matched_new_or_template = match.group(1)
5345 # Avoid arrays by looking for brackets that come after the closing
5346 # parenthesis.
5347 if Match(r'\([^()]+\)\s*\[', match.group(3)):
5348 return
5350 # Other things to ignore:
5351 # - Function pointers
5352 # - Casts to pointer types
5353 # - Placement new
5354 # - Alias declarations
5355 matched_funcptr = match.group(3)
5356 if (matched_new_or_template is None and
5357 not (matched_funcptr and
5358 (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
5359 matched_funcptr) or
5360 matched_funcptr.startswith('(*)'))) and
5361 not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
5362 not Search(r'new\(\S+\)\s*' + matched_type, line)):
5363 error(filename, linenum, 'readability/casting', 4,
5364 'Using deprecated casting style. '
5365 'Use static_cast<%s>(...) instead' %
5366 matched_type)
5368 if not expecting_function:
5369 CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
5370 r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
5372 # This doesn't catch all cases. Consider (const char * const)"hello".
5374 # (char *) "foo" should always be a const_cast (reinterpret_cast won't
5375 # compile).
5376 if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
5377 r'\((char\s?\*+\s?)\)\s*"', error):
5378 pass
5379 else:
5380 # Check pointer casts for other than string constants
5381 CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
5382 r'\((\w+\s?\*+\s?)\)', error)
5384 # In addition, we look for people taking the address of a cast. This
5385 # is dangerous -- casts can assign to temporaries, so the pointer doesn't
5386 # point where you think.
5388 # Some non-identifier character is required before the '&' for the
5389 # expression to be recognized as a cast. These are casts:
5390 # expression = &static_cast<int*>(temporary());
5391 # function(&(int*)(temporary()));
5393 # This is not a cast:
5394 # reference_type&(int* function_param);
5395 match = Search(
5396 r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
5397 r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
5398 if match:
5399 # Try a better error message when the & is bound to something
5400 # dereferenced by the casted pointer, as opposed to the casted
5401 # pointer itself.
5402 parenthesis_error = False
5403 match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
5404 if match:
5405 _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
5406 if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
5407 _, y2, x2 = CloseExpression(clean_lines, y1, x1)
5408 if x2 >= 0:
5409 extended_line = clean_lines.elided[y2][x2:]
5410 if y2 < clean_lines.NumLines() - 1:
5411 extended_line += clean_lines.elided[y2 + 1]
5412 if Match(r'\s*(?:->|\[)', extended_line):
5413 parenthesis_error = True
5415 if parenthesis_error:
5416 error(filename, linenum, 'readability/casting', 4,
5417 ('Are you taking an address of something dereferenced '
5418 'from a cast? Wrapping the dereferenced expression in '
5419 'parentheses will make the binding more obvious'))
5420 else:
5421 error(filename, linenum, 'runtime/casting', 4,
5422 ('Are you taking an address of a cast? '
5423 'This is dangerous: could be a temp var. '
5424 'Take the address before doing the cast, rather than after'))
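# Illustrative casts (not from cpplint itself) covered by the checks above and
# by CheckCStyleCast below:
#   int n = int(value);          // flagged: deprecated conversion-function cast
#   int n = (int)value;          // flagged: use static_cast<int>(value) instead
#   char* p = (char*)"hello";    // flagged: string constants need const_cast
#   int* q = &(int*)(Temp());    // flagged: taking the address of a cast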
5427 def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
5428 """Checks for a C-style cast by looking for the pattern.
5430 Args:
5431 filename: The name of the current file.
5432 clean_lines: A CleansedLines instance containing the file.
5433 linenum: The number of the line to check.
5434 cast_type: The string for the C++ cast to recommend. This is either
5435 reinterpret_cast, static_cast, or const_cast, depending.
5436 pattern: The regular expression used to find C-style casts.
5437 error: The function to call with any errors found.
5439 Returns:
5440 True if an error was emitted.
5441 False otherwise.
5442 """
5443 line = clean_lines.elided[linenum]
5444 match = Search(pattern, line)
5445 if not match:
5446 return False
5448 # Exclude lines with keywords that tend to look like casts
5449 context = line[0:match.start(1) - 1]
5450 if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
5451 return False
5453 # Try expanding current context to see if we are one level of
5454 # parentheses inside a macro.
5455 if linenum > 0:
5456 for i in xrange(linenum - 1, max(0, linenum - 5), -1):
5457 context = clean_lines.elided[i] + context
5458 if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
5459 return False
5461 # operator++(int) and operator--(int)
5462 if context.endswith(' operator++') or context.endswith(' operator--'):
5463 return False
5465 # A single unnamed argument for a function tends to look like old style cast.
5466 # If we see those, don't issue warnings for deprecated casts.
5467 remainder = line[match.end(0):]
5468 if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
5469 remainder):
5470 return False
5472 # At this point, all that should be left is actual casts.
5473 error(filename, linenum, 'readability/casting', 4,
5474 'Using C-style cast. Use %s<%s>(...) instead' %
5475 (cast_type, match.group(1)))
5477 return True
5480 def ExpectingFunctionArgs(clean_lines, linenum):
5481 """Checks whether where function type arguments are expected.
5483 Args:
5484 clean_lines: A CleansedLines instance containing the file.
5485 linenum: The number of the line to check.
5487 Returns:
5488 True if the line at 'linenum' is inside something that expects arguments
5489 of function types.
5490 """
5491 line = clean_lines.elided[linenum]
5492 return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
5493 (linenum >= 2 and
5494 (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
5495 clean_lines.elided[linenum - 1]) or
5496 Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
5497 clean_lines.elided[linenum - 2]) or
5498 Search(r'\bstd::m?function\s*\<\s*$',
5499 clean_lines.elided[linenum - 1]))))
5502 _HEADERS_CONTAINING_TEMPLATES = (
5503 ('<deque>', ('deque',)),
5504 ('<functional>', ('unary_function', 'binary_function',
5505 'plus', 'minus', 'multiplies', 'divides', 'modulus',
5506 'negate',
5507 'equal_to', 'not_equal_to', 'greater', 'less',
5508 'greater_equal', 'less_equal',
5509 'logical_and', 'logical_or', 'logical_not',
5510 'unary_negate', 'not1', 'binary_negate', 'not2',
5511 'bind1st', 'bind2nd',
5512 'pointer_to_unary_function',
5513 'pointer_to_binary_function',
5514 'ptr_fun',
5515 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
5516 'mem_fun_ref_t',
5517 'const_mem_fun_t', 'const_mem_fun1_t',
5518 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
5519 'mem_fun_ref',
5520 )),
5521 ('<limits>', ('numeric_limits',)),
5522 ('<list>', ('list',)),
5523 ('<map>', ('map', 'multimap',)),
5524 ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
5525 'unique_ptr', 'weak_ptr')),
5526 ('<queue>', ('queue', 'priority_queue',)),
5527 ('<set>', ('set', 'multiset',)),
5528 ('<stack>', ('stack',)),
5529 ('<string>', ('char_traits', 'basic_string',)),
5530 ('<tuple>', ('tuple',)),
5531 ('<unordered_map>', ('unordered_map', 'unordered_multimap')),
5532 ('<unordered_set>', ('unordered_set', 'unordered_multiset')),
5533 ('<utility>', ('pair',)),
5534 ('<vector>', ('vector',)),
5536 # gcc extensions.
5537 # Note: std::hash is their hash, ::hash is our hash
5538 ('<hash_map>', ('hash_map', 'hash_multimap',)),
5539 ('<hash_set>', ('hash_set', 'hash_multiset',)),
5540 ('<slist>', ('slist',)),
5541 )
5543 _HEADERS_MAYBE_TEMPLATES = (
5544 ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
5545 'transform',
5546 )),
5547 ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
5548 )
5550 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
5552 _re_pattern_headers_maybe_templates = []
5553 for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
5554 for _template in _templates:
5555 # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
5556 # type::max().
5557 _re_pattern_headers_maybe_templates.append(
5558 (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
5559 _template,
5560 _header))
5562 # Other scripts may reach in and modify this pattern.
5563 _re_pattern_templates = []
5564 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
5565 for _template in _templates:
5566 _re_pattern_templates.append(
5567 (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
5568 _template + '<>',
5569 _header))
5572 def FilesBelongToSameModule(filename_cc, filename_h):
5573 """Check if these two filenames belong to the same module.
5575 The concept of a 'module' here is as follows:
5576 foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
5577 same 'module' if they are in the same directory.
5578 some/path/public/xyzzy and some/path/internal/xyzzy are also considered
5579 to belong to the same module here.
5581 If the filename_cc contains a longer path than the filename_h, for example,
5582 '/absolute/path/to/base/sysinfo.cc', and this file would include
5583 'base/sysinfo.h', this function also produces the prefix needed to open the
5584 header. This is used by the caller of this function to more robustly open the
5585 header file. We don't have access to the real include paths in this context,
5586 so we need this guesswork here.
5588 Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
5589 according to this implementation. Because of this, this function gives
5590 some false positives. This should be sufficiently rare in practice.
5592 Args:
5593 filename_cc: is the path for the source (e.g. .cc) file
5594 filename_h: is the path for the header file
5596 Returns:
5597 Tuple with a bool and a string:
5598 bool: True if filename_cc and filename_h belong to the same module.
5599 string: the additional prefix needed to open the header file.
5600 """
5601 fileinfo_cc = FileInfo(filename_cc)
5602 if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
5603 return (False, '')
5605 fileinfo_h = FileInfo(filename_h)
5606 if not fileinfo_h.Extension().lstrip('.') in GetHeaderExtensions():
5607 return (False, '')
5609 filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
5610 matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
5611 if matched_test_suffix:
5612 filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
5614 filename_cc = filename_cc.replace('/public/', '/')
5615 filename_cc = filename_cc.replace('/internal/', '/')
5617 filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
5618 if filename_h.endswith('-inl'):
5619 filename_h = filename_h[:-len('-inl')]
5620 filename_h = filename_h.replace('/public/', '/')
5621 filename_h = filename_h.replace('/internal/', '/')
5623 files_belong_to_same_module = filename_cc.endswith(filename_h)
5624 common_path = ''
5625 if files_belong_to_same_module:
5626 common_path = filename_cc[:-len(filename_h)]
5627 return files_belong_to_same_module, common_path
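# Illustrative result (derived from the docstring above, not from a real run):
#   FilesBelongToSameModule('/absolute/path/to/base/sysinfo.cc', 'base/sysinfo.h')
#     -> (True, '/absolute/path/to/')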
5630 def UpdateIncludeState(filename, include_dict, io=codecs):
5631 """Fill up the include_dict with new includes found from the file.
5633 Args:
5634 filename: the name of the header to read.
5635 include_dict: a dictionary in which the headers are inserted.
5636 io: The io factory to use to read the file. Provided for testability.
5638 Returns:
5639 True if a header was successfully added. False otherwise.
5640 """
5641 headerfile = None
5642 try:
5643 headerfile = io.open(filename, 'r', 'utf8', 'replace')
5644 except IOError:
5645 return False
5646 linenum = 0
5647 for line in headerfile:
5648 linenum += 1
5649 clean_line = CleanseComments(line)
5650 match = _RE_PATTERN_INCLUDE.search(clean_line)
5651 if match:
5652 include = match.group(2)
5653 include_dict.setdefault(include, linenum)
5654 return True
5657 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
5658 io=codecs):
5659 """Reports for missing stl includes.
5661 This function will output warnings to make sure you are including the headers
5662 necessary for the stl containers and functions that you use. We only give one
5663 reason to include a header. For example, if you use both equal_to<> and
5664 less<> in a .h file, only one (the latter in the file) of these will be
5665 reported as a reason to include <functional>.
5667 Args:
5668 filename: The name of the current file.
5669 clean_lines: A CleansedLines instance containing the file.
5670 include_state: An _IncludeState instance.
5671 error: The function to call with any errors found.
5672 io: The IO factory to use to read the header file. Provided for unittest
5673 injection.
5674 """
5675 required = {} # A map of header name to linenumber and the template entity.
5676 # Example of required: { '<functional>': (1219, 'less<>') }
5678 for linenum in range(clean_lines.NumLines()):
5679 line = clean_lines.elided[linenum]
5680 if not line or line[0] == '#':
5681 continue
5683 # String is special -- it is a non-templatized type in STL.
5684 matched = _RE_PATTERN_STRING.search(line)
5685 if matched:
5686 # Don't warn about strings in non-STL namespaces:
5687 # (We check only the first match per line; good enough.)
5688 prefix = line[:matched.start()]
5689 if prefix.endswith('std::') or not prefix.endswith('::'):
5690 required['<string>'] = (linenum, 'string')
5692 for pattern, template, header in _re_pattern_headers_maybe_templates:
5693 if pattern.search(line):
5694 required[header] = (linenum, template)
5696 # The following function is just a speed up, no semantics are changed.
5697 if not '<' in line: # Reduces the cpu time usage by skipping lines.
5698 continue
5700 for pattern, template, header in _re_pattern_templates:
5701 matched = pattern.search(line)
5702 if matched:
5703 # Don't warn about IWYU in non-STL namespaces:
5704 # (We check only the first match per line; good enough.)
5705 prefix = line[:matched.start()]
5706 if prefix.endswith('std::') or not prefix.endswith('::'):
5707 required[header] = (linenum, template)
5709 # The policy is that if you #include something in foo.h you don't need to
5710 # include it again in foo.cc. Here, we will look at possible includes.
5711 # Let's flatten the include_state include_list and copy it into a dictionary.
5712 include_dict = dict([item for sublist in include_state.include_list
5713 for item in sublist])
5715 # Did we find the header for this file (if any) and successfully load it?
5716 header_found = False
5718 # Use the absolute path so that matching works properly.
5719 abs_filename = FileInfo(filename).FullName()
5721 # For Emacs's flymake.
5722 # If cpplint is invoked from Emacs's flymake, a temporary file is generated
5723 # by flymake and that file name might end with '_flymake.cc'. In that case,
5724 # restore original file name here so that the corresponding header file can be
5725 # found.
5726 # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
5727 # instead of 'foo_flymake.h'
5728 abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
5730 # include_dict is modified during iteration, so we iterate over a copy of
5731 # the keys.
5732 header_keys = list(include_dict.keys())
5733 for header in header_keys:
5734 (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
5735 fullpath = common_path + header
5736 if same_module and UpdateIncludeState(fullpath, include_dict, io):
5737 header_found = True
5739 # If we can't find the header file for a .cc, assume it's because we don't
5740 # know where to look. In that case we'll give up as we're not sure they
5741 # didn't include it in the .h file.
5742 # TODO(unknown): Do a better job of finding .h files so we are confident that
5743 # not having the .h file means there isn't one.
5744 if not header_found:
5745 for extension in GetNonHeaderExtensions():
5746 if filename.endswith('.' + extension):
5747 return
5749 # All the lines have been processed, report the errors found.
5750 for required_header_unstripped in sorted(required, key=required.__getitem__):
5751 template = required[required_header_unstripped][1]
5752 if required_header_unstripped.strip('<>"') not in include_dict:
5753 error(filename, required[required_header_unstripped][0],
5754 'build/include_what_you_use', 4,
5755 'Add #include ' + required_header_unstripped + ' for ' + template)
5758 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
5761 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
5762 """Check that make_pair's template arguments are deduced.
5764 G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
5765 specified explicitly, and such use isn't intended in any case.
5767 Args:
5768 filename: The name of the current file.
5769 clean_lines: A CleansedLines instance containing the file.
5770 linenum: The number of the line to check.
5771 error: The function to call with any errors found.
5772 """
5773 line = clean_lines.elided[linenum]
5774 match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
5775 if match:
5776 error(filename, linenum, 'build/explicit_make_pair',
5777 4, # 4 = high confidence
5778 'For C++11-compatibility, omit template arguments from make_pair'
5779 ' OR use pair directly OR if appropriate, construct a pair directly')
5782 def CheckRedundantVirtual(filename, clean_lines, linenum, error):
5783 """Check if line contains a redundant "virtual" function-specifier.
5785 Args:
5786 filename: The name of the current file.
5787 clean_lines: A CleansedLines instance containing the file.
5788 linenum: The number of the line to check.
5789 error: The function to call with any errors found.
5790 """
5791 # Look for "virtual" on current line.
5792 line = clean_lines.elided[linenum]
5793 virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
5794 if not virtual: return
5796 # Ignore "virtual" keywords that are near access-specifiers. These
5797 # are only used in class base-specifier and do not apply to member
5798 # functions.
5799 if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
5800 Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
5801 return
5803 # Ignore the "virtual" keyword from virtual base classes. Usually
5804 # there is a colon on the same line in these cases (virtual base
5805 # classes are rare in google3 because multiple inheritance is rare).
5806 if Match(r'^.*[^:]:[^:].*$', line): return
5808 # Look for the next opening parenthesis. This is the start of the
5809 # parameter list (possibly on the next line shortly after virtual).
5810 # TODO(unknown): doesn't work if there are virtual functions with
5811 # decltype() or other things that use parentheses, but csearch suggests
5812 # that this is rare.
5813 end_col = -1
5814 end_line = -1
5815 start_col = len(virtual.group(2))
5816 for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
5817 line = clean_lines.elided[start_line][start_col:]
5818 parameter_list = Match(r'^([^(]*)\(', line)
5819 if parameter_list:
5820 # Match parentheses to find the end of the parameter list
5821 (_, end_line, end_col) = CloseExpression(
5822 clean_lines, start_line, start_col + len(parameter_list.group(1)))
5823 break
5824 start_col = 0
5826 if end_col < 0:
5827 return # Couldn't find end of parameter list, give up
5829 # Look for "override" or "final" after the parameter list
5830 # (possibly on the next few lines).
5831 for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
5832 line = clean_lines.elided[i][end_col:]
5833 match = Search(r'\b(override|final)\b', line)
5834 if match:
5835 error(filename, linenum, 'readability/inheritance', 4,
5836 ('"virtual" is redundant since function is '
5837 'already declared as "%s"' % match.group(1)))
5839 # Set end_col to check whole lines after we are done with the
5840 # first line.
5841 end_col = 0
5842 if Search(r'[^\w]\s*$', line):
5843 break
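# Illustrative declarations (not from cpplint itself) of what the redundant
# "virtual" check above reports:
#   virtual void Draw() const override;   // flagged: 'virtual' is redundant with 'override'
#   virtual void Draw() const;            // OK: plain virtual declaration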
5846 def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
5847 """Check if line contains a redundant "override" or "final" virt-specifier.
5849 Args:
5850 filename: The name of the current file.
5851 clean_lines: A CleansedLines instance containing the file.
5852 linenum: The number of the line to check.
5853 error: The function to call with any errors found.
5854 """
5855 # Look for closing parenthesis nearby. We need one to confirm where
5856 # the declarator ends and where the virt-specifier starts to avoid
5857 # false positives.
5858 line = clean_lines.elided[linenum]
5859 declarator_end = line.rfind(')')
5860 if declarator_end >= 0:
5861 fragment = line[declarator_end:]
5862 else:
5863 if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
5864 fragment = line
5865 else:
5866 return
5868 # Check that at most one of "override" or "final" is present, not both
5869 if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
5870 error(filename, linenum, 'readability/inheritance', 4,
5871 ('"override" is redundant since function is '
5872 'already declared as "final"'))
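# Illustrative declarations (not from cpplint itself) of what the check above
# reports:
#   void Draw() override final;   // flagged: 'override' is redundant with 'final'
#   void Draw() override;         // OK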
5877 # Returns true if we are at a new block, and it is directly
5878 # inside of a namespace.
5879 def IsBlockInNameSpace(nesting_state, is_forward_declaration):
5880 """Checks that the new block is directly in a namespace.
5882 Args:
5883 nesting_state: The _NestingState object that contains info about our state.
5884 is_forward_declaration: If the class is a forward declared class.
5885 Returns:
5886 Whether or not the new block is directly in a namespace.
5887 """
5888 if is_forward_declaration:
5889 return len(nesting_state.stack) >= 1 and (
5890 isinstance(nesting_state.stack[-1], _NamespaceInfo))
5893 return (len(nesting_state.stack) > 1 and
5894 nesting_state.stack[-1].check_namespace_indentation and
5895 isinstance(nesting_state.stack[-2], _NamespaceInfo))
5898 def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
5899 raw_lines_no_comments, linenum):
5900 """This method determines if we should apply our namespace indentation check.
5902 Args:
5903 nesting_state: The current nesting state.
5904 is_namespace_indent_item: If we just put a new class on the stack, True.
5905 If the top of the stack is not a class, or we did not recently
5906 add the class, False.
5907 raw_lines_no_comments: The lines without the comments.
5908 linenum: The current line number we are processing.
5910 Returns:
5911 True if we should apply our namespace indentation check. Currently, it
5912 only works for classes and namespaces inside of a namespace.
5913 """
5915 is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
5916 linenum)
5918 if not (is_namespace_indent_item or is_forward_declaration):
5919 return False
5921 # If we are in a macro, we do not want to check the namespace indentation.
5922 if IsMacroDefinition(raw_lines_no_comments, linenum):
5923 return False
5925 return IsBlockInNameSpace(nesting_state, is_forward_declaration)
5928 # Call this method if the line is directly inside of a namespace.
5929 # If the line above is blank (excluding comments) or the start of
5930 # an inner namespace, it cannot be indented.
5931 def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
5932 error):
5933 line = raw_lines_no_comments[linenum]
5934 if Match(r'^\s+', line):
5935 error(filename, linenum, 'runtime/indentation_namespace', 4,
5936 'Do not indent within a namespace')
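# Illustrative layout (not from cpplint itself) for the indentation check above:
#   namespace frobber {
#   class Frobnicator;     // OK: items start at column zero
#     class Gadget;        // flagged: do not indent within a namespace
#   }  // namespace frobber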
5939 def ProcessLine(filename, file_extension, clean_lines, line,
5940 include_state, function_state, nesting_state, error,
5941 extra_check_functions=None):
5942 """Processes a single line in the file.
5944 Args:
5945 filename: Filename of the file that is being processed.
5946 file_extension: The extension (dot not included) of the file.
5947 clean_lines: An array of strings, each representing a line of the file,
5948 with comments stripped.
5949 line: Number of line being processed.
5950 include_state: An _IncludeState instance in which the headers are inserted.
5951 function_state: A _FunctionState instance which counts function lines, etc.
5952 nesting_state: A NestingState instance which maintains information about
5953 the current stack of nested blocks being parsed.
5954 error: A callable to which errors are reported, which takes 4 arguments:
5955 filename, line number, error level, and message
5956 extra_check_functions: An array of additional check functions that will be
5957 run on each source line. Each function takes 4
5958 arguments: filename, clean_lines, line, error
5959 """
5960 raw_lines = clean_lines.raw_lines
5961 ParseNolintSuppressions(filename, raw_lines[line], line, error)
5962 nesting_state.Update(filename, clean_lines, line, error)
5963 CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
5964 error)
5965 if nesting_state.InAsmBlock(): return
5966 CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
5967 CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
5968 CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
5969 CheckLanguage(filename, clean_lines, line, file_extension, include_state,
5970 nesting_state, error)
5971 CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
5972 CheckForNonStandardConstructs(filename, clean_lines, line,
5973 nesting_state, error)
5974 CheckVlogArguments(filename, clean_lines, line, error)
5975 CheckPosixThreading(filename, clean_lines, line, error)
5976 CheckInvalidIncrement(filename, clean_lines, line, error)
5977 CheckMakePairUsesDeduction(filename, clean_lines, line, error)
5978 CheckRedundantVirtual(filename, clean_lines, line, error)
5979 CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
5980 if extra_check_functions:
5981 for check_fn in extra_check_functions:
5982 check_fn(filename, clean_lines, line, error)
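# Illustrative sketch (not part of the original cpplint source): the shape of an
# extra_check_functions entry accepted by ProcessLine above. Only the
# 4-argument signature comes from the docstring; the check itself and its
# category are hypothetical.
#
#   def CheckNoTabs(filename, clean_lines, linenum, error):
#     if '\t' in clean_lines.raw_lines[linenum]:
#       error(filename, linenum, 'whitespace/tab', 1, 'Tab found; use spaces.')
#
#   ProcessLine(filename, file_extension, clean_lines, linenum, include_state,
#               function_state, nesting_state, error,
#               extra_check_functions=[CheckNoTabs])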
5984 def FlagCxx11Features(filename, clean_lines, linenum, error):
5985 """Flag those c++11 features that we only allow in certain places.
5987 Args:
5988 filename: The name of the current file.
5989 clean_lines: A CleansedLines instance containing the file.
5990 linenum: The number of the line to check.
5991 error: The function to call with any errors found.
5992 """
5993 line = clean_lines.elided[linenum]
5995 include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
5997 # Flag unapproved C++ TR1 headers.
5998 if include and include.group(1).startswith('tr1/'):
5999 error(filename, linenum, 'build/c++tr1', 5,
6000 ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
6002 # Flag unapproved C++11 headers.
6003 if include and include.group(1) in ('cfenv',
6004 'condition_variable',
6005 'fenv.h',
6006 'future',
6007 'mutex',
6008 'thread',
6009 'chrono',
6010 'ratio',
6011 'regex',
6012 'system_error',
6013 ):
6014 error(filename, linenum, 'build/c++11', 5,
6015 ('<%s> is an unapproved C++11 header.') % include.group(1))
6017 # The only place where we need to worry about C++11 keywords and library
6018 # features in preprocessor directives is in macro definitions.
6019 if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
6021 # These are classes and free functions. The classes are always
6022 # mentioned as std::*, but we only catch the free functions if
6023 # they're not found by ADL. They're alphabetical by header.
6024 for top_name in (
6025 # type_traits
6026 'alignment_of',
6027 'aligned_union',
6028 ):
6029 if Search(r'\bstd::%s\b' % top_name, line):
6030 error(filename, linenum, 'build/c++11', 5,
6031 ('std::%s is an unapproved C++11 class or function. Send c-style '
6032 'an example of where it would make your code more readable, and '
6033 'they may let you use it.') % top_name)
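# Illustrative sketch (not part of the original cpplint source): includes that
# the checks above would flag, per the header lists hard-coded in
# FlagCxx11Features:
#
#   #include <tr1/memory>   // build/c++tr1: unapproved TR1 header
#   #include <mutex>        // build/c++11: <mutex> is an unapproved C++11 header
#   #include <vector>       // not flagged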
6036 def FlagCxx14Features(filename, clean_lines, linenum, error):
6037 """Flag those C++14 features that we restrict.
6039 Args:
6040 filename: The name of the current file.
6041 clean_lines: A CleansedLines instance containing the file.
6042 linenum: The number of the line to check.
6043 error: The function to call with any errors found.
6044 """
6045 line = clean_lines.elided[linenum]
6047 include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
6049 # Flag unapproved C++14 headers.
6050 if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
6051 error(filename, linenum, 'build/c++14', 5,
6052 ('<%s> is an unapproved C++14 header.') % include.group(1))
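# Illustrative sketch (not part of the original cpplint source): per the list
# above, only <scoped_allocator> and <shared_mutex> trigger build/c++14:
#
#   #include <shared_mutex>  // build/c++14: unapproved C++14 header
#   #include <memory>        // not flagged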
6055 def ProcessFileData(filename, file_extension, lines, error,
6056 extra_check_functions=None):
6057 """Performs lint checks and reports any errors to the given error function.
6059 Args:
6060 filename: Filename of the file that is being processed.
6061 file_extension: The extension (dot not included) of the file.
6062 lines: An array of strings, each representing a line of the file, with the
6063 last element being empty if the file is terminated with a newline.
6064 error: A callable to which errors are reported, which takes 4 arguments:
6065 filename, line number, error level, and message
6066 extra_check_functions: An array of additional check functions that will be
6067 run on each source line. Each function takes 4
6068 arguments: filename, clean_lines, line, error
6069 """
6070 lines = (['// marker so line numbers and indices both start at 1'] + lines +
6071 ['// marker so line numbers end in a known way'])
6073 include_state = _IncludeState()
6074 function_state = _FunctionState()
6075 nesting_state = NestingState()
6077 ResetNolintSuppressions()
6079 CheckForCopyright(filename, lines, error)
6080 ProcessGlobalSuppresions(lines)
6081 RemoveMultiLineComments(filename, lines, error)
6082 clean_lines = CleansedLines(lines)
6084 if file_extension in GetHeaderExtensions():
6085 CheckForHeaderGuard(filename, clean_lines, error)
6087 for line in range(clean_lines.NumLines()):
6088 ProcessLine(filename, file_extension, clean_lines, line,
6089 include_state, function_state, nesting_state, error,
6090 extra_check_functions)
6091 FlagCxx11Features(filename, clean_lines, line, error)
6092 nesting_state.CheckCompletedBlocks(filename, error)
6094 CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
6096 # Check that the .cc file has included its header if it exists.
6097 if _IsSourceExtension(file_extension):
6098 CheckHeaderFileIncluded(filename, include_state, error)
6100 # We check here rather than inside ProcessLine so that we see raw
6101 # lines rather than "cleaned" lines.
6102 CheckForBadCharacters(filename, lines, error)
6104 CheckForNewlineAtEOF(filename, lines, error)
6106 def ProcessConfigOverrides(filename):
6107 """Loads the configuration files and processes the config overrides.
6109 Args:
6110 filename: The name of the file being processed by the linter.
6112 Returns:
6113 False if the current |filename| should not be processed further.
6114 """
6116 abs_filename = os.path.abspath(filename)
6117 cfg_filters = []
6118 keep_looking = True
6119 while keep_looking:
6120 abs_path, base_name = os.path.split(abs_filename)
6121 if not base_name:
6122 break # Reached the root directory.
6124 cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
6125 abs_filename = abs_path
6126 if not os.path.isfile(cfg_file):
6127 continue
6129 try:
6130 with open(cfg_file) as file_handle:
6131 for line in file_handle:
6132 line, _, _ = line.partition('#') # Remove comments.
6133 if not line.strip():
6134 continue
6136 name, _, val = line.partition('=')
6137 name = name.strip()
6138 val = val.strip()
6139 if name == 'set noparent':
6140 keep_looking = False
6141 elif name == 'filter':
6142 cfg_filters.append(val)
6143 elif name == 'exclude_files':
6144 # When matching exclude_files pattern, use the base_name of
6145 # the current file name or the directory name we are processing.
6146 # For example, if we are checking for lint errors in /foo/bar/baz.cc
6147 # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
6148 # file's "exclude_files" filter is meant to be checked against "bar"
6149 # and not "baz" nor "bar/baz.cc".
6150 if base_name:
6151 pattern = re.compile(val)
6152 if pattern.match(base_name):
6153 _cpplint_state.PrintInfo('Ignoring "%s": file excluded by '
6154 '"%s". File path component "%s" matches pattern "%s"\n' %
6155 (filename, cfg_file, base_name, val))
6156 return False
6157 elif name == 'linelength':
6158 global _line_length
6159 try:
6160 _line_length = int(val)
6161 except ValueError:
6162 _cpplint_state.PrintError('Line length must be numeric.')
6163 elif name == 'extensions':
6164 global _valid_extensions
6165 try:
6166 extensions = [ext.strip() for ext in val.split(',')]
6167 _valid_extensions = set(extensions)
6168 except ValueError:
6169 sys.stderr.write('Extensions should be a comma-separated list of values; '
6170 'for example: extensions=hpp,cpp\n'
6171 'This could not be parsed: "%s"' % (val,))
6172 elif name == 'headers':
6173 global _header_extensions
6174 try:
6175 extensions = [ext.strip() for ext in val.split(',')]
6176 _header_extensions = set(extensions)
6177 except ValueError:
6178 sys.stderr.write('Header extensions should be a comma-separated list of values; '
6179 'for example: headers=hpp,h\n'
6180 'This could not be parsed: "%s"' % (val,))
6181 elif name == 'root':
6182 global _root
6183 _root = val
6184 else:
6185 _cpplint_state.PrintError(
6186 'Invalid configuration option (%s) in file %s\n' %
6187 (name, cfg_file))
6189 except IOError:
6190 _cpplint_state.PrintError(
6191 "Skipping config file '%s': Can't open for reading\n" % cfg_file)
6192 keep_looking = False
6194 # Apply all the accumulated filters in reverse order (top-level directory
6195 # config options having the least priority).
6196 for cfg_filter in reversed(cfg_filters):
6197 _AddFilters(cfg_filter)
6199 return True
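# Illustrative sketch (not part of the original cpplint source): a CPPLINT.cfg
# using the option names parsed above; the values are examples only.
#
#   set noparent
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*_test\.cc
#   linelength=100
#   root=include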
6202 def ProcessFile(filename, vlevel, extra_check_functions=None):
6203 """Does google-lint on a single file.
6205 Args:
6206 filename: The name of the file to parse.
6208 vlevel: The level of errors to report. Every error of confidence
6209 >= verbose_level will be reported. 0 is a good default.
6211 extra_check_functions: An array of additional check functions that will be
6212 run on each source line. Each function takes 4
6213 arguments: filename, clean_lines, line, error
6214 """
6216 _SetVerboseLevel(vlevel)
6217 _BackupFilters()
6219 if not ProcessConfigOverrides(filename):
6220 _RestoreFilters()
6221 return
6223 lf_lines = []
6224 crlf_lines = []
6225 try:
6226 # Support the UNIX convention of using "-" for stdin. Note that
6227 # we are not opening the file with universal newline support
6228 # (which codecs doesn't support anyway), so the resulting lines do
6229 # contain trailing '\r' characters if we are reading a file that
6230 # has CRLF endings.
6231 # If after the split a trailing '\r' is present, it is removed
6232 # below.
6233 if filename == '-':
6234 lines = codecs.StreamReaderWriter(sys.stdin,
6235 codecs.getreader('utf8'),
6236 codecs.getwriter('utf8'),
6237 'replace').read().split('\n')
6238 else:
6239 lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
6241 # Remove trailing '\r'.
6242 # The -1 accounts for the extra trailing blank line we get from split()
6243 for linenum in range(len(lines) - 1):
6244 if lines[linenum].endswith('\r'):
6245 lines[linenum] = lines[linenum].rstrip('\r')
6246 crlf_lines.append(linenum + 1)
6247 else:
6248 lf_lines.append(linenum + 1)
6250 except IOError:
6251 _cpplint_state.PrintError(
6252 "Skipping input '%s': Can't open for reading\n" % filename)
6253 _RestoreFilters()
6254 return
6256 # Note, if no dot is found, this will give the entire filename as the ext.
6257 file_extension = filename[filename.rfind('.') + 1:]
6259 # When reading from stdin, the extension is unknown, so no cpplint tests
6260 # should rely on the extension.
6261 if filename != '-' and file_extension not in GetAllExtensions():
6262 _cpplint_state.PrintError('Ignoring %s; not a valid file name '
6263 '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
6264 else:
6265 ProcessFileData(filename, file_extension, lines, Error,
6266 extra_check_functions)
6268 # If end-of-line sequences are a mix of LF and CR-LF, issue
6269 # warnings on the lines with CR.
6271 # Don't issue any warnings if all lines are uniformly LF or CR-LF,
6272 # since critique can handle these just fine, and the style guide
6273 # doesn't dictate a particular end of line sequence.
6275 # We can't depend on os.linesep to determine what the desired
6276 # end-of-line sequence should be, since that will return the
6277 # server-side end-of-line sequence.
6278 if lf_lines and crlf_lines:
6279 # Warn on every line with CR. An alternative approach might be to
6280 # check whether the file is mostly CRLF or just LF, and warn on the
6281 # minority; we bias toward LF here since most tools prefer LF.
6282 for linenum in crlf_lines:
6283 Error(filename, linenum, 'whitespace/newline', 1,
6284 'Unexpected \\r (^M) found; better to use only \\n')
6286 _cpplint_state.PrintInfo('Done processing %s\n' % filename)
6287 _RestoreFilters()
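# Illustrative usage (not part of the original cpplint source): ProcessFile
# accepts a path or '-' for stdin, as handled above. The file name is
# hypothetical.
#
#   ProcessFile('src/widget.cc', _cpplint_state.verbose_level)
#   cat src/widget.cc | python cpplint.py -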
6290 def PrintUsage(message):
6291 """Prints a brief usage string and exits, optionally with an error message.
6293 Args:
6294 message: The optional error message.
6295 """
6296 sys.stderr.write(_USAGE)
6298 if message:
6299 sys.exit('\nFATAL ERROR: ' + message)
6300 else:
6301 sys.exit(0)
6304 def PrintCategories():
6305 """Prints a list of all the error-categories used by error messages.
6307 These are the categories used to filter messages via --filter.
6308 """
6309 sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
6310 sys.exit(0)
6313 def ParseArguments(args):
6314 """Parses the command line arguments.
6316 This may set the output format and verbosity level as side-effects.
6318 Args:
6319 args: The command line arguments.
6321 Returns:
6322 The list of filenames to lint.
6323 """
6324 try:
6325 (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
6326 'counting=',
6327 'filter=',
6328 'root=',
6329 'repository=',
6330 'linelength=',
6331 'extensions=',
6332 'exclude=',
6333 'headers=',
6334 'quiet',
6335 'recursive'])
6336 except getopt.GetoptError:
6337 PrintUsage('Invalid arguments.')
6339 verbosity = _VerboseLevel()
6340 output_format = _OutputFormat()
6341 filters = ''
6342 counting_style = ''
6343 recursive = False
6345 for (opt, val) in opts:
6346 if opt == '--help':
6347 PrintUsage(None)
6348 elif opt == '--output':
6349 if val not in ('emacs', 'vs7', 'eclipse', 'junit'):
6350 PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
6351 'and junit.')
6352 output_format = val
6353 elif opt == '--verbose':
6354 verbosity = int(val)
6355 elif opt == '--filter':
6356 filters = val
6357 if not filters:
6358 PrintCategories()
6359 elif opt == '--counting':
6360 if val not in ('total', 'toplevel', 'detailed'):
6361 PrintUsage('Valid counting options are total, toplevel, and detailed')
6362 counting_style = val
6363 elif opt == '--root':
6364 global _root
6365 _root = val
6366 elif opt == '--repository':
6367 global _repository
6368 _repository = val
6369 elif opt == '--linelength':
6370 global _line_length
6371 try:
6372 _line_length = int(val)
6373 except ValueError:
6374 PrintUsage('Line length must be numeric.')
6375 elif opt == '--exclude':
6376 global _excludes
6377 if not _excludes:
6378 _excludes = set()
6379 _excludes.update(glob.glob(val))
6380 elif opt == '--extensions':
6381 global _valid_extensions
6382 try:
6383 _valid_extensions = set(val.split(','))
6384 except ValueError:
6385 PrintUsage('Extensions must be a comma-separated list.')
6386 elif opt == '--headers':
6387 global _header_extensions
6388 try:
6389 _header_extensions = set(val.split(','))
6390 except ValueError:
6391 PrintUsage('Header extensions must be a comma-separated list.')
6392 elif opt == '--recursive':
6393 recursive = True
6394 elif opt == '--quiet':
6395 global _quiet
6396 _quiet = True
6398 if not filenames:
6399 PrintUsage('No files were specified.')
6401 if recursive:
6402 filenames = _ExpandDirectories(filenames)
6404 if _excludes:
6405 filenames = _FilterExcludedFiles(filenames)
6407 _SetOutputFormat(output_format)
6408 _SetVerboseLevel(verbosity)
6409 _SetFilters(filters)
6410 _SetCountingStyle(counting_style)
6412 return filenames
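# Illustrative invocation (not part of the original cpplint source), built only
# from flags registered with getopt above; the paths are hypothetical:
#
#   python cpplint.py --recursive --extensions=cc,h \
#       --filter=-whitespace/newline --exclude=src/generated.cc src/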
6414 def _ExpandDirectories(filenames):
6415 """Searches a list of filenames and replaces directories in the list with
6416 all files descending from those directories. Files with extensions not in
6417 the valid extensions list are excluded.
6419 Args:
6420 filenames: A list of files or directories
6422 Returns:
6423 A list of all files that are members of filenames or descended from a
6424 directory in filenames
6425 """
6426 expanded = set()
6427 for filename in filenames:
6428 if not os.path.isdir(filename):
6429 expanded.add(filename)
6430 continue
6432 for root, _, files in os.walk(filename):
6433 for loopfile in files:
6434 fullname = os.path.join(root, loopfile)
6435 if fullname.startswith('.' + os.path.sep):
6436 fullname = fullname[len('.' + os.path.sep):]
6437 expanded.add(fullname)
6439 filtered = []
6440 for filename in expanded:
6441 if os.path.splitext(filename)[1][1:] in GetAllExtensions():
6442 filtered.append(filename)
6444 return filtered
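# Illustrative sketch (not part of the original cpplint source): given a
# hypothetical tree containing src/a.cc, src/a.h and src/notes.txt,
# _ExpandDirectories(['src']) walks the directory and keeps only files whose
# extension is in GetAllExtensions(), i.e. ['src/a.cc', 'src/a.h'].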
6446 def _FilterExcludedFiles(filenames):
6447 """Filters out files listed in the --exclude command line switch. File paths
6448 in the switch are evaluated relative to the current working directory.
6449 """
6450 exclude_paths = [os.path.abspath(f) for f in _excludes]
6451 return [f for f in filenames if os.path.abspath(f) not in exclude_paths]
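# Illustrative sketch (not part of the original cpplint source): if
# --exclude=src/gen/*.cc had glob-expanded to ['src/gen/parser.cc'], a
# hypothetical file list ['src/main.cc', 'src/gen/parser.cc'] would be reduced
# to ['src/main.cc'], since the comparison above is on absolute paths of
# individual files.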
6453 def main():
6454 filenames = ParseArguments(sys.argv[1:])
6455 backup_err = sys.stderr
6456 try:
6457 # Change stderr to write with replacement characters so we don't die
6458 # if we try to print something containing non-ASCII characters.
6459 sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
6461 _cpplint_state.ResetErrorCounts()
6462 for filename in filenames:
6463 ProcessFile(filename, _cpplint_state.verbose_level)
6464 _cpplint_state.PrintErrorCounts()
6466 if _cpplint_state.output_format == 'junit':
6467 sys.stderr.write(_cpplint_state.FormatJUnitXML())
6469 finally:
6470 sys.stderr = backup_err
6472 sys.exit(_cpplint_state.error_count > 0)
6475 if __name__ == '__main__':
6476 main()