3 """The Tab Nanny despises ambiguous indentation. She knows no mercy."""
5 # Released to the public domain, by Tim Peters, 15 April 1998.
7 # XXX Note: this is now a standard library module.
8 # XXX The API needs to undergo changes however; the current code is too
9 # XXX script-like. This will be addressed later.

import os
import sys
import getopt
import tokenize

if not hasattr(tokenize, 'NL'):
    raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")

__all__ = ["check", "NannyNag", "process_tokens"]

verbose = 0
filename_only = 0

def errprint(*args):
    sep = ""
    for arg in args:
        sys.stderr.write(sep + str(arg))
        sep = " "
    sys.stderr.write("\n")

def main():
    global verbose, filename_only
    try:
        opts, args = getopt.getopt(sys.argv[1:], "qv")
    except getopt.error, msg:
        errprint(msg)
        return
    for o, a in opts:
        if o == '-q':
            filename_only = filename_only + 1
        if o == '-v':
            verbose = verbose + 1
    if not args:
        errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
        return
    for arg in args:
        check(arg)

class NannyNag(Exception):
    # Raised by process_tokens() when it detects an ambiguous indent;
    # captured and handled in check().
    def __init__(self, lineno, msg, line):
        self.lineno, self.msg, self.line = lineno, msg, line
    def get_lineno(self):
        return self.lineno
    def get_msg(self):
        return self.msg
    def get_line(self):
        return self.line

def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "%s: listing directory" % `file`
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = open(file)
    except IOError, msg:
        errprint("%s: I/O Error: %s" % (`file`, str(msg)))
        return

    if verbose > 1:
        print "checking", `file`, "..."

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError, msg:
        errprint("%s: Token Error: %s" % (`file`, str(msg)))
        return

    except NannyNag, nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print "%s: *** Line %d: trouble in tab city! ***" % (`file`, badline)
            print "offending line:", `line`
            print nag.get_msg()
        else:
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print file
            else: print file, badline, `line`
        return

    if verbose:
        print "%s: Clean bill of health." % `file`

class Whitespace:
    # the characters used for space and tab
    S, T = ' \t'

    # member names:
    #   raw
    #       the original string
    #   n
    #       the number of leading whitespace characters in raw
    #   nt
    #       the number of tabs in raw[:n]
    #   norm
    #       the normal form as a pair (count, trailing), where:
    #       count
    #           a tuple such that raw[:n] contains count[i]
    #           instances of S * i + T
    #       trailing
    #           the number of trailing spaces in raw[:n]
    #       It's A Theorem that m.indent_level(t) ==
    #       n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
    #   is_simple
    #       true iff raw[:n] is of the form (T*)(S*)
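
    # Worked example (added for illustration; not part of the original
    # comments).  For ws == "\t  \t " -- TAB, SPACE, SPACE, TAB, SPACE:
    #     n == 5, nt == 2
    #     norm == ((1, 0, 1), 1)   # one bare tab, one "  \t", one trailing space
    #     is_simple is false, since a run of spaces precedes a tab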

    def __init__(self, ws):
        self.raw = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                break
        self.n = n
        self.nt = nt
        self.norm = tuple(count), b
        self.is_simple = len(count) <= 1

    # return length of longest contiguous run of spaces (whether or not
    # preceding a tab)
    def longest_run_of_spaces(self):
        count, trailing = self.norm
        return max(len(count)-1, trailing)

    def indent_level(self, tabsize):
        # count, il = self.norm
        # for i in range(len(count)):
        #    if count[i]:
        #        il = il + (i/tabsize + 1)*tabsize * count[i]
        # return il

        # quicker:
        # il = trailing + sum (i/ts + 1)*ts*count[i] =
        #      trailing + ts * sum (i/ts + 1)*count[i] =
        #      trailing + ts * sum i/ts*count[i] + count[i] =
        #      trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
        #      trailing + ts * [(sum i/ts*count[i]) + num_tabs]
        # and note that i/ts*count[i] is 0 when i < ts

        count, trailing = self.norm
        il = 0
        for i in range(tabsize, len(count)):
            il = il + i/tabsize * count[i]
        return trailing + tabsize * (il + self.nt)
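
    # Worked example (added; not in the original).  For the "\t  \t " prefix
    # above, norm == ((1, 0, 1), 1) and nt == 2, so with tabsize == 8 the loop
    # over range(8, 3) is empty and indent_level(8) == 1 + 8*(0 + 2) == 17;
    # the naive commented-out version gives 1 + (0/8+1)*8*1 + (2/8+1)*8*1 == 17
    # as well.  With tabsize == 4 both forms give 1 + 4*2 == 9.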

    # return true iff self.indent_level(t) == other.indent_level(t)
    # for all t >= 1
    def equal(self, other):
        return self.norm == other.norm

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
    # Intended to be used after not self.equal(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_equal_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) != other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a
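
    # Worked example (added; not in the original).  Whitespace("\t") and
    # Whitespace("    ") are not equal(): the tab indents to level ts at tab
    # size ts, the four spaces always to level 4.  not_equal_witness() then
    # returns [(1, 1, 4), (2, 2, 4), (3, 3, 4), (5, 5, 4)] -- every tab size
    # up to 5 except ts == 4, where the two prefixes happen to agree.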

    # Return true iff self.indent_level(t) < other.indent_level(t)
    # for all t >= 1.
    # The algorithm is due to Vincent Broman.
    # Easy to prove it's correct.
    # Trivial to prove n is sharp (consider T vs ST).
    # Unknown whether there's a faster general way.  I suspected so at
    # first, but no longer.
    # For the special (but common!) case where M and N are both of the
    # form (T*)(S*), M.less(N) iff M.len() < N.len() and
    # M.num_tabs() <= N.num_tabs().  Proof is easy but kinda long-winded.
    # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
    def less(self, other):
        if self.n >= other.n:
            return 0
        if self.is_simple and other.is_simple:
            return self.nt <= other.nt
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        # the self.n >= other.n test already did it for ts=1
        for ts in range(2, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                return 0
        return 1
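
    # Worked example (added; not in the original).
    # Whitespace("\t").less(Whitespace("\t    ")) is true: both prefixes are
    # simple, with 1 < 5 leading whitespace characters and 1 <= 1 tabs.  But
    # Whitespace("\t").less(Whitespace("        ")) (eight spaces) is false:
    # at any tab size greater than 8 the lone tab indents further than the
    # eight spaces do.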

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
    # Intended to be used after not self.less(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_less_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

def format_witnesses(w):
    import string
    firsts = map(lambda tup: str(tup[0]), w)
    prefix = "at tab size"
    if len(w) > 1:
        prefix = prefix + "s"
    return prefix + " " + string.join(firsts, ', ')
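
# Example (added; not in the original): format_witnesses([(1, 1, 4), (2, 2, 4)])
# returns "at tab sizes 1, 2"; with a single witness the singular "at tab size"
# is used.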

def process_tokens(tokens):
    INDENT = tokenize.INDENT
    DEDENT = tokenize.DEDENT
    NEWLINE = tokenize.NEWLINE
    JUNK = tokenize.COMMENT, tokenize.NL
    indents = [Whitespace("")]
    check_equal = 0

    for (type, token, start, end, line) in tokens:

        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1

        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack

            # Ouch! This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal  # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1

            del indents[-1]

        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0

            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
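
# Illustrative programmatic use (added; not part of the original module;
# "some_script.py" is a made-up file name):
#
#     import tokenize, tabnanny
#     f = open("some_script.py")
#     try:
#         tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
#     except tabnanny.NannyNag, nag:
#         print "ambiguous indentation at line", nag.get_lineno()
#     except tokenize.TokenError, msg:
#         print "tokenizing failed:", msg
#
# tabnanny.check("some_script.py") wraps the same logic and prints its own
# report.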

if __name__ == '__main__':
    main()