# tabnanny.py -- Python standard library module (~11 KB).
#! /usr/bin/env python3

"""The Tab Nanny despises ambiguous indentation. She knows no mercy.

tabnanny -- Detection of ambiguous indentation

For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.

Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""

# Released to the public domain, by Tim Peters, 15 April 1998.

# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.

__version__ = "6"

import os
import sys
import tokenize

# Guard against a pre-1.5.2 tokenize: this module relies on tokenize.NL
# (non-logical newline) to tell continuation lines apart from statements.
if not hasattr(tokenize, 'NL'):
    raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")

__all__ = ["check", "NannyNag", "process_tokens"]

# Global option counters, bumped by main() from command-line switches.
verbose = 0        # -v: emit progress chatter (may be given more than once)
filename_only = 0  # -q: report only the names of offending files
  23. def errprint(*args):
  24. sep = ""
  25. for arg in args:
  26. sys.stderr.write(sep + str(arg))
  27. sep = " "
  28. sys.stderr.write("\n")
  29. def main():
  30. import getopt
  31. global verbose, filename_only
  32. try:
  33. opts, args = getopt.getopt(sys.argv[1:], "qv")
  34. except getopt.error as msg:
  35. errprint(msg)
  36. return
  37. for o, a in opts:
  38. if o == '-q':
  39. filename_only = filename_only + 1
  40. if o == '-v':
  41. verbose = verbose + 1
  42. if not args:
  43. errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
  44. return
  45. for arg in args:
  46. check(arg)
  47. class NannyNag(Exception):
  48. """
  49. Raised by process_tokens() if detecting an ambiguous indent.
  50. Captured and handled in check().
  51. """
  52. def __init__(self, lineno, msg, line):
  53. self.lineno, self.msg, self.line = lineno, msg, line
  54. def get_lineno(self):
  55. return self.lineno
  56. def get_msg(self):
  57. return self.msg
  58. def get_line(self):
  59. return self.line
def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # Recurse into subdirectories (but not symlinked ones, to avoid
            # cycles) and into anything whose name ends in ".py".
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    # tokenize.open() honors any PEP 263 coding cookie in the file.
    try:
        f = tokenize.open(file)
    except OSError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return

    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return

    except NannyNag as nag:
        # Ambiguous indentation found: report it in one of three formats
        # depending on the -v / -q switches.
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            # Quote the filename if it contains a space so the default
            # one-line report stays unambiguous to downstream tools.
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return

    finally:
        # Always release the file handle, whatever path we took above.
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
class Whitespace:
    """Normal-form representation of a leading-whitespace string.

    Two whitespace prefixes are "ambiguous" when their displayed widths
    agree for some tab sizes but disagree for others; the normal form
    (see ``norm`` below) makes that decidable.
    """

    # the characters used for space and tab
    S, T = ' \t'

    # members:
    #   raw
    #       the original string
    #   n
    #       the number of leading whitespace characters in raw
    #   nt
    #       the number of tabs in raw[:n]
    #   norm
    #       the normal form as a pair (count, trailing), where:
    #       count
    #           a tuple such that raw[:n] contains count[i]
    #           instances of S * i + T
    #       trailing
    #           the number of trailing spaces in raw[:n]
    #       It's A Theorem that m.indent_level(t) ==
    #       n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
    #   is_simple
    #       true iff raw[:n] is of the form (T*)(S*)

    def __init__(self, ws):
        self.raw = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        # b counts spaces seen since the last tab; n counts all leading
        # whitespace characters; nt counts tabs only.
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                # A tab ends a run of b spaces: record one more
                # occurrence of the pattern S*b + T in count[b].
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                # First non-whitespace character ends the prefix.
                break
        self.n = n
        self.nt = nt
        self.norm = tuple(count), b
        self.is_simple = len(count) <= 1

    # return length of longest contiguous run of spaces (whether or not
    # preceding a tab)
    def longest_run_of_spaces(self):
        count, trailing = self.norm
        return max(len(count)-1, trailing)

    def indent_level(self, tabsize):
        """Return the displayed indent width of raw[:n] at the given tab size."""
        # count, il = self.norm
        # for i in range(len(count)):
        #     if count[i]:
        #         il = il + (i//tabsize + 1)*tabsize * count[i]
        # return il
        # quicker:
        # il = trailing + sum (i//ts + 1)*ts*count[i] =
        # trailing + ts * sum (i//ts + 1)*count[i] =
        # trailing + ts * sum i//ts*count[i] + count[i] =
        # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] =
        # trailing + ts * [(sum i//ts*count[i]) + num_tabs]
        # and note that i//ts*count[i] is 0 when i < ts
        count, trailing = self.norm
        il = 0
        for i in range(tabsize, len(count)):
            il = il + i//tabsize * count[i]
        return trailing + tabsize * (il + self.nt)

    # return true iff self.indent_level(t) == other.indent_level(t)
    # for all t >= 1
    def equal(self, other):
        return self.norm == other.norm

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
    # Intended to be used after not self.equal(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_equal_witness(self, other):
        # Tab sizes beyond the longest space run + 1 cannot produce a
        # new disagreement, so that bounds the search.
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) != other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

    # Return True iff self.indent_level(t) < other.indent_level(t)
    # for all t >= 1.
    # The algorithm is due to Vincent Broman.
    # Easy to prove it's correct.
    # XXXpost that.
    # Trivial to prove n is sharp (consider T vs ST).
    # Unknown whether there's a faster general way. I suspected so at
    # first, but no longer.
    # For the special (but common!) case where M and N are both of the
    # form (T*)(S*), M.less(N) iff M.len() < N.len() and
    # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
    # XXXwrite that up.
    # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
    def less(self, other):
        if self.n >= other.n:
            return False
        if self.is_simple and other.is_simple:
            return self.nt <= other.nt
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        # the self.n >= other.n test already did it for ts=1
        for ts in range(2, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                return False
        return True

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
    # Intended to be used after not self.less(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_less_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a
  233. def format_witnesses(w):
  234. firsts = (str(tup[0]) for tup in w)
  235. prefix = "at tab size"
  236. if len(w) > 1:
  237. prefix = prefix + "s"
  238. return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
    """Scan a token stream (from tokenize.generate_tokens) for ambiguity.

    Maintains a stack of Whitespace objects mirroring the tokenizer's
    INDENT/DEDENT stack and raises NannyNag as soon as two logical lines
    use leading whitespace whose widths disagree under some tab size.
    """
    INDENT = tokenize.INDENT
    DEDENT = tokenize.DEDENT
    NEWLINE = tokenize.NEWLINE
    # Tokens that never start a program statement.
    JUNK = tokenize.COMMENT, tokenize.NL
    # Seed with the empty indent so ENDMARKER (line == "") matches.
    indents = [Whitespace("")]
    check_equal = 0

    for (type, token, start, end, line) in tokens:
        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1

        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            # A deeper block's indent must be strictly greater at EVERY
            # tab size, otherwise the nesting is ambiguous.
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack

            # Ouch! This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal  # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1

            del indents[-1]

        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0
            thisguy = Whitespace(line)
            # Same-level statements must have indents equal at EVERY tab size.
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
# Script entry point: delegate to the command-line driver.
if __name__ == '__main__':
    main()