author    Stefan Israelsson Tampe <stefan.itampe@gmail.com>    2018-08-20 16:29:23 +0200
committer Stefan Israelsson Tampe <stefan.itampe@gmail.com>    2018-08-20 16:29:23 +0200
commit    c79b55f49d74a8a6d54940cde6969ff21c16067b (patch)
tree      eecb2f2e675a26a5e0d43efcf4790488f077e229
parent    03f46d99946975e1bd8baccfad4709a3e9d4354d (diff)
tokenize.py
-rw-r--r--  modules/language/python/dict.scm               9
-rw-r--r--  modules/language/python/module/_python.scm    16
-rw-r--r--  modules/language/python/module/codecs.py      50
-rw-r--r--  modules/language/python/module/itertools.scm   4
-rw-r--r--  modules/language/python/module/token.py      141
-rw-r--r--  modules/language/python/module/tokenize.py   793

6 files changed, 1005 insertions, 8 deletions
diff --git a/modules/language/python/dict.scm b/modules/language/python/dict.scm
index 4c94f2e..be857ba 100644
--- a/modules/language/python/dict.scm
+++ b/modules/language/python/dict.scm
@@ -312,6 +312,15 @@
#t)))))
(define-py (py-items items o)
+ (<module>
+ (to-pylist
+ (let ((l '()))
+ (module-for-each
+ (lambda (k v)
+ (set! l (cons (list (symbol->string k) (variable-ref v)) l)))
+ o)
+ l)))
+
(<hashtable>
(to-pylist
(hash-fold
diff --git a/modules/language/python/module/_python.scm b/modules/language/python/module/_python.scm
index 4f96611..531a6b5 100644
--- a/modules/language/python/module/_python.scm
+++ b/modules/language/python/module/_python.scm
@@ -127,12 +127,16 @@
#f))))
(define-method (isinstance x y)
- (if (null? y)
- #f
- (if (pair? y)
- (or (isinstance x (car y))
- (isinstance x (cdr y)))
- (is-a? x y))))
+ (cond
+ ((null? y)
+ #f)
+ ((pair? y)
+ (or (isinstance x (car y))
+ (isinstance x (cdr y))))
+ (else
+ (catch #t
+ (lambda () (is-a? x y))
+ (lambda x #f)))))
(define-method (isinstance (i <integer>) y)
(if (issubclass y int)
diff --git a/modules/language/python/module/codecs.py b/modules/language/python/module/codecs.py
new file mode 100644
index 0000000..00d2715
--- /dev/null
+++ b/modules/language/python/module/codecs.py
@@ -0,0 +1,50 @@
+module(codecs)
+
+import sys
+
+__all__ = ["lookup", "BOM", "BOM_BE",
+ "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
+ "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
+ "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
+ ]
+
+
+# UTF-8
+BOM_UTF8 = b'\xef\xbb\xbf'
+
+# UTF-16, little endian
+BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
+
+# UTF-16, big endian
+BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
+
+# UTF-32, little endian
+BOM_UTF32_LE = b'\xff\xfe\x00\x00'
+
+# UTF-32, big endian
+BOM_UTF32_BE = b'\x00\x00\xfe\xff'
+
+if sys.byteorder == 'little':
+
+ # UTF-16, native endianness
+ BOM = BOM_UTF16 = BOM_UTF16_LE
+
+ # UTF-32, native endianness
+ BOM_UTF32 = BOM_UTF32_LE
+
+else:
+
+ # UTF-16, native endianness
+ BOM = BOM_UTF16 = BOM_UTF16_BE
+
+ # UTF-32, native endianness
+ BOM_UTF32 = BOM_UTF32_BE
+
+# Old broken names (don't use in new code)
+BOM32_LE = BOM_UTF16_LE
+BOM32_BE = BOM_UTF16_BE
+BOM64_LE = BOM_UTF32_LE
+BOM64_BE = BOM_UTF32_BE
+
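+# Note: lookup() below is only a stub. detect_encoding() in tokenize.py
+# just checks that lookup() does not raise LookupError, so returning the
+# name unchanged is sufficient for this port.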
+def lookup(x):
+ return x
diff --git a/modules/language/python/module/itertools.scm b/modules/language/python/module/itertools.scm
index b5b3dbf..f3d7ef2 100644
--- a/modules/language/python/module/itertools.scm
+++ b/modules/language/python/module/itertools.scm
@@ -10,7 +10,7 @@
#:export (count cycle repeat accumulate chain compress dropwhile
filterfalse groupby isslice starmap takewhile
- tee zip_longest product permutation combination
+ tee zip_longest product permutations combination
combination_with_replacement))
(define count
@@ -218,7 +218,7 @@
#:final (cons (cadr x.y) l)))
(yield (reverse ret)))))))))
-(def (permutation it (= r None))
+(def (permutations it (= r None))
((make-generator ()
(lambda (yield)
(let* ((ll (for ((x : it)) ((l '())) (cons x l) #:final (reverse l)))
diff --git a/modules/language/python/module/token.py b/modules/language/python/module/token.py
new file mode 100644
index 0000000..48343c4
--- /dev/null
+++ b/modules/language/python/module/token.py
@@ -0,0 +1,141 @@
+module(token)
+"""Token constants (from "token.h")."""
+
+__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
+
+# This file is automatically generated; please don't muck it up!
+#
+# To update the symbols in this file, 'cd' to the top directory of
+# the python source tree after building the interpreter and run:
+#
+# ./python Lib/token.py
+
+#--start constants--
+ENDMARKER = 0
+NAME = 1
+NUMBER = 2
+STRING = 3
+NEWLINE = 4
+INDENT = 5
+DEDENT = 6
+LPAR = 7
+RPAR = 8
+LSQB = 9
+RSQB = 10
+COLON = 11
+COMMA = 12
+SEMI = 13
+PLUS = 14
+MINUS = 15
+STAR = 16
+SLASH = 17
+VBAR = 18
+AMPER = 19
+LESS = 20
+GREATER = 21
+EQUAL = 22
+DOT = 23
+PERCENT = 24
+LBRACE = 25
+RBRACE = 26
+EQEQUAL = 27
+NOTEQUAL = 28
+LESSEQUAL = 29
+GREATEREQUAL = 30
+TILDE = 31
+CIRCUMFLEX = 32
+LEFTSHIFT = 33
+RIGHTSHIFT = 34
+DOUBLESTAR = 35
+PLUSEQUAL = 36
+MINEQUAL = 37
+STAREQUAL = 38
+SLASHEQUAL = 39
+PERCENTEQUAL = 40
+AMPEREQUAL = 41
+VBAREQUAL = 42
+CIRCUMFLEXEQUAL = 43
+LEFTSHIFTEQUAL = 44
+RIGHTSHIFTEQUAL = 45
+DOUBLESTAREQUAL = 46
+DOUBLESLASH = 47
+DOUBLESLASHEQUAL = 48
+AT = 49
+ATEQUAL = 50
+RARROW = 51
+ELLIPSIS = 52
+OP = 53
+AWAIT = 54
+ASYNC = 55
+ERRORTOKEN = 56
+N_TOKENS = 57
+NT_OFFSET = 256
+#--end constants--
+
+tok_name = {value: name
+ for name, value in globals().items()
+ if isinstance(value, int) and not name.startswith('_')}
+__all__.extend(tok_name.values())
+
+def ISTERMINAL(x):
+ return x < NT_OFFSET
+
+def ISNONTERMINAL(x):
+ return x >= NT_OFFSET
+
+def ISEOF(x):
+ return x == ENDMARKER
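+
+# A quick sketch of how these fit together (constants from the table above):
+#
+#   tok_name[NAME]       # -> 'NAME'
+#   ISTERMINAL(NAME)     # -> True, NAME < NT_OFFSET
+#   ISNONTERMINAL(300)   # -> True, grammar symbols live at NT_OFFSET and up
+#   ISEOF(ENDMARKER)     # -> True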
+
+
+def _main():
+ import re
+ import sys
+ args = sys.argv[1:]
+ inFileName = args and args[0] or "Include/token.h"
+ outFileName = "Lib/token.py"
+ if len(args) > 1:
+ outFileName = args[1]
+ try:
+ fp = open(inFileName)
+ except OSError as err:
+ sys.stdout.write("I/O error: %s\n" % str(err))
+ sys.exit(1)
+ with fp:
+ lines = fp.read().split("\n")
+ prog = re.compile(
+ "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
+ re.IGNORECASE)
+ tokens = {}
+ for line in lines:
+ match = prog.match(line)
+ if match:
+ name, val = match.group(1, 2)
+ val = int(val)
+ tokens[val] = name # reverse so we can sort them...
+ keys = sorted(tokens.keys())
+ # load the output skeleton from the target:
+ try:
+ fp = open(outFileName)
+ except OSError as err:
+ sys.stderr.write("I/O error: %s\n" % str(err))
+ sys.exit(2)
+ with fp:
+ format = fp.read().split("\n")
+ try:
+ start = format.index("#--start constants--") + 1
+ end = format.index("#--end constants--")
+ except ValueError:
+ sys.stderr.write("target does not contain format markers")
+ sys.exit(3)
+ lines = []
+ for val in keys:
+ lines.append("%s = %d" % (tokens[val], val))
+ format[start:end] = lines
+ try:
+ fp = open(outFileName, 'w')
+ except OSError as err:
+ sys.stderr.write("I/O error: %s\n" % str(err))
+ sys.exit(4)
+ with fp:
+ fp.write("\n".join(format))
+
diff --git a/modules/language/python/module/tokenize.py b/modules/language/python/module/tokenize.py
new file mode 100644
index 0000000..7e4012d
--- /dev/null
+++ b/modules/language/python/module/tokenize.py
@@ -0,0 +1,793 @@
+module(tokenize)
+
+"""Tokenization help for Python programs.
+
+tokenize(readline) is a generator that breaks a stream of bytes into
+Python tokens. It decodes the bytes according to PEP-0263 for
+determining source file encoding.
+
+It accepts a readline-like method which is called repeatedly to get the
+next line of input (or b"" for EOF). It generates 5-tuples with these
+members:
+
+ the token type (see token.py)
+ the token (a string)
+ the starting (row, column) indices of the token (a 2-tuple of ints)
+ the ending (row, column) indices of the token (a 2-tuple of ints)
+ the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators. Additionally, all token lists start with an ENCODING token
+which tells you which encoding was used to decode the bytes stream.
+"""
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+ 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
+ 'Michael Foord')
+from builtins import open as _builtin_open
+from codecs import lookup, BOM_UTF8
+import collections
+from io import TextIOWrapper
+from itertools import chain
+import itertools as _itertools
+import re
+import sys
+from token import *
+
+cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
+blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
+
+import token
+__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
+ "NL", "untokenize", "ENCODING", "TokenInfo"]
+del token
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+ENCODING = N_TOKENS + 2
+tok_name[ENCODING] = 'ENCODING'
+N_TOKENS += 3
+EXACT_TOKEN_TYPES = {
+ '(': LPAR,
+ ')': RPAR,
+ '[': LSQB,
+ ']': RSQB,
+ ':': COLON,
+ ',': COMMA,
+ ';': SEMI,
+ '+': PLUS,
+ '-': MINUS,
+ '*': STAR,
+ '/': SLASH,
+ '|': VBAR,
+ '&': AMPER,
+ '<': LESS,
+ '>': GREATER,
+ '=': EQUAL,
+ '.': DOT,
+ '%': PERCENT,
+ '{': LBRACE,
+ '}': RBRACE,
+ '==': EQEQUAL,
+ '!=': NOTEQUAL,
+ '<=': LESSEQUAL,
+ '>=': GREATEREQUAL,
+ '~': TILDE,
+ '^': CIRCUMFLEX,
+ '<<': LEFTSHIFT,
+ '>>': RIGHTSHIFT,
+ '**': DOUBLESTAR,
+ '+=': PLUSEQUAL,
+ '-=': MINEQUAL,
+ '*=': STAREQUAL,
+ '/=': SLASHEQUAL,
+ '%=': PERCENTEQUAL,
+ '&=': AMPEREQUAL,
+ '|=': VBAREQUAL,
+ '^=': CIRCUMFLEXEQUAL,
+ '<<=': LEFTSHIFTEQUAL,
+ '>>=': RIGHTSHIFTEQUAL,
+ '**=': DOUBLESTAREQUAL,
+ '//': DOUBLESLASH,
+ '//=': DOUBLESLASHEQUAL,
+ '@': AT,
+ '@=': ATEQUAL,
+}
+
+class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
+ def __repr__(self):
+ annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
+ return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
+ self._replace(type=annotated_type))
+
+ @property
+ def exact_type(self):
+ if self.type == OP and self.string in EXACT_TOKEN_TYPES:
+ return EXACT_TOKEN_TYPES[self.string]
+ else:
+ return self.type
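+
+# exact_type sketch: an OP token keeps the generic type OP, while
+# .exact_type maps it back to the specific operator via EXACT_TOKEN_TYPES:
+#
+#   TokenInfo(OP, '+', (1, 0), (1, 1), '+').exact_type   # -> PLUS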
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
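+# For instance, group('a', 'b') -> '(a|b)', any('a') -> '(a)*' and
+# maybe('a') -> '(a)?'; all the token regexes below are built this way.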
+
+# Note: we use unicode matching for names ("\w") but ascii matching for
+# number literals.
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'\w+'
+
+Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
+Binnumber = r'0[bB](?:_?[01])+'
+Octnumber = r'0[oO](?:_?[0-7])+'
+Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
+Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
+ r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
+Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Return the empty string, plus all of the valid string prefixes.
+def _all_string_prefixes():
+    # The valid string prefixes. Only the lower-case versions are
+    # listed, and no ordering permutations (include 'fr', but not
+    # 'rf'); the various permutations are generated below.
+ _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
+ # if we add binary f-strings, add: ['fb', 'fbr']
+ result = set([''])
+ for prefix in _valid_string_prefixes:
+ for t in _itertools.permutations(prefix):
+ # create a list with upper and lower versions of each
+ # character
+ for u in _itertools.product(*[(c, c.upper()) for c in t]):
+ result.add(''.join(u))
+ return result
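+
+# Sketch of the result: the empty string plus every case and order
+# variant of each prefix, e.g. for 'fr': fr, fR, Fr, FR, rf, rF, Rf, RF.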
+
+def _compile(expr):
+ return re.compile(expr, re.UNICODE)
+
+# Note that since _all_string_prefixes includes the empty string,
+# StringPrefix can be the empty string (making it optional).
+StringPrefix = group(*_all_string_prefixes())
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group(StringPrefix + "'''", StringPrefix + '"""')
+# Single-line ' or " string.
+String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
+ r"//=?", r"->",
+ r"[+\-*/%&@|^=<>]=?",
+ r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ group("'", r'\\\r?\n'),
+ StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+# For a given string prefix plus quotes, endpats maps it to a regex
+# to match the remainder of that string. _prefix can be empty, for
+# a normal single or triple quoted string (with no prefix).
+endpats = {}
+for _prefix in _all_string_prefixes():
+ endpats[_prefix + "'"] = Single
+ endpats[_prefix + '"'] = Double
+ endpats[_prefix + "'''"] = Single3
+ endpats[_prefix + '"""'] = Double3
+
+# A set of all of the single and triple quoted string prefixes,
+# including the opening quotes.
+single_quoted = set()
+triple_quoted = set()
+for t in _all_string_prefixes():
+ for u in (t + '"', t + "'"):
+ single_quoted.add(u)
+ for u in (t + '"""', t + "'''"):
+ triple_quoted.add(u)
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+class Untokenizer:
+
+ def __init__(self):
+ self.tokens = []
+ self.prev_row = 1
+ self.prev_col = 0
+ self.encoding = None
+
+ def add_whitespace(self, start):
+ row, col = start
+ if row < self.prev_row or row == self.prev_row and col < self.prev_col:
+ raise ValueError("start ({},{}) precedes previous end ({},{})"
+ .format(row, col, self.prev_row, self.prev_col))
+ row_offset = row - self.prev_row
+ if row_offset:
+ self.tokens.append("\\\n" * row_offset)
+ self.prev_col = 0
+ col_offset = col - self.prev_col
+ if col_offset:
+ self.tokens.append(" " * col_offset)
+
+ def untokenize(self, iterable):
+ it = iter(iterable)
+ indents = []
+ startline = False
+ for t in it:
+ if len(t) == 2:
+ self.compat(t, it)
+ break
+ tok_type, token, start, end, line = t
+ if tok_type == ENCODING:
+ self.encoding = token
+ continue
+ if tok_type == ENDMARKER:
+ break
+ if tok_type == INDENT:
+ indents.append(token)
+ continue
+ elif tok_type == DEDENT:
+ indents.pop()
+ self.prev_row, self.prev_col = end
+ continue
+ elif tok_type in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ indent = indents[-1]
+ if start[1] >= len(indent):
+ self.tokens.append(indent)
+ self.prev_col = len(indent)
+ startline = False
+ self.add_whitespace(start)
+ self.tokens.append(token)
+ self.prev_row, self.prev_col = end
+ if tok_type in (NEWLINE, NL):
+ self.prev_row += 1
+ self.prev_col = 0
+ return "".join(self.tokens)
+
+ def compat(self, token, iterable):
+ indents = []
+ toks_append = self.tokens.append
+ startline = token[0] in (NEWLINE, NL)
+ prevstring = False
+
+ for tok in chain([token], iterable):
+ toknum, tokval = tok[:2]
+ if toknum == ENCODING:
+ self.encoding = tokval
+ continue
+
+ if toknum in (NAME, NUMBER, ASYNC, AWAIT):
+ tokval += ' '
+
+ # Insert a space between two consecutive strings
+ if toknum == STRING:
+ if prevstring:
+ tokval = ' ' + tokval
+ prevstring = True
+ else:
+ prevstring = False
+
+ if toknum == INDENT:
+ indents.append(tokval)
+ continue
+ elif toknum == DEDENT:
+ indents.pop()
+ continue
+ elif toknum in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ toks_append(indents[-1])
+ startline = False
+ toks_append(tokval)
+
+def untokenize(iterable):
+ """Transform tokens back into Python source code.
+ It returns a bytes object, encoded using the ENCODING
+ token, which is the first token sequence output by tokenize.
+
+ Each element returned by the iterable must be a token sequence
+ with at least two elements, a token number and token value. If
+ only two tokens are passed, the resulting output is poor.
+
+ Round-trip invariant for full input:
+ Untokenized source will match input source exactly
+
+ Round-trip invariant for limited input:
+ # Output bytes will tokenize back to the input
+ t1 = [tok[:2] for tok in tokenize(f.readline)]
+ newcode = untokenize(t1)
+ readline = BytesIO(newcode).readline
+ t2 = [tok[:2] for tok in tokenize(readline)]
+ assert t1 == t2
+ """
+ ut = Untokenizer()
+ out = ut.untokenize(iterable)
+ if ut.encoding is not None:
+ out = out.encode(ut.encoding)
+ return out
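+
+# Round-trip sketch (full 5-tuples, so the exact-match invariant applies):
+#
+#   from io import BytesIO
+#   toks = list(tokenize(BytesIO(b"x = 1\n").readline))
+#   untokenize(toks)   # -> b"x = 1\n"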
+
+
+def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
+
+def detect_encoding(readline):
+ """
+ The detect_encoding() function is used to detect the encoding that should
+ be used to decode a Python source file. It requires one argument, readline,
+ in the same way as the tokenize() generator.
+
+ It will call readline a maximum of twice, and return the encoding used
+ (as a string) and a list of any lines (left as bytes) it has read in.
+
+    It detects the encoding from the presence of a UTF-8 BOM or an encoding
+    cookie as specified in PEP 263. If both a BOM and a cookie are present,
+    but disagree, a SyntaxError is raised. If the encoding cookie is an
+    invalid charset, a SyntaxError is raised as well. Note that if a UTF-8
+    BOM is found, 'utf-8-sig' is returned.
+
+ If no encoding is specified, then the default of 'utf-8' will be returned.
+ """
+ try:
+ filename = readline.__self__.name
+ except AttributeError:
+ filename = None
+ bom_found = False
+ encoding = None
+ default = 'utf-8'
+ def read_or_stop():
+ try:
+ return readline()
+ except StopIteration:
+ return b''
+
+ def find_cookie(line):
+ try:
+ # Decode as UTF-8. Either the line is an encoding declaration,
+ # in which case it should be pure ASCII, or it must be UTF-8
+ # per default encoding.
+ line_string = line.decode('utf-8')
+ except UnicodeDecodeError:
+ msg = "invalid or missing encoding declaration"
+ if filename is not None:
+ msg = '{} for {!r}'.format(msg, filename)
+ raise SyntaxError(msg)
+
+ match = cookie_re.match(line_string)
+ if not match:
+ return None
+ encoding = _get_normal_name(match.group(1))
+ try:
+ codec = lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ if filename is None:
+ msg = "unknown encoding: " + encoding
+ else:
+ msg = "unknown encoding for {!r}: {}".format(filename,
+ encoding)
+ raise SyntaxError(msg)
+
+ if bom_found:
+ if encoding != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ if filename is None:
+ msg = 'encoding problem: utf-8'
+ else:
+ msg = 'encoding problem for {!r}: utf-8'.format(filename)
+ raise SyntaxError(msg)
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default, []
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding, [first]
+ if not blank_re.match(first):
+ return default, [first]
+
+ second = read_or_stop()
+ if not second:
+ return default, [first]
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding, [first, second]
+
+ return default, [first, second]
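+
+# Usage sketch: hand detect_encoding the same readline tokenize would see.
+#
+#   from io import BytesIO
+#   enc, lines = detect_encoding(BytesIO(b"# coding: latin-1\nx = 1\n").readline)
+#   # enc == 'iso-8859-1'; lines holds the (bytes) lines already consumed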
+
+def open(filename):
+ """Open a file in read only mode using the encoding detected by
+ detect_encoding().
+ """
+ buffer = _builtin_open(filename, 'rb')
+ try:
+ encoding, lines = detect_encoding(buffer.readline)
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, line_buffering=True)
+ text.mode = 'r'
+ return text
+ except:
+ buffer.close()
+ raise
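+
+# e.g. text = open('some_module.py') returns a TextIOWrapper whose reads
+# decode with whatever encoding detect_encoding() found (the path here is
+# hypothetical).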
+
+
+def tokenize(readline):
+ """
+ The tokenize() generator requires one argument, readline, which
+ must be a callable object which provides the same interface as the
+ readline() method of built-in file objects. Each call to the function
+ should return one line of input as bytes. Alternatively, readline
+ can be a callable function terminating with StopIteration:
+ readline = open(myfile, 'rb').__next__ # Example of alternate readline
+
+ The generator produces 5-tuples with these members: the token type; the
+ token string; a 2-tuple (srow, scol) of ints specifying the row and
+ column where the token begins in the source; a 2-tuple (erow, ecol) of
+ ints specifying the row and column where the token ends in the source;
+ and the line on which the token was found. The line passed is the
+ logical line; continuation lines are included.
+
+ The first token sequence will always be an ENCODING token
+ which tells you which encoding was used to decode the bytes stream.
+ """
+ # This import is here to avoid problems when the itertools module is not
+ # built yet and tokenize is imported.
+ from itertools import chain, repeat
+ encoding, consumed = detect_encoding(readline)
+ rl_gen = iter(readline, b"")
+ empty = repeat(b"")
+ return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+
+
+def _tokenize(readline, encoding):
+ lnum = parenlev = continued = 0
+ numchars = '0123456789'
+ contstr, needcont = '', 0
+ contline = None
+ indents = [0]
+
+ # 'stashed' and 'async_*' are used for async/await parsing
+ stashed = None
+ async_def = False
+ async_def_indent = 0
+ async_def_nl = False
+
+ if encoding is not None:
+ if encoding == "utf-8-sig":
+ # BOM will already have been stripped.
+ encoding = "utf-8"
+ yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
+ while True: # loop over lines in stream
+ try:
+ line = readline()
+ except StopIteration:
+ line = b''
+
+ if encoding is not None:
+ line = line.decode(encoding)
+ lnum += 1
+ pos, max = 0, len(line)
+
+ if contstr: # continued string
+ if not line:
+ raise TokenError("EOF in multi-line string", strstart)
+ endmatch = endprog.match(line)
+ if endmatch:
+ pos = end = endmatch.end(0)
+ yield TokenInfo(STRING, contstr + line[:end],
+ strstart, (lnum, end), contline + line)
+ contstr, needcont = '', 0
+ contline = None
+ elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+ yield TokenInfo(ERRORTOKEN, contstr + line,
+ strstart, (lnum, len(line)), contline)
+ contstr = ''
+ contline = None
+ continue
+ else:
+ contstr = contstr + line
+ contline = contline + line
+ continue
+
+ elif parenlev == 0 and not continued: # new statement
+ if not line: break
+ column = 0
+ while pos < max: # measure leading whitespace
+ if line[pos] == ' ':
+ column += 1
+ elif line[pos] == '\t':
+ column = (column//tabsize + 1)*tabsize
+ elif line[pos] == '\f':
+ column = 0
+ else:
+ break
+ pos += 1
+ if pos == max:
+ break
+
+ if line[pos] in '#\r\n': # skip comments or blank lines
+ if line[pos] == '#':
+ comment_token = line[pos:].rstrip('\r\n')
+ nl_pos = pos + len(comment_token)
+ yield TokenInfo(COMMENT, comment_token,
+ (lnum, pos), (lnum, pos + len(comment_token)), line)
+ yield TokenInfo(NL, line[nl_pos:],
+ (lnum, nl_pos), (lnum, len(line)), line)
+ else:
+ yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
+ (lnum, pos), (lnum, len(line)), line)
+ continue
+
+ if column > indents[-1]: # count indents or dedents
+ indents.append(column)
+ yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+ while column < indents[-1]:
+ if column not in indents:
+ raise IndentationError(
+ "unindent does not match any outer indentation level",
+ ("<tokenize>", lnum, pos, line))
+ indents = indents[:-1]
+
+ if async_def and async_def_indent >= indents[-1]:
+ async_def = False
+ async_def_nl = False
+ async_def_indent = 0
+
+ yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+ if async_def and async_def_nl and async_def_indent >= indents[-1]:
+ async_def = False
+ async_def_nl = False
+ async_def_indent = 0
+
+ else: # continued statement
+ if not line:
+ raise TokenError("EOF in multi-line statement", (lnum, 0))
+ continued = 0
+
+ while pos < max:
+ pseudomatch = _compile(PseudoToken).match(line, pos)
+ if pseudomatch: # scan for tokens
+ start, end = pseudomatch.span(1)
+ spos, epos, pos = (lnum, start), (lnum, end), end
+ if start == end:
+ continue
+ token, initial = line[start:end], line[start]
+
+ if (initial in numchars or # ordinary number
+ (initial == '.' and token != '.' and token != '...')):
+ yield TokenInfo(NUMBER, token, spos, epos, line)
+ elif initial in '\r\n':
+ if stashed:
+ yield stashed
+ stashed = None
+ if parenlev > 0:
+ yield TokenInfo(NL, token, spos, epos, line)
+ else:
+ yield TokenInfo(NEWLINE, token, spos, epos, line)
+ if async_def:
+ async_def_nl = True
+
+ elif initial == '#':
+ assert not token.endswith("\n")
+ if stashed:
+ yield stashed
+ stashed = None
+ yield TokenInfo(COMMENT, token, spos, epos, line)
+
+ elif token in triple_quoted:
+ endprog = _compile(endpats[token])
+ endmatch = endprog.match(line, pos)
+ if endmatch: # all on one line
+ pos = endmatch.end(0)
+ token = line[start:pos]
+ yield TokenInfo(STRING, token, spos, (lnum, pos), line)
+ else:
+ strstart = (lnum, start) # multiple lines
+ contstr = line[start:]
+ contline = line
+ break
+
+ # Check up to the first 3 chars of the token to see if
+ # they're in the single_quoted set. If so, they start
+ # a string.
+ # We're using the first 3, because we're looking for
+ # "rb'" (for example) at the start of the token. If
+ # we switch to longer prefixes, this needs to be
+ # adjusted.
+ # Note that initial == token[:1].
+ # Also note that single quote checking must come after
+ # triple quote checking (above).
+ elif (initial in single_quoted or
+ token[:2] in single_quoted or
+ token[:3] in single_quoted):
+ if token[-1] == '\n': # continued string
+ strstart = (lnum, start)
+ # Again, using the first 3 chars of the
+ # token. This is looking for the matching end
+ # regex for the correct type of quote
+ # character. So it's really looking for
+ # endpats["'"] or endpats['"'], by trying to
+ # skip string prefix characters, if any.
+ endprog = _compile(endpats.get(initial) or
+ endpats.get(token[1]) or
+ endpats.get(token[2]))
+ contstr, needcont = line[start:], 1
+ contline = line
+ break
+ else: # ordinary string
+ yield TokenInfo(STRING, token, spos, epos, line)
+
+ elif initial.isidentifier(): # ordinary name
+ if token in ('async', 'await'):
+ if async_def:
+ yield TokenInfo(
+ ASYNC if token == 'async' else AWAIT,
+ token, spos, epos, line)
+ continue
+
+ tok = TokenInfo(NAME, token, spos, epos, line)
+ if token == 'async' and not stashed:
+ stashed = tok
+ continue
+
+ if token == 'def':
+ if (stashed
+ and stashed.type == NAME
+ and stashed.string == 'async'):
+
+ async_def = True
+ async_def_indent = indents[-1]
+
+ yield TokenInfo(ASYNC, stashed.string,
+ stashed.start, stashed.end,
+ stashed.line)
+ stashed = None
+
+ if stashed:
+ yield stashed
+ stashed = None
+
+ yield tok
+ elif initial == '\\': # continued stmt
+ continued = 1
+ else:
+ if initial in '([{':
+ parenlev += 1
+ elif initial in ')]}':
+ parenlev -= 1
+ if stashed:
+ yield stashed
+ stashed = None
+ yield TokenInfo(OP, token, spos, epos, line)
+ else:
+ yield TokenInfo(ERRORTOKEN, line[pos],
+ (lnum, pos), (lnum, pos+1), line)
+ pos += 1
+
+ if stashed:
+ yield stashed
+ stashed = None
+
+ for indent in indents[1:]: # pop remaining indent levels
+ yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
+ yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+# An undocumented, backwards compatible, API for all the places in the standard
+# library that expect to be able to use tokenize with strings
+def generate_tokens(readline):
+ return _tokenize(readline, None)
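+
+# Text-mode sketch: readline yields str and no ENCODING token is emitted.
+#
+#   from io import StringIO
+#   toks = list(generate_tokens(StringIO("x = 1\n").readline))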
+
+def main():
+ import argparse
+
+ # Helper error handling routines
+ def perror(message):
+ print(message, file=sys.stderr)
+
+ def error(message, filename=None, location=None):
+ if location:
+ args = (filename,) + location + (message,)
+ perror("%s:%d:%d: error: %s" % args)
+ elif filename:
+ perror("%s: error: %s" % (filename, message))
+ else:
+ perror("error: %s" % message)
+ sys.exit(1)
+
+ # Parse the arguments and options
+ parser = argparse.ArgumentParser(prog='python -m tokenize')
+ parser.add_argument(dest='filename', nargs='?',
+ metavar='filename.py',
+ help='the file to tokenize; defaults to stdin')
+ parser.add_argument('-e', '--exact', dest='exact', action='store_true',
+ help='display token names using the exact type')
+ args = parser.parse_args()
+
+ try:
+ # Tokenize the input
+ if args.filename:
+ filename = args.filename
+ with _builtin_open(filename, 'rb') as f:
+ tokens = list(tokenize(f.readline))
+ else:
+ filename = "<stdin>"
+ tokens = _tokenize(sys.stdin.readline, None)
+
+ # Output the tokenization
+ for token in tokens:
+ token_type = token.type
+ if args.exact:
+ token_type = token.exact_type
+ token_range = "%d,%d-%d,%d:" % (token.start + token.end)
+ print("%-20s%-15s%-15r" %
+ (token_range, tok_name[token_type], token.string))
+ except IndentationError as err:
+ line, column = err.args[1][1:3]
+ error(err.args[0], filename, (line, column))
+ except TokenError as err:
+ line, column = err.args[1]
+ error(err.args[0], filename, (line, column))
+ except SyntaxError as err:
+ error(err, filename)
+ except OSError as err:
+ error(err)
+ except KeyboardInterrupt:
+ print("interrupted\n")
+ except Exception as err:
+ perror("unexpected error: %s" % err)
+ raise