author    Stefan Israelsson Tampe <stefan.itampe@gmail.com>  2018-08-20 16:45:22 +0200
committer Stefan Israelsson Tampe <stefan.itampe@gmail.com>  2018-08-20 16:45:22 +0200
commit    a946830b2994786f84962a79e06e4c745a2c3bf5 (patch)
tree      7331a948c52199c7b90626f060e1a2f367b611c8
parent    c79b55f49d74a8a6d54940cde6969ff21c16067b (diff)
tokenizer compiles
-rw-r--r--  modules/language/python/def.scm              3
-rw-r--r--  modules/language/python/module/tokenize.py  17
2 files changed, 10 insertions, 10 deletions
diff --git a/modules/language/python/def.scm b/modules/language/python/def.scm
index fa4cbc5..f5466ad 100644
--- a/modules/language/python/def.scm
+++ b/modules/language/python/def.scm
@@ -1,6 +1,7 @@
(define-module (language python def)
#:use-module (oop pf-objects)
#:use-module (language python for)
+ #:use-module (language python list)
#:use-module (ice-9 match)
#:use-module (srfi srfi-11)
#:export (def lam py-apply))
@@ -147,6 +148,6 @@
#'(f a ...)
#'(apply f (let lp ((l (list (m* a) ...)))
(if (pair? l)
- (append (car l) (lp (cdr l)))
+ (append (to-list (car l)) (lp (cdr l)))
'()))))))))
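
The expansion above flattens each starred argument with append, so every piece must be a proper Scheme list; wrapping with to-list converts a Python sequence object into one first. A minimal Python-level sketch of the call shape this supports (function and variable names are illustrative, not taken from the module):

    def add(a, b, c):
        return a + b + c

    args = [1, 2, 3]     # a Python list object rather than a raw Scheme list
    print(add(*args))    # star-unpacking is what the py-apply expansion performs; prints 6
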
diff --git a/modules/language/python/module/tokenize.py b/modules/language/python/module/tokenize.py
index 7e4012d..9cedcba 100644
--- a/modules/language/python/module/tokenize.py
+++ b/modules/language/python/module/tokenize.py
@@ -157,7 +157,7 @@ def _compile(expr):
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
-pk(1)
+
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
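
A quick sketch of how the "tail end" pattern above behaves, run standalone with the same literal copied by hand (not imported from the module):

    import re

    Single = r"[^'\\]*(?:\\.[^'\\]*)*'"           # tail end of a ' string, escapes allowed
    m = re.match(Single, r"hello \' world' and more")
    print(m.group(0))                             # hello \' world'  -- up to the closing quote
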
@@ -170,7 +170,7 @@ Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-pk(2)
+
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
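
The ordering remark above is easy to demonstrate with a throwaway alternation (the patterns below are illustrative, not the module's actual Operator definition):

    import re

    bad  = re.compile(r'=|==')       # shortest alternative listed first
    good = re.compile(r'==|=')       # longest alternative listed first
    print(bad.match('==').group())   # '='  -- '==' would be seen as two '=' tokens
    print(good.match('==').group())  # '=='
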
@@ -193,7 +193,7 @@ ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-pk(3)
+
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
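
A hedged sketch of the endpats idea described above: each opening quote form maps to the pattern that matches the remainder of that string (the dict is a simplified assumption, not the module's full table):

    Single = r"[^'\\]*(?:\\.[^'\\]*)*'"      # rest of a ' string
    Double = r'[^"\\]*(?:\\.[^"\\]*)*"'      # rest of a " string
    endpats = {"'": Single, '"': Double}     # the real table also covers prefixed and triple quotes
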
@@ -213,14 +213,14 @@ for t in _all_string_prefixes():
single_quoted.add(u)
for u in (t + '"""', t + "'''"):
triple_quoted.add(u)
-pk(4)
+
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
-pk(5)
+
class Untokenizer:
def __init__(self):
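
For orientation, the loop above simply enumerates every prefix/quote combination; a standalone sketch with a hand-picked subset of prefixes (the tuple below is an assumption, much shorter than _all_string_prefixes()):

    single_quoted, triple_quoted = set(), set()
    for t in ('', 'r', 'b'):                  # assumed subset of the real prefixes
        for u in (t + '"', t + "'"):
            single_quoted.add(u)
        for u in (t + '"""', t + "'''"):
            triple_quoted.add(u)
    print(sorted(single_quoted))              # ['"', "'", 'b"', "b'", 'r"', "r'"]
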
@@ -315,7 +315,7 @@ class Untokenizer:
startline = False
toks_append(tokval)
-pk(6)
+
def untokenize(iterable):
"""Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
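
A round-trip sketch of the API the docstring describes, shown against the standard-library tokenize module that this file ports (module alias and source string are illustrative):

    import io
    import tokenize as t                      # CPython's module, standing in for this port

    src = b"x = 1 + 2\n"
    toks = list(t.tokenize(io.BytesIO(src).readline))
    print(t.untokenize(toks))                 # b'x = 1 + 2\n', encoded per the ENCODING token
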
@@ -446,7 +446,7 @@ def detect_encoding(readline):
return default, [first, second]
-pk(7)
+
def open(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
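
Usage sketch for detect_encoding() and open() above, again against the standard-library module; the file name and its contents are hypothetical:

    import tokenize as t

    with open('example.py', 'wb') as f:                    # builtin open: write a test file
        f.write(b'# -*- coding: latin-1 -*-\nx = 1\n')

    with open('example.py', 'rb') as f:
        enc, first_lines = t.detect_encoding(f.readline)
    print(enc)                                             # 'iso-8859-1'

    with t.open('example.py') as f:                        # read-only, decoded with that encoding
        print(f.read())
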
@@ -725,7 +725,6 @@ def _tokenize(readline, encoding):
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-pk(8)
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
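
generate_tokens() above is the string-based entry point; a minimal usage sketch with the standard-library module:

    import io
    import tokenize as t

    for tok in t.generate_tokens(io.StringIO("x = 1\n").readline):
        print(tok.type, tok.string)
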
@@ -790,4 +789,4 @@ def main():
except Exception as err:
perror("unexpected error: %s" % err)
raise
-pk(9)
+