Avoid wildcard import
parent eef3a17e55
commit 90df10f3a8
1 changed file with 20 additions and 20 deletions
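In short, the module previously pulled every public name from the `token` module into its own namespace with `from token import *`; this commit switches to a plain `import token` and qualifies each reference. A minimal standalone sketch (using CPython's stdlib `token` module, not this repo's code) of why the qualified form is safer for a file that defines its own token constants:

    # With "from token import *", a module-level assignment like COMMENT
    # below would silently shadow or collide with whatever the wildcard
    # dragged in, and readers can't tell where NAME, N_TOKENS, etc. come from.
    import token

    COMMENT = token.N_TOKENS + 0                # this module's own constant
    print(COMMENT, token.tok_name[token.NAME])  # stdlib names stay qualified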
@@ -1,23 +1,23 @@
 # (c) 2019 Paul Sokolovsky, MIT license
-from token import *
+import token
 from collections import namedtuple
 import io


 # Hacking in comments, newline and encoding as tokens
-COMMENT = N_TOKENS + 0
-NL = N_TOKENS + 1
-ENCODING = N_TOKENS + 2
-tok_name[COMMENT] = "COMMENT"
-tok_name[NL] = "NL"
-tok_name[ENCODING] = "ENCODING"
+COMMENT = token.N_TOKENS + 0
+NL = token.N_TOKENS + 1
+ENCODING = token.N_TOKENS + 2
+token.tok_name[COMMENT] = "COMMENT"
+token.tok_name[NL] = "NL"
+token.tok_name[ENCODING] = "ENCODING"


 class TokenInfo(namedtuple("TokenInfo", ("type", "string", "start", "end", "line"))):
     def __str__(self):
         return "TokenInfo(type=%d (%s), string=%r, startl=%d, line=%r)" % (
             self.type,
-            tok_name[self.type],
+            token.tok_name[self.type],
             self.string,
             self.start,
             self.line,
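Aside: the COMMENT/NL/ENCODING block above shows the file's trick for extending the stock token table: allocate IDs just past `token.N_TOKENS` and register printable names in `token.tok_name`. A standalone sketch of the same pattern (assuming CPython's `token` module, where both names also exist; `FOO` is a made-up token type):

    import token

    # New token IDs start right after the stock ones, so they never
    # collide with NAME, NUMBER, OP, etc.
    FOO = token.N_TOKENS + 0
    token.tok_name[FOO] = "FOO"

    print(FOO, token.tok_name[FOO])  # the numeric ID varies across versions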
@@ -108,11 +108,11 @@ def generate_tokens(readline):

         if paren_level == 0:
             if i > indent_stack[-1]:
-                yield TokenInfo(INDENT, org_l[:i], lineno, 0, org_l)
+                yield TokenInfo(token.INDENT, org_l[:i], lineno, 0, org_l)
                 indent_stack.append(i)
             elif i < indent_stack[-1]:
                 while i != indent_stack[-1]:
-                    yield TokenInfo(DEDENT, "", lineno, 0, org_l)
+                    yield TokenInfo(token.DEDENT, "", lineno, 0, org_l)
                     indent_stack.pop()

         while l:
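The INDENT/DEDENT hunk above is the classic indent-stack scheme: the stack holds the indent widths currently open, and each new line's width is compared against the top. A simplified standalone sketch of the idea (names are illustrative, not the module's):

    indent_stack = [0]

    def indent_events(width):
        # width: leading-whitespace count of the new logical line
        if width > indent_stack[-1]:
            indent_stack.append(width)
            yield "INDENT"
        else:
            while width < indent_stack[-1]:
                indent_stack.pop()
                yield "DEDENT"

    print(list(indent_events(4)))  # ['INDENT']
    print(list(indent_events(0)))  # ['DEDENT']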
@@ -152,7 +152,7 @@ def generate_tokens(readline):
                 if l.startswith("j"):
                     t += l[0]
                     l = l[1:]
-                yield TokenInfo(NUMBER, t, lineno, 0, org_l)
+                yield TokenInfo(token.NUMBER, t, lineno, 0, org_l)
             elif l[0].isalpha() or l.startswith("_") or ord(l[0]) >= 0xAA:
                 name = ""
                 while l and (
@@ -172,10 +172,10 @@ def generate_tokens(readline):
                     "f",
                 ):
                     s, l, lineno_delta = get_str(l, readline)
-                    yield TokenInfo(STRING, name + s, lineno, 0, org_l)
+                    yield TokenInfo(token.STRING, name + s, lineno, 0, org_l)
                     lineno += lineno_delta
                 else:
-                    yield TokenInfo(NAME, name, lineno, 0, org_l)
+                    yield TokenInfo(token.NAME, name, lineno, 0, org_l)
             elif l == "\\\n":
                 l = readline()
                 lineno += 1
@@ -184,13 +184,13 @@ def generate_tokens(readline):
                 if paren_level > 0:
                     yield TokenInfo(NL, nl, lineno, 0, org_l)
                 else:
-                    yield TokenInfo(NEWLINE, nl, lineno, 0, org_l)
+                    yield TokenInfo(token.NEWLINE, nl, lineno, 0, org_l)
                     break
             elif l[0].isspace():
                 l = l[1:]
             elif l.startswith('"') or l.startswith("'"):
                 s, l, lineno_delta = get_str(l, readline)
-                yield TokenInfo(STRING, s, lineno, 0, org_l)
+                yield TokenInfo(token.STRING, s, lineno, 0, org_l)
                 lineno += lineno_delta
             elif l.startswith("#"):
                 yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
@@ -222,11 +222,11 @@ def generate_tokens(readline):
                 "->",
             ):
                 if l.startswith(op):
-                    yield TokenInfo(OP, op, lineno, 0, org_l)
+                    yield TokenInfo(token.OP, op, lineno, 0, org_l)
                     l = l[len(op) :]
                     break
             else:
-                yield TokenInfo(OP, l[0], lineno, 0, org_l)
+                yield TokenInfo(token.OP, l[0], lineno, 0, org_l)
                 if l[0] in ("(", "[", "{"):
                     paren_level += 1
                 elif l[0] in (")", "]", "}"):
@@ -234,7 +234,7 @@ def generate_tokens(readline):
                 l = l[1:]

     while indent_stack[-1] > 0:
-        yield TokenInfo(DEDENT, "", lineno, 0, "")
+        yield TokenInfo(token.DEDENT, "", lineno, 0, "")
         indent_stack.pop()

-    yield TokenInfo(ENDMARKER, "", lineno, 0, "")
+    yield TokenInfo(token.ENDMARKER, "", lineno, 0, "")
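For context, a hedged usage sketch of the tokenizer after this change. The import path is a guess based on the module's purpose (the diff does not show the file name), and the exact output depends on surrounding code not in these hunks:

    import io
    from utokenize import generate_tokens  # assumed module name

    src = io.StringIO("x = 1\n")
    for tok in generate_tokens(src.readline):
        # Each item is the TokenInfo namedtuple from the first hunk;
        # printing it goes through __str__, which (post-change) looks up
        # the symbolic name via token.tok_name.
        print(tok)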