Avoid wildcard import

Reid 'arrdem' McKenzie 2023-03-08 15:28:34 -07:00
parent eef3a17e55
commit 90df10f3a8


@@ -1,23 +1,23 @@
 # (c) 2019 Paul Sokolovsky, MIT license
-from token import *
+import token
 from collections import namedtuple
 import io
 
 # Hacking in comments, newline and encoding as tokens
-COMMENT = N_TOKENS + 0
-NL = N_TOKENS + 1
-ENCODING = N_TOKENS + 2
-tok_name[COMMENT] = "COMMENT"
-tok_name[NL] = "NL"
-tok_name[ENCODING] = "ENCODING"
+COMMENT = token.N_TOKENS + 0
+NL = token.N_TOKENS + 1
+ENCODING = token.N_TOKENS + 2
+token.tok_name[COMMENT] = "COMMENT"
+token.tok_name[NL] = "NL"
+token.tok_name[ENCODING] = "ENCODING"
 
 
 class TokenInfo(namedtuple("TokenInfo", ("type", "string", "start", "end", "line"))):
     def __str__(self):
         return "TokenInfo(type=%d (%s), string=%r, startl=%d, line=%r)" % (
             self.type,
-            tok_name[self.type],
+            token.tok_name[self.type],
             self.string,
             self.start,
             self.line,
@@ -108,11 +108,11 @@ def generate_tokens(readline):
         if paren_level == 0:
             if i > indent_stack[-1]:
-                yield TokenInfo(INDENT, org_l[:i], lineno, 0, org_l)
+                yield TokenInfo(token.INDENT, org_l[:i], lineno, 0, org_l)
                 indent_stack.append(i)
             elif i < indent_stack[-1]:
                 while i != indent_stack[-1]:
-                    yield TokenInfo(DEDENT, "", lineno, 0, org_l)
+                    yield TokenInfo(token.DEDENT, "", lineno, 0, org_l)
                     indent_stack.pop()
 
         while l:
@@ -152,7 +152,7 @@ def generate_tokens(readline):
             if l.startswith("j"):
                 t += l[0]
                 l = l[1:]
-            yield TokenInfo(NUMBER, t, lineno, 0, org_l)
+            yield TokenInfo(token.NUMBER, t, lineno, 0, org_l)
         elif l[0].isalpha() or l.startswith("_") or ord(l[0]) >= 0xAA:
             name = ""
             while l and (
@@ -172,10 +172,10 @@ def generate_tokens(readline):
                 "f",
             ):
                 s, l, lineno_delta = get_str(l, readline)
-                yield TokenInfo(STRING, name + s, lineno, 0, org_l)
+                yield TokenInfo(token.STRING, name + s, lineno, 0, org_l)
                 lineno += lineno_delta
             else:
-                yield TokenInfo(NAME, name, lineno, 0, org_l)
+                yield TokenInfo(token.NAME, name, lineno, 0, org_l)
         elif l == "\\\n":
             l = readline()
             lineno += 1
@@ -184,13 +184,13 @@ def generate_tokens(readline):
             if paren_level > 0:
                 yield TokenInfo(NL, nl, lineno, 0, org_l)
             else:
-                yield TokenInfo(NEWLINE, nl, lineno, 0, org_l)
+                yield TokenInfo(token.NEWLINE, nl, lineno, 0, org_l)
                 break
         elif l[0].isspace():
             l = l[1:]
         elif l.startswith('"') or l.startswith("'"):
             s, l, lineno_delta = get_str(l, readline)
-            yield TokenInfo(STRING, s, lineno, 0, org_l)
+            yield TokenInfo(token.STRING, s, lineno, 0, org_l)
             lineno += lineno_delta
         elif l.startswith("#"):
             yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
@@ -222,11 +222,11 @@ def generate_tokens(readline):
             "->",
         ):
             if l.startswith(op):
-                yield TokenInfo(OP, op, lineno, 0, org_l)
+                yield TokenInfo(token.OP, op, lineno, 0, org_l)
                 l = l[len(op) :]
                 break
         else:
-            yield TokenInfo(OP, l[0], lineno, 0, org_l)
+            yield TokenInfo(token.OP, l[0], lineno, 0, org_l)
             if l[0] in ("(", "[", "{"):
                 paren_level += 1
             elif l[0] in (")", "]", "}"):
@@ -234,7 +234,7 @@ def generate_tokens(readline):
             l = l[1:]
 
     while indent_stack[-1] > 0:
-        yield TokenInfo(DEDENT, "", lineno, 0, "")
+        yield TokenInfo(token.DEDENT, "", lineno, 0, "")
         indent_stack.pop()
-    yield TokenInfo(ENDMARKER, "", lineno, 0, "")
+    yield TokenInfo(token.ENDMARKER, "", lineno, 0, "")
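
For context, a minimal sketch (not part of this commit) of the hazard the change avoids. A wildcard import dumps every public name from the token module into the importer's namespace, where a later definition can silently shadow one of them; the namespaced form this commit switches to keeps the constants behind an explicit prefix.

# Hypothetical illustration only; none of this is from the patched file.
from token import *    # pulls in NAME, NUMBER, OP, ... invisibly
NAME = "oops"          # silently shadows the NAME token-type constant

import token
assert isinstance(token.NAME, int)   # the real constant is still reachable
print(token.tok_name[token.NAME])    # -> "NAME"

Note that COMMENT, NL, and ENCODING stay unprefixed throughout the diff because they are defined in this module itself rather than imported from token.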