[NO TESTS] WIP

This commit is contained in:
Reid 'arrdem' McKenzie 2023-03-15 00:45:32 -06:00
parent 48ae28f7d6
commit 6ce0e888b9
28 changed files with 157 additions and 8430 deletions

1
.bazelignore Normal file
View file

@ -0,0 +1 @@
.git

2
.bazelrc Normal file
View file

@ -0,0 +1,2 @@
test --test_output=errors
build --keep_going

1
.bazelversion Normal file
View file

@ -0,0 +1 @@
6.0.0

1
.envrc Normal file
View file

@ -0,0 +1 @@
export VIRTUAL_ENV=/home/arrdem/.virtualenvs/flowmetal

2
.gitignore vendored
View file

@ -1,2 +1,4 @@
/**/__pycache__ /**/__pycache__
/**/*.egg-info /**/*.egg-info
scratch
bazel-*

View file

@ -1,24 +0,0 @@
# Bzlmod module configuration: pulls in rules_python and generates the "pypa"
# repository of pip dependencies from the requirements lock file.
bazel_dep(name = "rules_python", version = "0.19.0")
# Third-party Python deps are resolved by the rules_python pip extension.
pip = use_extension("@rules_python//python:extensions.bzl", "pip")
pip.parse(
name = "pypa",
requirements_lock = "//tools/python:requirements_lock.txt",
)
use_repo(pip, "pypa")
# (Optional) Register a specific python toolchain instead of using the host version
python = use_extension("@rules_python//python:extensions.bzl", "python")
python.toolchain(
name = "python3_10",
python_version = "3.10",
)
use_repo(python, "python3_10_toolchains")
register_toolchains(
"@python3_10_toolchains//:all",
)

View file

@ -1,16 +1,52 @@
workspace( workspace(
name = "arrdem_flowmetal", name = "flowmetal"
) )
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive( http_archive(
name = "rules_python", name = "bazel_skylib",
sha256 = "ffc7b877c95413c82bfd5482c017edcf759a6250d8b24e82f41f3c8b8d9e287e", sha256 = "b8a1527901774180afc798aeb28c4634bdccf19c4d98e7bdd1ce79d1fe9aaad7",
strip_prefix = "rules_python-0.19.0", urls = [
url = "https://github.com/bazelbuild/rules_python/releases/download/0.19.0/rules_python-0.19.0.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.4.1/bazel-skylib-1.4.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.4.1/bazel-skylib-1.4.1.tar.gz",
],
) )
load("@rules_python//python:repositories.bzl", "py_repositories") load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace")
py_repositories() bazel_skylib_workspace()
rules_python_version = "c504355672223144cefb2cbf3f69e2d38e7e2726"
http_archive(
name = "rules_python",
sha256 = "3f12b492dbf7d56b0e3deed81f21d56c3241babaa52d7eb525cb7c657bba9125",
strip_prefix = "rules_python-{}".format(rules_python_version),
url = "https://github.com/bazelbuild/rules_python/archive/{}.zip".format(rules_python_version),
)
load("@rules_python//python:repositories.bzl", "python_register_toolchains")
python_register_toolchains(
name = "python3_10",
python_version = "3.10",
)
load("@python3_10//:defs.bzl", python3_10="interpreter")
load("@rules_python//python:pip.bzl", "pip_parse")
# Create a central repo that knows about the dependencies needed from
# requirements_lock.txt.
pip_parse(
name = "pypi",
python_interpreter_target = python3_10,
requirements_lock = "//tools/python:requirements_lock.txt",
)
load("@pypi//:requirements.bzl", "install_deps")
install_deps()

File diff suppressed because it is too large Load diff

View file

@ -1,6 +0,0 @@
# Build the uast library; it layers on top of the vendored tokenizer.
py_project(
    name = "uast",
    lib_deps = [
        "//components/utokenize",
    ],
)

View file

@ -1,5 +0,0 @@
# pycopy-ast
A small parallel implementation of Python's `ast` module, vendored from `pycopy-ast==2.9.1`.
[pycopy-ast](https://pypi.org/project/pycopy-ast/) is released under the MIT license, copyright © Paul Sokolovsky 2021.

View file

@ -1,126 +0,0 @@
# (c) 2019 Paul Sokolovsky. MIT license.
from .types import *
def dump_to_stream(t, file):
    """Write a repr-style rendering of *t* (AST node, list, or leaf) to *file*.

    Nodes render as ``ClassName(field=..., ...)`` over their public ``_fields``;
    lists render as ``[...]``; anything else falls back to ``repr``.
    """
    if isinstance(t, AST):
        file.write(type(t).__name__)
        file.write("(")
        first = True
        for name in t._fields:
            # Fields with a leading underscore are internal and not serialized.
            if name.startswith("_"):
                continue
            if not first:
                file.write(", ")
            file.write(name + "=")
            dump_to_stream(getattr(t, name, None), file)
            first = False
        file.write(")")
    elif isinstance(t, list):
        file.write("[")
        first = True
        for item in t:
            if not first:
                file.write(", ")
            dump_to_stream(item, file)
            first = False
        file.write("]")
    else:
        file.write(repr(t))
def dump(t):
    """Return the string serialization of node *t* (see dump_to_stream)."""
    import io

    sink = io.StringIO()
    dump_to_stream(t, sink)
    return sink.getvalue()
def iter_fields(t):
    """Yield ``(name, value)`` pairs for each public field of node *t*.

    Underscore-prefixed entries in ``_fields`` are skipped; missing attributes
    yield ``None``.
    """
    for name in t._fields:
        if not name.startswith("_"):
            yield (name, getattr(t, name, None))
def copy_location(new_node, old_node):
    """No-op location transfer, kept only for stdlib ``ast`` API compatibility.

    This implementation does not track source locations, so *old_node* is
    ignored and *new_node* is returned unchanged.
    """
    return new_node
def parse_tokens(token_stream, filename="<unknown>", mode="exec"):
    """Parse *token_stream* into an AST.

    *mode* mirrors the stdlib ``compile`` modes: "exec" yields a module tree,
    "eval" wraps a single expression, "single" wraps a single statement.
    Raises ValueError for any other mode.
    """
    import utokenize as tokenize

    from . import parser

    p = parser.Parser(token_stream)
    # Consume the leading ENCODING token emitted by the tokenizer.
    p.match(tokenize.ENCODING)
    if mode == "exec":
        return p.match_mod()
    if mode == "eval":
        return Expression(body=p.require_expr())
    if mode == "single":
        return Interactive(body=p.match_stmt())
    raise ValueError
def parse_stream(stream, filename="<unknown>", mode="exec"):
    """Tokenize *stream* and parse it into an AST.

    *filename* and *mode* are forwarded to parse_tokens (bug fix: they were
    previously dropped, so every caller got "exec" parsing regardless of the
    mode requested).
    """
    import utokenize as tokenize

    tstream = tokenize.tokenize(stream.readline)
    return parse_tokens(tstream, filename, mode)
def parse(source, filename="<unknown>", mode="exec"):
    """Parse a source string into an AST, mirroring ``ast.parse``'s signature."""
    import io

    return parse_stream(io.StringIO(source), filename, mode)
class NodeVisitor:
    """Visitor base class modeled on the stdlib ``ast.NodeVisitor``.

    ``visit`` dispatches to a ``visit_<ClassName>`` method when one is defined,
    otherwise falls back to ``generic_visit``, which walks AST children.
    """

    def visit(self, node):
        handler = getattr(self, "visit_" + node.__class__.__name__, None)
        if handler is None:
            return self.generic_visit(node)
        return handler(node)

    def generic_visit(self, node):
        """Visit every AST child of *node*, both direct and inside list fields."""
        for name in node._fields:
            value = getattr(node, name)
            if isinstance(value, list):
                for child in value:
                    if isinstance(child, AST):
                        self.visit(child)
            elif isinstance(value, AST):
                self.visit(value)
class NodeTransformer(NodeVisitor):
    """Rewriting visitor: each AST child is replaced by what ``visit`` returns.

    A ``None`` result drops the child (from list fields it is simply omitted);
    a list result is spliced in place of the child; any other value replaces it.
    """

    def generic_visit(self, node):
        for name in node._fields:
            value = getattr(node, name)
            if isinstance(value, list):
                rewritten = []
                for child in value:
                    if not isinstance(child, AST):
                        # Non-node entries (strings, ints, ...) pass through.
                        rewritten.append(child)
                        continue
                    result = self.visit(child)
                    if result is None:
                        pass  # dropped
                    elif isinstance(result, list):
                        rewritten.extend(result)
                    else:
                        rewritten.append(result)
                setattr(node, name, rewritten)
            elif isinstance(value, AST):
                setattr(node, name, self.visit(value))
        return node

File diff suppressed because it is too large Load diff

View file

@ -1,467 +0,0 @@
# (c) 2019 Paul Sokolovsky. MIT license.
"""AST node class hierarchy mirroring CPython's ``ast`` module, one class per
node kind. Each class lists its child slots in ``_fields``; ``AST.__init__``
simply stores whatever keyword fields it is handed."""


class AST:
    """Root of the node hierarchy; fields are supplied as keyword arguments."""

    def __init__(self, **fields):
        for name, value in fields.items():
            setattr(self, name, value)


# --- Module-level roots ------------------------------------------------------
class mod(AST):
    pass

class Module(mod):
    _fields = ("body",)

class Interactive(mod):
    _fields = ("body",)

class Expression(mod):
    _fields = ("body",)

class Suite(mod):
    _fields = ("body",)


# --- Statements --------------------------------------------------------------
class stmt(AST):
    pass

class FunctionDef(stmt):
    _fields = ("name", "args", "body", "decorator_list", "returns")

class AsyncFunctionDef(stmt):
    _fields = ("name", "args", "body", "decorator_list", "returns")

class ClassDef(stmt):
    _fields = ("name", "bases", "keywords", "body", "decorator_list")

class Return(stmt):
    _fields = ("value",)

class Delete(stmt):
    _fields = ("targets",)

class Assign(stmt):
    _fields = ("targets", "value")

class AugAssign(stmt):
    _fields = ("target", "op", "value")

class AnnAssign(stmt):
    _fields = ("target", "annotation", "value", "simple")

class For(stmt):
    _fields = ("target", "iter", "body", "orelse")

class AsyncFor(stmt):
    _fields = ("target", "iter", "body", "orelse")

class While(stmt):
    _fields = ("test", "body", "orelse")

class If(stmt):
    _fields = ("test", "body", "orelse")

class With(stmt):
    _fields = ("items", "body")

class AsyncWith(stmt):
    _fields = ("items", "body")

class Raise(stmt):
    _fields = ("exc", "cause")

class Try(stmt):
    _fields = ("body", "handlers", "orelse", "finalbody")

class Assert(stmt):
    _fields = ("test", "msg")

class Import(stmt):
    _fields = ("names",)

class ImportFrom(stmt):
    _fields = ("module", "names", "level")

class Global(stmt):
    _fields = ("names",)

class Nonlocal(stmt):
    _fields = ("names",)

class Expr(stmt):
    _fields = ("value",)

class Pass(stmt):
    _fields = ()

class Break(stmt):
    _fields = ()

class Continue(stmt):
    _fields = ()


# --- Expressions -------------------------------------------------------------
class expr(AST):
    pass

class BoolOp(expr):
    _fields = ("op", "values")

class BinOp(expr):
    _fields = ("left", "op", "right")

class UnaryOp(expr):
    _fields = ("op", "operand")

class Lambda(expr):
    _fields = ("args", "body")

class IfExp(expr):
    _fields = ("test", "body", "orelse")

class Dict(expr):
    _fields = ("keys", "values")

class Set(expr):
    _fields = ("elts",)

class ListComp(expr):
    _fields = ("elt", "generators")

class SetComp(expr):
    _fields = ("elt", "generators")

class DictComp(expr):
    _fields = ("key", "value", "generators")

class GeneratorExp(expr):
    _fields = ("elt", "generators")

class Await(expr):
    _fields = ("value",)

class Yield(expr):
    _fields = ("value",)

class YieldFrom(expr):
    _fields = ("value",)

class Compare(expr):
    _fields = ("left", "ops", "comparators")

class Call(expr):
    _fields = ("func", "args", "keywords")

class Num(expr):
    _fields = ("n",)

class Str(expr):
    _fields = ("s",)

class FormattedValue(expr):
    _fields = ("value", "conversion", "format_spec")

class JoinedStr(expr):
    _fields = ("values",)

class Bytes(expr):
    _fields = ("s",)

class NameConstant(expr):
    _fields = ("value",)

class Ellipsis(expr):
    _fields = ()

class Constant(expr):
    _fields = ("value",)

class Attribute(expr):
    _fields = ("value", "attr", "ctx")

class Subscript(expr):
    _fields = ("value", "slice", "ctx")

class Starred(expr):
    _fields = ("value", "ctx")

class Name(expr):
    _fields = ("id", "ctx")

class List(expr):
    _fields = ("elts", "ctx")

class Tuple(expr):
    _fields = ("elts", "ctx")


# --- Expression contexts (load/store/delete/...) -----------------------------
class expr_context(AST):
    pass

class Load(expr_context):
    _fields = ()

class Store(expr_context):
    _fields = ()

class StoreConst(expr_context):
    _fields = ()

class Del(expr_context):
    _fields = ()

class AugLoad(expr_context):
    _fields = ()

class AugStore(expr_context):
    _fields = ()

class Param(expr_context):
    _fields = ()


# --- Subscript slices --------------------------------------------------------
class slice(AST):
    pass

class Slice(slice):
    _fields = ("lower", "upper", "step")

class ExtSlice(slice):
    _fields = ("dims",)

class Index(slice):
    _fields = ("value",)


# --- Operators ---------------------------------------------------------------
class boolop(AST):
    pass

class And(boolop):
    _fields = ()

class Or(boolop):
    _fields = ()

class operator(AST):
    pass

class Add(operator):
    _fields = ()

class Sub(operator):
    _fields = ()

class Mult(operator):
    _fields = ()

class MatMult(operator):
    _fields = ()

class Div(operator):
    _fields = ()

class Mod(operator):
    _fields = ()

class Pow(operator):
    _fields = ()

class LShift(operator):
    _fields = ()

class RShift(operator):
    _fields = ()

class BitOr(operator):
    _fields = ()

class BitXor(operator):
    _fields = ()

class BitAnd(operator):
    _fields = ()

class FloorDiv(operator):
    _fields = ()

class unaryop(AST):
    pass

class Invert(unaryop):
    _fields = ()

class Not(unaryop):
    _fields = ()

class UAdd(unaryop):
    _fields = ()

class USub(unaryop):
    _fields = ()

class cmpop(AST):
    pass

class Eq(cmpop):
    _fields = ()

class NotEq(cmpop):
    _fields = ()

class Lt(cmpop):
    _fields = ()

class LtE(cmpop):
    _fields = ()

class Gt(cmpop):
    _fields = ()

class GtE(cmpop):
    _fields = ()

class Is(cmpop):
    _fields = ()

class IsNot(cmpop):
    _fields = ()

class In(cmpop):
    _fields = ()

class NotIn(cmpop):
    _fields = ()


# --- Auxiliary nodes ---------------------------------------------------------
class comprehension(AST):
    _fields = ("target", "iter", "ifs", "is_async")

class excepthandler(AST):
    pass

class ExceptHandler(excepthandler):
    _fields = ("type", "name", "body")

class arguments(AST):
    _fields = ("args", "vararg", "kwonlyargs", "kw_defaults", "kwarg", "defaults")

class arg(AST):
    _fields = ("arg", "annotation")

class keyword(AST):
    _fields = ("arg", "value")

class alias(AST):
    _fields = ("name", "asname")

class withitem(AST):
    _fields = ("context_expr", "optional_vars")

File diff suppressed because it is too large Load diff

View file

@ -1,3 +0,0 @@
# Build the vendored tokenizer as its own library target.
py_project(
    name = "utokenize",
)

View file

@ -1,5 +0,0 @@
# pycopy-utokenize
A small Python tokenizer, vendored from `pycopy-utokenize==2.0`.
[pycopy-utokenize](https://pypi.org/project/pycopy-utokenize/) is released under the MIT license, copyright © Paul Sokolovsky 2021.

View file

@ -1,240 +0,0 @@
# (c) 2019 Paul Sokolovsky, MIT license
import token
from collections import namedtuple
import io
# Hacking in comments, newline and encoding as tokens
# The stdlib token module has no ids for COMMENT, non-logical NL, or the
# ENCODING marker, so extend its numbering space past N_TOKENS and register
# printable names so debug output via token.tok_name stays readable.
COMMENT = token.N_TOKENS + 0
NL = token.N_TOKENS + 1
ENCODING = token.N_TOKENS + 2
token.tok_name[COMMENT] = "COMMENT"
token.tok_name[NL] = "NL"
token.tok_name[ENCODING] = "ENCODING"
class TokenInfo(namedtuple("TokenInfo", ("type", "string", "start", "end", "line"))):
    """A produced token: (type id, lexeme, start line, end, source line).

    ``__str__`` adds the symbolic token name from ``token.tok_name`` for
    readable debug output.
    """

    def __str__(self):
        return (
            f"TokenInfo(type={self.type} ({token.tok_name[self.type]}), "
            f"string={self.string!r}, startl={self.start}, line={self.line!r})"
        )
def get_indent(l):
    """Split leading whitespace off *l*; return (indent width, remainder).

    Returns None if the line is entirely spaces/tabs (callers always pass
    lines terminated by "\n", so in practice a hit is always found).
    """
    for width, ch in enumerate(l):
        if ch not in (" ", "\t"):
            return width, l[width:]
def get_str(l, readline):
    """Consume a string literal starting at the head of *l*.

    Returns (literal text, rest of current line, number of extra lines read).
    *readline* is called to fetch continuation lines for multi-line literals.

    Bug fix: the accumulator is an io.StringIO, but the original appended with
    ``s += ...`` which raises TypeError on StringIO; all appends now use
    ``s.write(...)``.
    """
    lineno = 0
    s = io.StringIO()
    if l.startswith('"""') or l.startswith("'''"):
        # Triple-quoted: scan forward (possibly across lines) for the closer.
        sep = l[0:3]
        s.write(sep)
        l = l[3:]
        pos = 0
        while True:
            i = l.find(sep, pos)
            if i >= 0:
                if i > 0 and l[i - 1] == "\\":
                    # Escaped closer; keep searching past it.
                    pos = i + 1
                    continue
                break
            s.write(l)
            l = readline()
            pos = 0
            assert l
            lineno += 1
        s.write(l[: i + 3])
        return s.getvalue(), l[i + 3 :], lineno
    # Single-quoted: read char by char, honoring backslash escapes
    # (including escaped newlines, which pull in the next physical line).
    lbuf = io.StringIO(l)
    sep = lbuf.read(1)
    s.write(sep)
    while True:
        c = lbuf.read(1)
        if not c:
            break
        s.write(c)
        if c == "\\":
            c = lbuf.read(1)
            s.write(c)
            if c == "\n":
                lbuf = io.StringIO(readline())
                lineno += 1
                continue
        elif c == sep:
            break
    return s.getvalue(), lbuf.read(), lineno
def generate_tokens(readline):
    """Yield TokenInfo tokens for the source supplied line-by-line by *readline*.

    A pure-Python re-implementation of the stdlib tokenizer's core loop:
    tracks indentation (INDENT/DEDENT), paren nesting (suppresses NEWLINE
    inside brackets), and classifies numbers, names, strings, comments and
    operators. Emits trailing DEDENTs and ENDMARKER at EOF.
    """
    indent_stack = [0]
    lineno = 0
    paren_level = 0
    no_newline = False
    # generate_tokens() doesn't yield this, only tokenize() does.
    # yield TokenInfo(ENCODING, "utf-8", 0, 0, "")
    while True:
        l = readline()
        lineno += 1
        org_l = l
        if not l:
            break
        if not l.endswith("\n"):
            # Final line without newline: pretend one exists, but remember to
            # emit empty NEWLINE/NL strings for it.
            l += "\n"
            no_newline = True
        i, l = get_indent(l)
        if l == "\n":
            yield TokenInfo(NL, l, lineno, 0, org_l)
            continue
        elif l == "\x0c\n":  # form feed + newline
            yield TokenInfo(NL, "\n", lineno, 0, org_l)
            continue
        if l.startswith("#"):
            # Whole-line comment: COMMENT then NL, no indent processing.
            yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
            yield TokenInfo(NL, "\n", lineno, 0, org_l)
            continue
        if paren_level == 0:
            # Indentation is only significant outside brackets.
            if i > indent_stack[-1]:
                yield TokenInfo(token.INDENT, org_l[:i], lineno, 0, org_l)
                indent_stack.append(i)
            elif i < indent_stack[-1]:
                while i != indent_stack[-1]:
                    yield TokenInfo(token.DEDENT, "", lineno, 0, org_l)
                    indent_stack.pop()
        while l:
            if l[0].isdigit() or (l.startswith(".") and len(l) > 1 and l[1].isdigit()):
                # Number literal: optional radix prefix, digits/underscores,
                # at most one dot, optional exponent, optional imaginary "j".
                seen_dot = False
                t = ""
                if l.startswith("0x") or l.startswith("0X"):
                    t = "0x"
                    l = l[2:]
                elif l.startswith("0o") or l.startswith("0O"):
                    t = "0o"
                    l = l[2:]
                elif l.startswith("0b") or l.startswith("0B"):
                    t = "0b"
                    l = l[2:]
                while l and (
                    l[0].isdigit()
                    or l[0] == "."
                    or l[0] == "_"
                    or (t.startswith("0x") and l[0] in "ABCDEFabcdef")
                ):
                    if l[0] == ".":
                        if seen_dot:
                            break
                        seen_dot = True
                    t += l[0]
                    l = l[1:]
                if l.startswith("e") or l.startswith("E"):
                    t += l[0]
                    l = l[1:]
                    if l[0] in ("+", "-"):
                        t += l[0]
                        l = l[1:]
                    while l and (l[0].isdigit() or l[0] == "_"):
                        t += l[0]
                        l = l[1:]
                if l.startswith("j"):
                    t += l[0]
                    l = l[1:]
                yield TokenInfo(token.NUMBER, t, lineno, 0, org_l)
            elif l[0].isalpha() or l.startswith("_") or ord(l[0]) >= 0xAA:
                # Identifier/keyword, or string prefix (b/r/u/f combos).
                name = ""
                while l and (
                    l[0].isalpha()
                    or l[0].isdigit()
                    or l.startswith("_")
                    or ord(l[0]) >= 0xAA
                ):
                    name += l[0]
                    l = l[1:]
                if (l.startswith('"') or l.startswith("'")) and name in (
                    "b",
                    "r",
                    "rb",
                    "br",
                    "u",
                    "f",
                ):
                    s, l, lineno_delta = get_str(l, readline)
                    yield TokenInfo(token.STRING, name + s, lineno, 0, org_l)
                    lineno += lineno_delta
                else:
                    yield TokenInfo(token.NAME, name, lineno, 0, org_l)
            elif l == "\\\n":
                # Explicit line continuation: splice in the next line.
                l = readline()
                lineno += 1
            elif l[0] == "\n":
                nl = "" if no_newline else "\n"
                if paren_level > 0:
                    yield TokenInfo(NL, nl, lineno, 0, org_l)
                else:
                    yield TokenInfo(token.NEWLINE, nl, lineno, 0, org_l)
                break
            elif l[0].isspace():
                l = l[1:]
            elif l.startswith('"') or l.startswith("'"):
                s, l, lineno_delta = get_str(l, readline)
                yield TokenInfo(token.STRING, s, lineno, 0, org_l)
                lineno += lineno_delta
            elif l.startswith("#"):
                # Trailing comment: emit it, then fall through to NEWLINE.
                yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
                l = "\n"
            else:
                # Multi-char operators first (longest match), else single char.
                for op in (
                    "**=",
                    "//=",
                    ">>=",
                    "<<=",
                    "+=",
                    "-=",
                    "*=",
                    "/=",
                    "%=",
                    "@=",
                    "&=",
                    "|=",
                    "^=",
                    "**",
                    "//",
                    "<<",
                    ">>",
                    "==",
                    "!=",
                    ">=",
                    "<=",
                    "...",
                    "->",
                ):
                    if l.startswith(op):
                        yield TokenInfo(token.OP, op, lineno, 0, org_l)
                        l = l[len(op) :]
                        break
                else:
                    yield TokenInfo(token.OP, l[0], lineno, 0, org_l)
                    if l[0] in ("(", "[", "{"):
                        paren_level += 1
                    elif l[0] in (")", "]", "}"):
                        paren_level -= 1
                    l = l[1:]
    while indent_stack[-1] > 0:
        yield TokenInfo(token.DEDENT, "", lineno, 0, "")
        indent_stack.pop()
    yield TokenInfo(token.ENDMARKER, "", lineno, 0, "")

File diff suppressed because it is too large Load diff

8
pyproject.toml Normal file
View file

@ -0,0 +1,8 @@
[tool.isort]
py_version=311
line_length=100
skip_glob = [
".git/*",
".bazel/*",
"bazel-*",
]

View file

@ -7,13 +7,12 @@
# Copyright (c) 2019 Paul Sokolovsky, published under the MIT License # Copyright (c) 2019 Paul Sokolovsky, published under the MIT License
import ast import ast
import builtins
import logging import logging
import os import os
import sys import sys
import builtins
from typing import Optional, Type from typing import Optional, Type
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -54,6 +53,7 @@ class ModuleNS(ANamespace):
# parent: Optional["ModuleNS"] = None # parent: Optional["ModuleNS"] = None
pass pass
class FunctionNS(ANamespace): class FunctionNS(ANamespace):
pass pass
@ -181,6 +181,7 @@ class InterpModule:
# namespace tree and the some sort of cursor or address into the AST under interpretation # namespace tree and the some sort of cursor or address into the AST under interpretation
# representing where to resume. The logical equivalent of a program counter, but a tree path. # representing where to resume. The logical equivalent of a program counter, but a tree path.
class ModuleInterpreter(StrictNodeVisitor): class ModuleInterpreter(StrictNodeVisitor):
"""An interpreter specific to a single module.""" """An interpreter specific to a single module."""

View file

@ -19,7 +19,6 @@ print(a.baz)
import random import random
for _ in range(10): for _ in range(10):
print(random.randint(0, 1024)) print(random.randint(0, 1024))
@ -30,5 +29,4 @@ def bar(a, b, **bs):
import requests import requests
print(len(requests.get("https://pypi.org/pypi/requests/json").text)) print(len(requests.get("https://pypi.org/pypi/requests/json").text))

View file

@ -1,47 +0,0 @@
load("@bazel_skylib//rules:copy_file.bzl",
"copy_file",
)
def cp(name, src, **kwargs):
    """A slightly more convenient cp() rule. Name and out should always be the same."""

    # Derive a label-safe rule name from the requested output path.
    sanitized = name.replace(".", "_").replace(":", "/").replace("//", "").replace("/", "_")
    copy_file(
        name = sanitized,
        src = src,
        out = name,
        **kwargs
    )
    return sanitized
def _copy_filegroup_impl(ctx):
    """Copy every file of each dep into this package, preserving relative paths.

    Declares one output per input file (source-package prefix stripped) and a
    cp action to produce it; returns the outputs as both files and runfiles.
    """
    all_outputs = []
    for t in ctx.attr.deps:
        t_prefix = t.label.package
        for f in t.files.to_list():
            # Strip out the source prefix so outputs land relative to this package.
            path = f.short_path.replace(t_prefix + "/", "")
            out = ctx.actions.declare_file(path)
            # Bug fix: removed leftover debug print() that fired on every
            # analysis run for every copied file.
            all_outputs.append(out)
            ctx.actions.run_shell(
                outputs = [out],
                inputs = depset([f]),
                arguments = [f.path, out.path],
                command = "cp $1 $2",
            )
    return [
        DefaultInfo(
            files = depset(all_outputs),
            runfiles = ctx.runfiles(files = all_outputs),
        ),
    ]
# Rule wrapper: copies the files of the given filegroup deps into the
# consuming package (see _copy_filegroup_impl).
copy_filegroups = rule(
    implementation = _copy_filegroup_impl,
    attrs = {
        "deps": attr.label_list(),
    },
)

View file

@ -10,15 +10,6 @@ load("//tools/python:defs.bzl",
"py_project", "py_project",
) )
load("@pypa//:requirements.bzl", load("@pypi//:requirements.bzl",
py_requirement="requirement" py_requirement="requirement"
) )
load("@bazel_skylib//rules:copy_file.bzl",
"copy_file",
)
load("//tools/build_rules:cp.bzl",
"cp",
"copy_filegroups"
)

View file

@ -2,7 +2,7 @@ load("@rules_python//python:defs.bzl",
"py_runtime_pair", "py_runtime_pair",
) )
load("@arrdem_source_pypi//:requirements.bzl", "all_requirements") load("@pypi//:requirements.bzl", "all_requirements")
package(default_visibility = ["//visibility:public"]) package(default_visibility = ["//visibility:public"])
@ -13,6 +13,7 @@ exports_files([
"bzl_pytest_shim.py", "bzl_pytest_shim.py",
"bzl_unittest_shim.py", "bzl_unittest_shim.py",
"pythonshim", "pythonshim",
"requirements_lock.txt",
]) ])
py_runtime( py_runtime(
@ -40,8 +41,5 @@ py_pytest(
srcs = [ srcs = [
"test_licenses.py", "test_licenses.py",
], ],
data = [
"requirements.txt",
],
deps = all_requirements, deps = all_requirements,
) )

View file

@ -4,7 +4,6 @@ import sys
import pytest import pytest
if __name__ == "__main__": if __name__ == "__main__":
cmdline = ["--ignore=external"] + sys.argv[1:] cmdline = ["--ignore=external"] + sys.argv[1:]
print(cmdline, file=sys.stderr) print(cmdline, file=sys.stderr)

View file

@ -1,4 +1,4 @@
load("@arrdem_source_pypi//:requirements.bzl", load("@pypi//:requirements.bzl",
_py_requirement = "requirement" _py_requirement = "requirement"
) )

View file

@ -1,4 +1,5 @@
attrs==22.2.0 attrs==22.2.0
autoflake8==0.4.0
black==23.1.0 black==23.1.0
cattrs==22.2.0 cattrs==22.2.0
click==8.1.3 click==8.1.3
@ -6,6 +7,7 @@ coverage==7.2.1
exceptiongroup==1.1.0 exceptiongroup==1.1.0
hypothesis==6.68.2 hypothesis==6.68.2
iniconfig==2.0.0 iniconfig==2.0.0
isort==5.12.0
jedi==0.18.2 jedi==0.18.2
mypy-extensions==1.0.0 mypy-extensions==1.0.0
packaging==23.0 packaging==23.0
@ -14,6 +16,7 @@ pathspec==0.11.0
platformdirs==3.1.0 platformdirs==3.1.0
pluggy==1.0.0 pluggy==1.0.0
pudb==2022.1.3 pudb==2022.1.3
pyflakes==3.0.1
Pygments==2.14.0 Pygments==2.14.0
pytest==7.2.2 pytest==7.2.2
pytest-cov==4.0.0 pytest-cov==4.0.0

View file

@ -4,12 +4,8 @@ Validate 3rdparty library licenses as approved.
import re import re
from pkg_resources import (
DistInfoDistribution,
working_set,
)
import pytest import pytest
from pkg_resources import DistInfoDistribution, working_set
# Licenses approved as representing non-copyleft and not precluding commercial usage. # Licenses approved as representing non-copyleft and not precluding commercial usage.
# This is all easy, there's a good schema here. # This is all easy, there's a good schema here.
@ -57,11 +53,7 @@ LICENSES_BY_LOWERNAME.update(
) )
# As a workaround for packages which don't have correct metadata on PyPi, hand-verified packages # As a workaround for packages which don't have correct metadata on PyPi, hand-verified packages
APPROVED_PACKAGES = [ APPROVED_PACKAGES = []
"yamllint", # WARNING: YAMLLINT IS GLP3"d.
"Flask_Log_Request_ID", # MIT, currently depended on as a git dep.
"anosql", # BSD
]
def bash_license(ln): def bash_license(ln):