From e4b15ac7e5a90482195f50fa28db3cd998a01ab4 Mon Sep 17 00:00:00 2001
From: Reid 'arrdem' McKenzie <me@arrdem.com>
Date: Mon, 20 Mar 2023 18:50:00 -0600
Subject: [PATCH] [NO TESTS] WIP

---
 projects/flowmetal/NOTES.md | 251 ------------------
 projects/flowmetal/README.md | 64 +----
 projects/flowmetal/TODO.org | 46 ----
 projects/flowmetal/TODO.org_archive | 39 ---
 projects/flowmetal/call_cc_airflow.md | 47 ----
 projects/flowmetal/doc/manifesto.md | 30 ---
 projects/flowmetal/setup.py | 34 ---
 .../src/resources/zonefiles/tirefireind.us.j2 | 1 +
 projects/reqman/src/python/reqman/__main__.py | 1 +
 tools/python/requirements.txt | 129 ++++-----
 10 files changed, 80 insertions(+), 562 deletions(-)
 delete mode 100644 projects/flowmetal/NOTES.md
 delete mode 100644 projects/flowmetal/TODO.org
 delete mode 100644 projects/flowmetal/TODO.org_archive
 delete mode 100644 projects/flowmetal/call_cc_airflow.md
 delete mode 100644 projects/flowmetal/doc/manifesto.md
 delete mode 100644 projects/flowmetal/setup.py

diff --git a/projects/flowmetal/NOTES.md b/projects/flowmetal/NOTES.md
deleted file mode 100644
index 3fec0c7..0000000
--- a/projects/flowmetal/NOTES.md
+++ /dev/null
@@ -1,251 +0,0 @@
-# Notes
-
-https://github.com/Pyrlang/Pyrlang
-https://en.wikipedia.org/wiki/Single_system_image
-
-## Example - Await
-
-A common pattern when working in distributed environments is wanting to request that another system perform a job and wait for its results.
-There are lots of parallels here to making a function or RPC call, except that it's a distributed system with complex failure modes.
-
-In a perfect world we'd want to just write something like this -
-
-```python
-#!/usr/bin/env python3.10
-
-from service.client import Client
-
-client = Client("http://service.local", api_key="...")
-job = client.create_job(...)
-result = await job
-# Do something with the result
-```
-
-There's some room for variance here around API design taste, but this snippet is probably familiar to many Python readers.
-Let's think about its failure modes.
-
-First, that `await` is doing a lot of heavy lifting.
-Presumably it's wrapping up a polling loop of some sort.
-That may be acceptable in some circumstances, but it leaves the question of what an acceptable retry policy is to the client library implementer.
-
-Second, this snippet assumes that `create_job` will succeed.
-There won't be an authorization error, a network transit error, a remote server error or anything like that.
-
-Third, there's no other record of whatever `job` is.
-If the Python interpreter running this program dies, or the user gets bored and `C-c`'s it, or the computer encounters a problem, the job will be lost.
-Maybe that's OK, maybe it isn't.
-But it's a risk.
-
-Now, let's think about taking on some of the complexity needed to solve these problems ourselves.
-
-### Retrying challenges
-
-We can manually write the retry loop polling a remote API.
-
-``` python
-#!/usr/bin/env python3.10
-
-from datetime import datetime, timedelta
-
-from service.client import Client
-
-
-client = Client("http://service.local", api_key="...")
-AWAIT_TIMEOUT = timedelta(minutes=30)
-POLL_TIME = timedelta(seconds=10)
-
-
-def sleep(duration=POLL_TIME):
-    """A slightly more useful sleep. Has our default and does coercion."""
-    from time import sleep
-    if isinstance(duration, timedelta):
-        duration = duration.total_seconds()
-    sleep(duration)
-
-
-# Create a job, assuming idempotence
-while True:
-    try:
-        job = client.create_job(...)
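-        # The create call was accepted; record the submission time so the
-        # await timeout below is measured from when the job actually started.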
-        start_time = datetime.now()
-        break
-    except:
-        sleep()
-
-# Waiting for the job
-while True:
-    # Time-based timeout
-    if datetime.now() - start_time > AWAIT_TIMEOUT:
-        raise TimeoutError
-
-    # Checking the job status; no backoff, linear polling
-    try:
-        if not job.complete():
-            sleep()
-            continue
-    except:
-        sleep()
-        continue
-
-    # Trying to read the job result, re-using the retry loop & total timeout machinery
-    try:
-        result = job.get()
-        break
-    except:
-        sleep()
-        continue
-
-# Do something with the result
-```
-
-We could pull [retrying](https://pypi.org/project/retrying/) off the shelf and get some real mileage here.
-`retrying` is a super handy little library that provides the `@retry` decorator, which implements a variety of common retrying concerns such as retrying N times with linear or exponential back-off.
-It's really just the `while/try/except` state machine we just wrote a couple of times, packaged as a decorator.
-
-``` python
-#!/usr/bin/env python3.10
-
-from datetime import datetime, timedelta
-
-from retrying import retry
-
-from service.client import Client
-
-
-client = Client("http://service.local", api_key="...")
-AWAIT_TIMEOUT = timedelta(minutes=30)
-POLL_TIME = timedelta(seconds=10)
-
-
-class StillWaitingException(Exception):
-    """Something we can throw to signal we're still waiting on an external event."""
-
-
-# retrying takes its delays in milliseconds; timedelta only gives us seconds.
-@retry(wait_fixed=POLL_TIME.total_seconds() * 1000)
-def r_create_job(client):
-    """R[eliable] create job. Retries over exceptions forever with a delay. No jitter."""
-    return client.create_job()
-
-
-@retry(stop_max_delay=AWAIT_TIMEOUT.total_seconds() * 1000,
-       wait_fixed=POLL_TIME.total_seconds() * 1000)
-def r_get_job(job):
-    """R[eliable] get job. Retries over exceptions up to a total time with a delay. No jitter."""
-    if not job.complete():
-        raise StillWaitingException
-
-    return job.get()
-
-
-job = r_create_job(client)
-result = r_get_job(job)
-# Do something with the result
-```
-
-That's pretty good!
-We've preserved most of our direct control over the mechanical retrying behavior; we can tweak it or choose a different provider.
-And we've managed to get the syntactic density of the original `await` example back ... almost.
-
-This is where Python's lack of an anonymous function block syntax and other lexical structures becomes a sharp limiter.
-In another language like JavaScript or Lua, you could probably get this down to something like -
-
-``` lua
--- retry is a function of retrying options to a function of a callable to retry
--- which returns a zero-argument callable which will execute the callable with
--- the retrying behavior as specified.
-
-client = Client("http://service.local", api_key="...")
-retry_config = {} -- Fake, obviously
-with_retry = retry(retry_config)
-
-job = with_retry(
-  function ()
-    return client.create_job(...)
-  end)()
-
-result = with_retry(
-  function()
-    if job.complete() then
-      return job.get()
-    end
-  end)()
-```
-
-The insight here is that the "callback" function we're defining in the Python example as `r_get_job` and so forth has no intrinsic need to be named.
-In fact choosing the arbitrary names `r_get_job` and `r_create_job` puts more load on the programmer and the reader.
-Python's lack of block anonymous procedures precludes us from cramming the `if complete then get` operation or anything more complex into a `lambda` without some serious syntax crimes.
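-
-To make the point concrete, here's roughly what that syntax crime would look like, reusing the constants and `StillWaitingException` from above.
-Since `raise` is a statement, the `lambda` has to smuggle it through a throwaway generator's `.throw()` -
-
-``` python
-# A sketch only: a zero-argument lambda closing over `job`, wrapped by the
-# retrying decorator and immediately invoked, as in the Lua example.
-result = retry(
-    stop_max_delay=AWAIT_TIMEOUT.total_seconds() * 1000,
-    wait_fixed=POLL_TIME.total_seconds() * 1000,
-)(lambda: job.get() if job.complete()
-  else (_ for _ in ()).throw(StillWaitingException()))()
-```
-
-It works, but nobody should have to read it.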
-
-Using [PEP-0342](https://www.python.org/dev/peps/pep-0342/#new-generator-method-send-value), it's possible to implement arbitrary coroutines in Python by `.send()`ing values to generators, which may treat `yield` statements as rvalues for receiving remotely sent inputs.
-This makes it possible to explicitly yield control to a remote interpreter, which will return or resume the coroutine with a result value.
-
-Microsoft's [Durable Functions](https://docs.microsoft.com/en-us/azure/azure-functions/durable/durable-functions-overview?tabs=python) use exactly this behavior to implement durable functions.
-The "functions" provided by the API return sentinels which can be yielded to an external interpreter, which triggers processing and returns control when there are results.
-This is the [interpreter effect conversion pattern (Extensible Effects)](http://okmij.org/ftp/Haskell/extensible/exteff.pdf) seen in Haskell and other tools, applied.
-
-
-``` python
-import azure.functions as func
-import azure.durable_functions as df
-
-def orchestrator_function(context: df.DurableOrchestrationContext):
-    x = yield context.call_activity("F1", None)
-    y = yield context.call_activity("F2", x)
-    z = yield context.call_activity("F3", y)
-    result = yield context.call_activity("F4", z)
-    return result
-
-main = df.Orchestrator.create(orchestrator_function)
-```
-
-Now it would seem that you could "just" automate rewriting that to something like this -
-
-``` python
-@df.Durable
-def main(ctx):
-    x = ctx.call_activity("F1", None)
-    y = ctx.call_activity("F2", x)
-    z = ctx.call_activity("F3", y)
-    return ctx.call_activity("F4", z)
-```
-
-There's some prior art for doing this (https://eigenfoo.xyz/manipulating-python-asts/, https://greentreesnakes.readthedocs.io/en/latest/manipulating.html#modifying-the-tree), but it's a lot of legwork for not much.
-There are also some pretty gaping correctness holes in taking the decorator-based rewriting approach;
-how do you deal with rewriting imported code, or code that's in classes/behind `@property` and other such tricks?
-
-Just not worth it.
-
-Now, what we _can_ do is try to hijack the entire Python interpreter to implement the properties/tracing/history recording we want there.
-The default CPython lacks hooks for doing this, but we can write a Python-in-Python interpreter and "lift" the user's program into an interpreter we control, which ultimately gets most of its behavior "for free" from the underlying CPython interpreter.
-There's [an example](https://github.com/pfalcon/pyastinterp) of doing this as part of the pycopy project; although there it's more of a Scheme-style proof of metacircular self-hosting.
-
-There's a modified copy of the astinterp in `scratch/` which is capable of running a considerable subset of py2/3.9, to the point of being able to source-import many libraries including `requests` and run PyPI-sourced library code along with user code under hoisted interpretation.
-
-It doesn't support coroutines/generators yet, and there's some machinery required to make it "safe" (meaningfully single-steppable; "fix"/support eval, enable user-defined import/`__import__` through the lifted Python VM) but as a proof of concept of a lifted VM I'm genuinely shocked how well this works.
-
-Next questions here revolve around how to "snapshot" the state of the interpreter meaningfully, and how to build a replayable interpreter log.
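-
-As a sketch of what a replayable log could mean here, consider the Durable Functions-style replay trick: drive the orchestrator generator with `.send()`, journal each activity result as it completes, and on restart re-send the journaled results in order so that a deterministic orchestrator re-reaches its prior state without re-executing anything.
-The names below are invented for illustration; `execute_activity` stands in for whatever actually performs the requested work.
-
-``` python
-import json
-
-def run_logged(orchestrator, log_path):
-    try:
-        with open(log_path) as f:
-            log = json.load(f)  # Results journaled by a prior incarnation
-    except FileNotFoundError:
-        log = []
-
-    gen = orchestrator()
-    try:
-        request = next(gen)  # The first yielded activity request
-        # Replay: re-send recorded results rather than re-executing
-        for result in log:
-            request = gen.send(result)
-        # Past the end of the log: execute live, journaling as we go
-        while True:
-            result = execute_activity(request)  # hypothetical executor
-            log.append(result)
-            with open(log_path, "w") as f:
-                json.dump(log, f)  # Persist before the program can advance
-            request = gen.send(result)
-    except StopIteration as fin:
-        return fin.value  # The orchestrator function returned
-```
-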
-There are some specific challenges around how Python code interacts with native C code that could limit the viability of this approach, but at the absolute least this fully sandboxed Python interpreter could be used to implement whatever underlying magic is desired, restricted to some language subset as needed.
-
-The goal is to make something like this work -
-
-``` python
-from df import Activity
-
-f1 = Activity("F1")
-f2 = Activity("F2")
-f3 = Activity("F3")
-f4 = Activity("F4")
-
-def main():
-    return f4(f3(f2(f1(None))))
-```
-
-This may offer a possible solution to the interpreter checkpointing problem - only checkpoint "supported" operations.
-Here the `Activity().__call__` operation would have special support, as would `datetime.datetime.now()` and controlling `time.sleep()`, threading and possibly `random.Random` seeding, none of which can trivially be made repeatable.
-
-### Durability challenges
-
-FIXME - manually implementing snapshotting and recovery is hard
-
-
-### Leverage with language support
-
-FIXME - What does a DSL that helps with all this look like?
diff --git a/projects/flowmetal/README.md b/projects/flowmetal/README.md
index 9f0b32f..8c6be28 100644
--- a/projects/flowmetal/README.md
+++ b/projects/flowmetal/README.md
@@ -1,11 +1,11 @@
 # Flowmetal
 
-> A shining mercurial metal laden with sensors and almost infinitely reconfigurable.
->
+> A shining mercurial metal, laden with sensors and almost infinitely reconfigurable.
 > The stuff of which robots and servitors are made.
 
 Flowmetal is a substrate for automation.
-It attempts to provide a programming environment wherein programs are durable, evented and asynchronous aimed at what would traditionally be described as scripting or coordination.
+It provides a programming environment wherein programs are durable, evented and asynchronous by default.
+It is aimed at scripting or coordination tasks, such as workflows and scheduled jobs.
 
 Let's unpack these terms.
 
@@ -21,62 +21,16 @@ This also allows for external systems such as REST callback APIs, databases and
 It also allows bidirectional communication between Flowmetal programs and other more traditional programming environments.
 Anything that can communicate with Flowmetal can provide function implementations, or call Flowmetal programs!
 
+This centering of evented communication makes Flowmetal ideal for **coordination** tasks, from simple task sequencing to map/reduce and other parallelism patterns.
+
 **Asynchronous** - thanks to Flowmetal's evented execution model, waiting for slow external events either synchronously or asynchronously is second nature!
-Flowmetal is especially good at waiting for very, very slow external operations.
-Stuff like webhooks and batch processes.
-**Scripting** - the tradeoff Flowmetal makes for the evented model is that it's slow.
-While Flowmetal foreign functions could be fast, Flowmetal's interpreter isn't designed for speed.
-It's designed for eventing and ensuring durability.
-This makes Flowmetal suitable for interacting with and coordinating other systems, but it's not gonna win any benchmark games.
+**Scripting** - Durability and distribution of execution come at coordination costs which make Flowmetal well suited for coordination tasks, but not for heavy processing.
 
-## An overview
-
-In the systems world we have SH, Bourne SH, BASH, ZSH and friends which provide a common interface for connecting processes together.
-However in the distributed system world we don't have a good parallel for connecting microservices; especially where complex failure handling is required.
-
-I previously [blogged a bit](https://www.arrdem.com/2019/04/01/the_silver_tower/) about some ideas for what this could look like.
-I'm convinced that a programming environment based around [virtual resiliency](https://www.microsoft.com/en-us/research/publication/a-m-b-r-o-s-i-a-providing-performant-virtual-resiliency-for-distributed-applications/) is a worthwhile goal (having independently invented it) and worth trying to bring to a mainstream general purpose platform like Python.
-
-Flowmetal is an interpreted language backed by a durable event store.
-The execution history of a program persists to the durable store as execution proceeds.
-If an interpretation step fails to persist, it can't have external effects.
-This is the fundamental insight behind Microsoft AMBROSIA.
-The event store also provides Flowmetal's only interface for communicating with external systems.
-Other systems can attach to Flowmetal's data store and send events to and receive them from Flowmetal.
-For instance Flowmetal contains a reference implementation of an HTTP callback connector and an HTTP request connector.
-This allows Flowmetal programs to request that HTTP requests be sent on their behalf, consume the result, and wait for callbacks.
-
-A Flowmetal setup could look something like this -
-
-```
-                     +----------------------------+
-                   +---------------------------+  |
-                 +--------------------------+  |--+
-                 | External HTTP service(s) |--+
-                 +--------------------------+
-                    ^                     ^
-                    |                     |
-                    v                     v
-  +-----------------------+  +------------------------+
-  | HTTP server connector |  | HTTP request connector |
-  +-----------------------+  +------------------------+
-                    ^                     ^
-                    |                     |
-                    v                     v
-                 +--------------------+
-                 | Shared event store |
-                 +--------------------+
-                            ^
-                            |
-                            v
-                 +--------------------------+
-                 | Flowmetal interpreter(s) |
-                 +--------------------------+
-```
+- For a problem statement, see [Call/CC Airflow](docs/call_cc_airflow.md).
+- For an architecture overview, see [Architecture](docs/architecture.md).
+- For example doodles, see [examples](examples).
 
 ## License
 
-Mirrored from https://git.arrdem.com/arrdem/flowmetal
-
 Published under the MIT license. See [LICENSE.md](LICENSE.md)
diff --git a/projects/flowmetal/TODO.org b/projects/flowmetal/TODO.org
deleted file mode 100644
index 981a0f1..0000000
--- a/projects/flowmetal/TODO.org
+++ /dev/null
@@ -1,46 +0,0 @@
-#+TITLE: Flowmetal TODOs
-
-* parser
-** TODO Rework the tokens in terms of spans instead of just start points :tokens:parser:
-Having start and end information allows for textual display of ranges and other
-potentially interesting error formatting. Requires some refactoring.
-
-** TODO Implement load() in the parser :parser:
-At present the parser can parse well enough, but it returns a token tree
-intended for use in refactoring and autoformatting tools, not a direct 'ast' list
-tree, which is how load() is supposed to behave.
-
-Figure out how to "mixin" implicit unwrapping of token boxes to values when
-loading instead of reading.
-
-** DONE Implement parser support for :- type ascriptions :parser:
-Maybe this is a special case of keywords, maybe it isn't. Support ⊢ as an alternative. Maybe |- ?
-
-** TODO Think about the difference between reading "data" and reading expression/code syntax :parser:
-EDN suggests these two things are largely the same ... but they clearly aren't.
-
-** TODO Do I want to implement #_ reader discard support? :parser:
-Reader discard is a convenient alternative to commenting a bunch of stuff out,
-but does require a fair bit of complexity in the parser to support properly.
-
-** TODO Do I want to implement #?() reader conditional support? :parser:
-Reader conditionals are cool for feature expressions and multiple platforms, but
-are of unclear value given that I only have one target for the foreseeable and
-Flowmetal is already supposed to be a platform-agnostic sort of thing.
-
-** DONE Finish out float support
-** DONE Implement strings
-** TODO Think about what multiple grammars / dialects look like
-* TODO Look at Python SQL ORMs :server:storage:
-- Must support PostgreSQL
-- Must support SQLite
-
-The goal is to be able to run the "leader" servers off of postgres and have local
-state stores for workers stored in sqlite using large amounts of the same schema.
-Being able to get marshalling and unmarshalling to JSON 'for free' would be
-lovely.
-
-* TODO Look at Flask OpenAPI spec stuff :server:
-- Ideally want to go spec first
-- Must be able to provide validation
-- Would be nice to be able to use the spec to drive implementing the app (mounting functions to routes)
diff --git a/projects/flowmetal/TODO.org_archive b/projects/flowmetal/TODO.org_archive
deleted file mode 100644
index fd2acc8..0000000
--- a/projects/flowmetal/TODO.org_archive
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- mode: org -*-
-
-
-Archived entries from file /home/arrdem/doc/hobby/programming/lang/python/flowmetal/TODO.org
-
-
-* DONE Implement parse() separately in the parser
- :PROPERTIES:
- :ARCHIVE_TIME: 2020-06-14 Sun 11:34
- :ARCHIVE_FILE: ~/doc/hobby/programming/lang/python/flowmetal/TODO.org
- :ARCHIVE_CATEGORY: TODO
- :ARCHIVE_TODO: DONE
- :END:
-Relates to implementing load()
-
-When we have a workable load which generates data, we'll want a read() which
-generates a syntax tree so that we don't discard that API entirely.
-
-
-* DONE Parser test suite
- :PROPERTIES:
- :ARCHIVE_TIME: 2020-06-14 Sun 11:34
- :ARCHIVE_FILE: ~/doc/hobby/programming/lang/python/flowmetal/TODO.org
- :ARCHIVE_CATEGORY: TODO
- :ARCHIVE_TODO: DONE
- :END:
-- Cover the various scanners
-- Cover the position tracking machinery
-
-
-* DONE Get pytest set up
- :PROPERTIES:
- :ARCHIVE_TIME: 2020-06-14 Sun 11:34
- :ARCHIVE_FILE: ~/doc/hobby/programming/lang/python/flowmetal/TODO.org
- :ARCHIVE_CATEGORY: TODO
- :ARCHIVE_TODO: DONE
- :END:
-As it says on the tin
-
diff --git a/projects/flowmetal/call_cc_airflow.md b/projects/flowmetal/call_cc_airflow.md
deleted file mode 100644
index 8b5bc3f..0000000
--- a/projects/flowmetal/call_cc_airflow.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# An Asynchronous, Distributed Task Engine
-
-This document presents a design, without a reference implementation, for a distributed programming system,
-sometimes called a workflow engine.
-It is intended to provide architectural-level clarity, allowing for the development of alternative designs or implementations as may suit.
-
-## Problem Statement
-
-In building, operating and maintaining distributed systems (many computers in concert), engineers face a tooling gap.
-
-Within the confines of a single computer, we have shells (`bash`, `csh`, `zsh`, `oil` etc.)
-and a suite of small programs which mesh together well enough for the completion of small tasks with ad-hoc automation.
-This is an enormous tooling win, as it allows small tasks to be automated at least for a time with a minimum of effort and with tools close to hand.
-
-Once networks get involved, communicating between computers is difficult with traditional tools, and communication failure becomes an ever-present concern.
-Traditional automation tools such as shells are inadequate for this environment because achieving network communication is excessively difficult.
-
-In a distributed environment it cannot be assumed that a single machine can remain available to execute automation;
-this requires an approach to automation which allows for the incremental execution of single tasks at a time, with provisions for relocation and recovery should failure occur.
-
-It also cannot be assumed that a single machine is sufficiently available to receive and process incoming events such as callbacks.
-A distributed system is needed to wrangle distributed systems.
-
-## Design Considerations
-
-- Timeouts are everywhere
-- Sub-Turing/boundable
-
-## Architectural Overview
-
-### Events
-Things that will happen, or time out.
-
-### Actions
-Things the workflow will do, or time out.
-
-### Bindings
-Data the workflow either was given or computed.
-
-### Conditionals
-Decisions the workflow may make.
-
-### Functions
-A convenient way to talk about fragments of control flow graph.
-
-### Tracing & Reporting
diff --git a/projects/flowmetal/doc/manifesto.md b/projects/flowmetal/doc/manifesto.md
deleted file mode 100644
index 0fb74c7..0000000
--- a/projects/flowmetal/doc/manifesto.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# A manifesto
-
-In the last decade, immutability has been affirmed in the programming mainstream as an effective tool for making programs and state more manageable, and one which has been repeatedly implemented at acceptable performance costs.
-Especially in messaging-based rather than state-sharing environments, immutability and "data"-oriented programming are becoming more and more common.
-
-It also seems that much of the industry is moving towards message-based reactive or network-based connective systems.
-Microservices seem to have won, and functions-as-a-service seem to be a rising trend reflecting a desire to offload or avoid deployment management rather than wrangle stateful services.
-
-In these environments, programs begin to consist entirely of messaging with other programs over shared channels: traditional HTTP or other RPC tools, or message buses such as Kafka, gRPC, ThriftMux and so forth.
-
-Key challenges with these connective services are:
-- How they handle failure
-- How they achieve reliability
-- The ergonomic difficulties of building and deploying connective programs
-- The operational difficulties of managing N-many 'reliable' services
-
-Tools like Argo and Airflow begin to talk about such networked or evented programs as DAGs, providing schedulers for sequencing actions and executors for performing actions.
-
-Airflow provides a programmable Python scheduler environment, but fails to provide an execution isolation boundary (such as a container or other subprocess/`fork()` boundary) that would allow users to bring their own dependencies.
-Instead Airflow users must build custom Airflow packagings which bundle dependencies into the Airflow instance.
-This means that Airflow deployments can only be centralized with difficulty, due to shared dependencies and disparate dependency lifecycles, and the return on investment of the platform is limited by the increased operational burden.
- -Argo ducks this mistake, providing a robust scheduler and leveraging k8s for its executor. -This allows Argo to be managed independently of any of the workloads it manages - a huge step forwards over Airflow - but this comes at considerable ergonomic costs for trivial tasks and provides a more limited scheduler. - -Previously I developed a system which provided a much stronger DSL than Airflow's, but made the same key mistake of not decoupling execution from the scheduler/coordinator. -Calf is a sketch of a programming language and system with a nearly fully featured DSL, and decoupling between scheduling (control flow of programs) and execution of "terminal" actions. - -In short, think a Py-Lisp where instead of doing FFI directly to the parent Python instance you do FFI by enqueuing a (potentially retryable!) request onto a shared cluster message bus, from which subscriber worker processes elsewhere provide request/response handling. -One could reasonably accuse this project of being an attempt to unify Erlang and a hosted Python to build a "BASH for distsys" tool while providing a multi-tenant execution platform that can be centrally managed. diff --git a/projects/flowmetal/setup.py b/projects/flowmetal/setup.py deleted file mode 100644 index 4d9df84..0000000 --- a/projects/flowmetal/setup.py +++ /dev/null @@ -1,34 +0,0 @@ -from setuptools import setup - - -setup( - name="arrdem.flowmetal", - # Package metadata - version="0.0.0", - license="MIT", - description="A weird execution engine", - long_description=open("README.md").read(), - long_description_content_type="text/markdown", - author="Reid 'arrdem' McKenzie", - author_email="me@arrdem.com", - url="https://git.arrdem.com/arrdem/flowmetal", - classifiers=[ - "License :: OSI Approved :: MIT License", - "Development Status :: 3 - Alpha", - "Intended Audience :: Developers", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - ], - # Package setup - package_dir={"": "src/python"}, - packages=[ - "flowmetal", - ], - entry_points={ - "console_scripts": ["iflow=flowmetal.repl:main"], - }, - install_requires=[ - "prompt-toolkit~=3.0.0", - ], - extras_require={}, -) diff --git a/projects/public_dns/src/resources/zonefiles/tirefireind.us.j2 b/projects/public_dns/src/resources/zonefiles/tirefireind.us.j2 index 7ac9e62..87484b5 100644 --- a/projects/public_dns/src/resources/zonefiles/tirefireind.us.j2 +++ b/projects/public_dns/src/resources/zonefiles/tirefireind.us.j2 @@ -19,6 +19,7 @@ buildcache {{ ttl }} IN A {{ link }} feed {{ ttl }} IN A {{ link }} ton {{ ttl }} IN A {{ link }} relay {{ ttl }} IN A {{ link }} +pxe {{ ttl }} IN A {{ link }} {% endfor %} # Host records diff --git a/projects/reqman/src/python/reqman/__main__.py b/projects/reqman/src/python/reqman/__main__.py index 5faabc1..9a76e55 100644 --- a/projects/reqman/src/python/reqman/__main__.py +++ b/projects/reqman/src/python/reqman/__main__.py @@ -43,6 +43,7 @@ SHITLIST = [ def req_name(requirement: str) -> str: requirement = requirement.lower() match = re.match(REQ_PATTERN, requirement) + assert match is not None return match.group("pkgname") or match.group("eggname") diff --git a/tools/python/requirements.txt b/tools/python/requirements.txt index 23d700f..be5b261 100644 --- a/tools/python/requirements.txt +++ b/tools/python/requirements.txt @@ -1,146 +1,155 @@ -aiohttp==3.8.1 +aiohttp==3.8.4 aiohttp-basicauth==1.0.0 aiosignal==1.2.0 alabaster==0.7.12 async-lru==1.0.3 async-timeout==4.0.2 -attrs==21.4.0 -autoflake==1.4 -Babel==2.9.1 
+attrs==22.1.0 +autoflake==2.0.1 +Babel==2.11.0 bases==0.2.1 -beautifulsoup4==4.10.0 -black==21.8b0 +beautifulsoup4==4.11.2 +black==23.1.0 blake3==0.3.1 bleach==4.1.0 borg==2012.4.1 -cachetools==5.2.0 +cachetools==5.3.0 cbor2==5.4.3 -certifi==2021.10.8 +certifi==2022.9.24 chardet==4.0.0 -charset-normalizer==2.0.10 -click==7.1.2 -colored==1.4.3 +charset-normalizer==2.1.1 +click==8.1.3 +colored==1.4.4 commonmark==0.9.1 coverage==6.2 Cython==0.29.30 dataclasses==0.6 decorator==5.1.1 +deepmerge==1.1.0 Deprecated==1.2.13 -docutils==0.17.1 -ExifRead==2.3.2 -flake8==4.0.1 -Flask==2.0.2 -frozenlist==1.2.0 +docutils==0.19 +exceptiongroup==1.1.0 +ExifRead==3.0.0 +flake8==6.0.0 +Flask==2.2.3 +frozenlist==1.3.3 graphviz==0.19.1 -hypothesis==6.35.0 -icmplib==3.0.2 -idna==3.3 -imagesize==1.3.0 +hypothesis==6.68.1 +icmplib==3.0.3 +idna==3.4 +imagesize==1.4.1 importlib-metadata==4.10.0 iniconfig==1.1.1 isodate==0.6.1 -isort==5.10.1 +isort==5.12.0 itsdangerous==2.0.1 jedi==0.18.1 -Jinja2==3.0.3 +Jinja2==3.1.2 jsonschema==4.3.3 -lark==1.0.0 +jsonschema-spec==0.1.3 +lark==1.1.5 +lazy-object-proxy==1.9.0 livereload==2.6.3 -lxml==4.7.1 +lxml==4.9.2 m2r==0.2.1 -Markdown==3.3.6 -MarkupSafe==2.0.1 -mccabe==0.6.1 -meraki==1.24.0 +Markdown==3.4.1 +MarkupSafe==2.1.1 +mccabe==0.7.0 +meraki==1.27.0 mirakuru==2.4.1 mistune==2.0.1 mmh3==3.0.0 -multidict==5.2.0 +multidict==6.0.2 multiformats==0.1.4.post3 mypy-extensions==0.4.3 -numpy==1.23.1 +numpy==1.23.5 octorest==0.4 -openapi-schema-validator==0.2.0 -openapi-spec-validator==0.3.1 -packaging==21.3 +openapi-schema-validator==0.4.3 +openapi-spec-validator==0.5.5 +packaging==23.0 parso==0.8.3 +pathable==0.4.3 pathspec==0.9.0 -pep517==0.12.0 -pip==21.3.1 +pep517==0.13.0 +picobox==2.2.0 +pip==22.3.1 pip-tools==6.4.0 plac==1.3.5 platformdirs==2.4.1 pluggy==1.0.0 port-for==0.6.1 -prompt-toolkit==3.0.24 -psutil==5.9.0 -psycopg2==2.9.3 +prompt-toolkit==3.0.36 +proquint==0.2.1 +psutil==5.9.4 +psycopg2==2.9.5 pudb==2022.1 pur==5.4.2 py==1.11.0 -pycodestyle==2.8.0 -pycryptodome==3.15.0 +pycodestyle==2.10.0 +pycryptodome==3.17 pycryptodomex==3.15.0 -pyflakes==2.4.0 -Pygments==2.11.2 -pyparsing==3.0.6 -pyrsistent==0.18.1 +pyflakes==3.0.1 +Pygments==2.13.0 +pyparsing==3.0.9 +pyrsistent==0.19.3 pysha3==1.0.2 pyskein==1.0 pytest==6.2.5 -pytest-cov==3.0.0 -pytest-postgresql==4.1.0 +pytest-cov==4.0.0 +pytest-postgresql==4.1.1 pytest-pudb==0.7.0 pytest-timeout==2.1.0 -pytz==2021.3 +pytz==2022.6 PyYAML==6.0 readme-renderer==32.0 recommonmark==0.7.1 -redis==4.1.0 +redis==4.5.1 regex==2021.11.10 -requests==2.27.1 +requests==2.28.2 requests-toolbelt==0.9.1 requirements-parser==0.3.1 retry==0.9.2 +rfc3339-validator==0.1.4 scipy==1.8.1 setuptools==60.5.0 six==1.16.0 -smbus2==0.4.1 +smbus2==0.4.2 snowballstemmer==2.2.0 sortedcontainers==2.4.0 soupsieve==2.3.1 -Sphinx==4.3.2 +Sphinx==6.1.3 +sphinx_mdinclude==0.5.3 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-httpdomain==1.8.0 sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-openapi==0.7.0 +sphinxcontrib-openapi==0.8.1 sphinxcontrib-programoutput==0.17 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -termcolor==1.1.0 +termcolor==2.2.0 toml==0.10.2 -tomli==1.2.3 +tomli==2.0.1 toposort==1.7 tornado==6.1 typed-ast==1.5.1 types-setuptools==57.4.7 -typing_extensions==4.0.1 typing-validation==0.0.1.post7 +typing_extensions==4.4.0 unify==0.5 untokenize==0.1.1 -urllib3==1.26.8 +urllib3==1.26.13 urwid==2.1.2 urwid-readline==0.13 wasm==1.2 wcwidth==0.2.5 webencodings==0.5.1 
websocket-client==1.2.3 -Werkzeug==2.0.2 +Werkzeug==2.2.3 wheel==0.37.1 wrapt==1.13.3 -yamllint==1.26.3 -yarl==1.7.2 -yaspin==2.1.0 +yamllint==1.29.0 +yarl==1.8.1 +yaspin==2.3.0 zipp==3.7.0