Document benchmark results

Reid 'arrdem' McKenzie 2021-08-19 23:45:15 -06:00
parent 23840cba8e
commit 7608b6f004
3 changed files with 70 additions and 1 deletion


@@ -1,3 +1,59 @@
# Jobq

Abusing sqlite3 as a job queue.
## Benchmarks
Benchmark results are extremely steady from run to run.
Flushing the sqlite file to disk seems to be the limiting I/O factor, and pipelining multiple message writes into a single transaction is undoubtedly the way to go, as the sketch below illustrates.
However, the purpose of the API is to use the sqlite file as a shared checkpoint between potentially many processes, so 'large' transactions are an antipattern.
Tests suggest that this library is rock steady at about 100 writes and 100 polls per second (10,000 operations in roughly 100 s in each run below), completely bounded by sqlite-controlled I/O, as evidenced by the `":memory:"` runs, which don't have to `fsync()`.
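
To make the batching point concrete, here is a minimal sketch using raw `sqlite3` rather than the jobq API (the file and table names are made up for illustration), comparing one commit per write against a single commit for the whole batch:

``` python
# Illustration only: raw sqlite3, not the jobq API.
# Each commit forces a flush to disk, so batching writes into one
# transaction amortizes the fsync() cost across the whole batch.
import sqlite3
import time

conn = sqlite3.connect("/tmp/fsync-demo.sqlite3")
conn.execute("CREATE TABLE IF NOT EXISTS msg (id INTEGER PRIMARY KEY, payload TEXT)")

def txn_per_write(n):
    start = time.perf_counter()
    for i in range(n):
        with conn:  # commits (and flushes) once per write
            conn.execute("INSERT INTO msg (payload) VALUES (?)", (f"msg-{i}",))
    return time.perf_counter() - start

def single_txn(n):
    start = time.perf_counter()
    with conn:  # one commit, one flush, for all n writes
        for i in range(n):
            conn.execute("INSERT INTO msg (payload) VALUES (?)", (f"msg-{i}",))
    return time.perf_counter() - start

print(f"per-write transactions: {txn_per_write(100):.3f} (s)")
print(f"single transaction:     {single_txn(100):.3f} (s)")
```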
``` shell
$ bazel run :benchmark
...
Target //projects/jobq:benchmark up-to-date:
bazel-bin/projects/jobq/benchmark
...
Ran 'insert' 10000 times, total time 101.810816516 (s)
mean: 0.010148992843 (s)
median: 0.009474293 (s)
stddev: 0.006727934042954838 (s)
test overhead: 3.20888086e-05 (s)
Ran 'poll' 10000 times, total time 100.482262487 (s)
mean: 0.0100152467857 (s)
median: 0.0095528585 (s)
stddev: 0.00821730176268304 (s)
test overhead: 3.2979463000000004e-05 (s)
Ran 'append_event' 10000 times, total time 105.015296419 (s)
mean: 0.0104681294652 (s)
median: 0.009592544 (s)
stddev: 0.007321370576225584 (s)
test overhead: 3.34001767e-05 (s)
Testing with :memory:
Ran 'insert' 10000 times, total time 0.37031511 (s)
mean: 3.3595880100000005e-05 (s)
median: 2.96015e-05 (s)
stddev: 1.045088890675899e-05 (s)
test overhead: 3.4356309e-06 (s)
Ran 'poll' 10000 times, total time 1.17148314 (s)
mean: 0.0001128911222 (s)
median: 9.7398e-05 (s)
stddev: 3.213524197973896e-05 (s)
test overhead: 4.2571917999999996e-06 (s)
Ran 'append_event' 10000 times, total time 0.415490332 (s)
mean: 3.78861989e-05 (s)
median: 3.3019e-05 (s)
stddev: 1.1752889674795285e-05 (s)
test overhead: 3.6628343e-06 (s)
```


@@ -10,6 +10,7 @@ from random import randint, choice
 import string
 from statistics import mean, median, stdev
 import tempfile
+import logging
 
 from jobq import JobQueue
@@ -75,7 +76,17 @@ def test_poll(q, reps):
     bench(poll, reps)
 
 
+def test_append(q, reps):
+    def append_event():
+        q.append_event(randint(1, reps), {"foo": "bar"})
+
+    bench(append_event, reps)
+
 
 if __name__ == "__main__":
+    # No logs
+    logging.getLogger().setLevel(logging.WARN)
+
     # Test params
     reps = 10000
     path = "/tmp/jobq-bench.sqlite3"
@@ -89,8 +100,10 @@ if __name__ == "__main__":
     q = JobQueue(path)
     test_insert(q, reps)
     test_poll(q, reps)
+    test_append(q, reps)
 
     print(f"Testing with :memory:")
     q = JobQueue(":memory:")
     test_insert(q, reps)
     test_poll(q, reps)
+    test_append(q, reps)
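
The `bench` helper these tests call is defined earlier in the benchmark module and isn't part of this hunk. A minimal sketch of a harness that would produce the output documented above, assuming a `bench(fn, reps)` signature and a no-op timing loop for the overhead estimate (both are assumptions, not the actual definition):

``` python
# Hypothetical reconstruction of the bench() helper used above; the real
# definition lives elsewhere in benchmark.py.
import time
from statistics import mean, median, stdev

def bench(fn, reps):
    # Time each call individually so we can report mean/median/stddev.
    times = []
    for _ in range(reps):
        start = time.perf_counter()
        fn()
        times.append(time.perf_counter() - start)

    # Estimate the harness's own per-call cost by timing an empty body.
    overhead = []
    for _ in range(reps):
        start = time.perf_counter()
        overhead.append(time.perf_counter() - start)

    print(f"Ran '{fn.__name__}' {reps} times, total time {sum(times)} (s)")
    print(f"  mean: {mean(times)} (s)")
    print(f"  median: {median(times)} (s)")
    print(f"  stddev: {stdev(times)} (s)")
    print(f"  test overhead: {mean(overhead)} (s)")
```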


@@ -116,7 +116,7 @@ WHERE
 ;
 """
 
-_POLL_SQL = """\
+_POLL_SQL = f"""\
 UPDATE `job`
 SET
     `events` = json_insert(events, '$[#]', json_array('job_state_advanced', json_object('old', json(state), 'new', json(:state), 'timestamp', CURRENT_TIMESTAMP)))
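
A note on the `'$[#]'` path in that last line: in SQLite's JSON functions, `#` refers to the array's length, so `json_insert(events, '$[#]', ...)` appends a new element to the `events` array. A self-contained sketch of the same pattern (illustrative schema, not jobq's actual table definition):

``` python
# Demonstrates SQLite's json_insert with the '$[#]' append path
# (requires SQLite >= 3.31 for '#' in JSON paths).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE job (id INTEGER PRIMARY KEY, state TEXT, events TEXT)")
conn.execute("""INSERT INTO job (state, events) VALUES (json('"new"'), json('[]'))""")

# Mirrors the event-append portion of _POLL_SQL: push a
# ('job_state_advanced', {old, new, timestamp}) entry onto the events array.
conn.execute(
    """
    UPDATE job
    SET events = json_insert(
        events, '$[#]',
        json_array('job_state_advanced',
                   json_object('old', json(state),
                               'new', json(:state),
                               'timestamp', CURRENT_TIMESTAMP)))
    WHERE id = 1
    """,
    {"state": '"running"'},
)
print(conn.execute("SELECT events FROM job WHERE id = 1").fetchone()[0])
# e.g. [["job_state_advanced",{"old":"new","new":"running","timestamp":"..."}]]
```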