Add a 'python' prefix

This commit is contained in:
Reid 'arrdem' McKenzie 2021-04-09 01:37:24 -06:00
commit e3c099e729
20 changed files with 2 additions and 2 deletions

View file

@ -0,0 +1,20 @@
# Library target exposing the shared pytest fixtures defined in conftest.py.
py_library(
    name = "conftest",
    srcs = [
        "conftest.py"
    ],
    imports = [
        # Put this directory on the import path so tests can `import conftest`.
        "."
    ],
)
# Pytest suite over every Python file in this package.
py_pytest(
    name = "test",
    srcs = glob(["*.py"]),
    deps = [
        "//projects/calf:lib",
        ":conftest",
        py_requirement("pytest-cov"),
    ],
    # Emit a terminal coverage report scoped to the calf package.
    args = ["--cov-report", "term", "--cov=calf"],
)

View file

@ -0,0 +1,7 @@
"""
Fixtures for testing Calf.
"""
import pytest
parametrize = pytest.mark.parametrize

View file

@ -0,0 +1,30 @@
"""
Tests covering the Calf grammar.
"""
import re
from calf import grammar as cg
from conftest import parametrize
@parametrize('ex', [
    # Proper strings
    '""',
    '"foo bar"',
    '"foo\n bar\n\r qux"',
    '"foo\\"bar"',
    '""""""',
    '"""foo bar baz"""',
    '"""foo "" "" "" bar baz"""',

    # Unterminated string cases
    '"',
    '"f',
    '"foo bar',
    '"foo\\" bar',
    '"""foo bar baz',
])
def test_match_string(ex):
    """Assert that STRING_PATTERN fully matches each example.

    NOTE(review): the unterminated cases are asserted to *match* as well —
    presumably STRING_PATTERN deliberately captures unterminated strings so a
    later stage can report them; confirm this is intentional.
    """
    assert re.fullmatch(cg.STRING_PATTERN, ex)

View file

@ -0,0 +1,89 @@
"""
Tests of calf.lexer
Tests both basic functionality, some examples and makes sure that arbitrary token sequences round
trip through the lexer.
"""
import calf.lexer as cl
from conftest import parametrize
import pytest
def lex_single_token(buffer):
    """Return the first token produced by lexing *buffer*."""
    token_stream = iter(cl.lex_buffer(buffer))
    return next(token_stream)
@parametrize(
    "text,token_type",
    [
        ("(", "PAREN_LEFT",),
        (")", "PAREN_RIGHT",),
        ("[", "BRACKET_LEFT",),
        ("]", "BRACKET_RIGHT",),
        ("{", "BRACE_LEFT",),
        ("}", "BRACE_RIGHT",),
        ("^", "META",),
        ("#", "MACRO_DISPATCH",),
        ("'", "SINGLE_QUOTE"),
        ("foo", "SYMBOL",),
        ("foo/bar", "SYMBOL"),
        (":foo", "KEYWORD",),
        (":foo/bar", "KEYWORD",),
        # Commas lex as whitespace, Clojure-style.
        (" ,,\t ,, \t", "WHITESPACE",),
        ("\n\r", "WHITESPACE"),
        ("\n", "WHITESPACE"),
        (" , ", "WHITESPACE",),
        ("; this is a sample comment\n", "COMMENT"),
        ('"foo"', "STRING"),
        ('"foo bar baz"', "STRING"),
    ],
)
def test_lex_examples(text, token_type):
    """Lexing a single-token input yields a token with that exact text and type."""
    t = lex_single_token(text)
    assert t.value == text
    assert t.type == token_type
@parametrize(
    "text,token_types",
    [
        ("foo^bar", ["SYMBOL", "META", "SYMBOL"]),
        ("foo bar", ["SYMBOL", "WHITESPACE", "SYMBOL"]),
        ("foo-bar", ["SYMBOL"]),
        ("foo\nbar", ["SYMBOL", "WHITESPACE", "SYMBOL"]),
        (
            "{[^#()]}",
            [
                "BRACE_LEFT",
                "BRACKET_LEFT",
                "META",
                "MACRO_DISPATCH",
                "PAREN_LEFT",
                "PAREN_RIGHT",
                "BRACKET_RIGHT",
                "BRACE_RIGHT",
            ],
        ),
        # Bare +/- are symbols, not numbers.
        ("+", ["SYMBOL"]),
        ("-", ["SYMBOL"]),
        ("1", ["INTEGER"]),
        ("-1", ["INTEGER"]),
        ("-1.0", ["FLOAT"]),
        ("-1e3", ["FLOAT"]),
        # NOTE(review): "+1.3e" has no exponent digits — presumably the FLOAT
        # pattern tolerates a bare trailing 'e'; confirm this is intended.
        ("+1.3e", ["FLOAT"]),
        ("f", ["SYMBOL"]),
        ("f1", ["SYMBOL"]),
        ("f1g2", ["SYMBOL"]),
        ("foo13-bar", ["SYMBOL"]),
        ("foo+13-12bar", ["SYMBOL"]),
        ("+-+-+-+-+", ["SYMBOL"]),
    ],
)
def test_lex_compound_examples(text, token_types):
    """Lexing a multi-token input yields exactly the expected token-type sequence."""
    t = cl.lex_buffer(text)
    result_types = [token.type for token in t]
    assert result_types == token_types

View file

@ -0,0 +1,219 @@
"""
Tests of calf.parser
"""
import calf.parser as cp
from conftest import parametrize
import pytest
@parametrize("text", [
'"',
'"foo bar',
'"""foo bar',
'"""foo bar"',
])
def test_bad_strings_raise(text):
"""Tests asserting we won't let obviously bad strings fly."""
# FIXME (arrdem 2021-03-13):
# Can we provide this behavior in the lexer rather than in the parser?
with pytest.raises(ValueError):
next(cp.parse_buffer(text))
@parametrize("text", [
"[1.0",
"(1.0",
"{1.0",
])
def test_unterminated_raises(text):
"""Tests asserting that we don't let unterminated collections parse."""
with pytest.raises(cp.CalfMissingCloseParseError):
next(cp.parse_buffer(text))
@parametrize("text", [
"[{]",
"[(]",
"({)",
"([)",
"{(}",
"{[}",
])
def test_unbalanced_raises(text):
"""Tests asserting that we don't let missmatched collections parse."""
with pytest.raises(cp.CalfUnexpectedCloseParseError):
next(cp.parse_buffer(text))
@parametrize("buff, value", [
('"foo"', "foo"),
('"foo\tbar"', "foo\tbar"),
('"foo\n\rbar"', "foo\n\rbar"),
('"foo\\"bar\\""', "foo\"bar\""),
('"""foo"""', 'foo'),
('"""foo"bar"baz"""', 'foo"bar"baz'),
])
def test_strings_round_trip(buff, value):
assert next(cp.parse_buffer(buff)) == value
@parametrize('text, element_types', [
    # Integers
    ("(1)", ["INTEGER"]),
    ("( 1 )", ["INTEGER"]),
    ("(,1,)", ["INTEGER"]),
    ("(1\n)", ["INTEGER"]),
    ("(\n1\n)", ["INTEGER"]),
    ("(1, 2, 3, 4)", ["INTEGER", "INTEGER", "INTEGER", "INTEGER"]),

    # Floats
    ("(1.0)", ["FLOAT"]),
    ("(1.0e0)", ["FLOAT"]),
    ("(1e0)", ["FLOAT"]),
    ("(1e0)", ["FLOAT"]),  # NOTE(review): duplicate of the previous case

    # Symbols
    ("(foo)", ["SYMBOL"]),
    ("(+)", ["SYMBOL"]),
    ("(-)", ["SYMBOL"]),
    ("(*)", ["SYMBOL"]),
    ("(foo-bar)", ["SYMBOL"]),
    ("(+foo-bar+)", ["SYMBOL"]),
    ("(+foo-bar+)", ["SYMBOL"]),  # NOTE(review): duplicate of the previous case
    ("( foo bar )", ["SYMBOL", "SYMBOL"]),

    # Keywords
    ("(:foo)", ["KEYWORD"]),
    ("( :foo )", ["KEYWORD"]),
    ("(\n:foo\n)", ["KEYWORD"]),
    ("(,:foo,)", ["KEYWORD"]),
    ("(:foo :bar)", ["KEYWORD", "KEYWORD"]),
    ("(:foo :bar 1)", ["KEYWORD", "KEYWORD", "INTEGER"]),

    # Strings
    ('("foo", "bar", "baz")', ["STRING", "STRING", "STRING"]),

    # Lists
    ('([] [] ())', ["SQLIST", "SQLIST", "LIST"]),
])
def test_parse_list(text, element_types):
    """Test we can parse various lists of contents."""
    # discard_whitespace=True drops WHITESPACE tokens, so only value
    # elements remain in the parsed list.
    l_t = next(cp.parse_buffer(text, discard_whitespace=True))
    assert l_t.type == "LIST"
    assert [t.type for t in l_t] == element_types
@parametrize('text, element_types', [
    # Integers
    ("[1]", ["INTEGER"]),
    ("[ 1 ]", ["INTEGER"]),
    ("[,1,]", ["INTEGER"]),
    ("[1\n]", ["INTEGER"]),
    ("[\n1\n]", ["INTEGER"]),
    ("[1, 2, 3, 4]", ["INTEGER", "INTEGER", "INTEGER", "INTEGER"]),

    # Floats
    ("[1.0]", ["FLOAT"]),
    ("[1.0e0]", ["FLOAT"]),
    ("[1e0]", ["FLOAT"]),
    ("[1e0]", ["FLOAT"]),  # NOTE(review): duplicate of the previous case

    # Symbols
    ("[foo]", ["SYMBOL"]),
    ("[+]", ["SYMBOL"]),
    ("[-]", ["SYMBOL"]),
    ("[*]", ["SYMBOL"]),
    ("[foo-bar]", ["SYMBOL"]),
    ("[+foo-bar+]", ["SYMBOL"]),
    ("[+foo-bar+]", ["SYMBOL"]),  # NOTE(review): duplicate of the previous case
    ("[ foo bar ]", ["SYMBOL", "SYMBOL"]),

    # Keywords
    ("[:foo]", ["KEYWORD"]),
    ("[ :foo ]", ["KEYWORD"]),
    ("[\n:foo\n]", ["KEYWORD"]),
    ("[,:foo,]", ["KEYWORD"]),
    ("[:foo :bar]", ["KEYWORD", "KEYWORD"]),
    ("[:foo :bar 1]", ["KEYWORD", "KEYWORD", "INTEGER"]),

    # Strings
    ('["foo", "bar", "baz"]', ["STRING", "STRING", "STRING"]),

    # Lists
    ('[[] [] ()]', ["SQLIST", "SQLIST", "LIST"]),
])
def test_parse_sqlist(text, element_types):
    """Test we can parse various 'square' lists of contents."""
    # discard_whitespace=True drops WHITESPACE tokens, so only value
    # elements remain in the parsed vector.
    l_t = next(cp.parse_buffer(text, discard_whitespace=True))
    assert l_t.type == "SQLIST"
    assert [t.type for t in l_t] == element_types
@parametrize('text, element_pairs', [
    # Empty map
    ("{}",
     []),
    ("{:foo 1}",
     [["KEYWORD", "INTEGER"]]),
    ("{:foo 1, :bar 2}",
     [["KEYWORD", "INTEGER"],
      ["KEYWORD", "INTEGER"]]),
    ("{foo 1, bar 2}",
     [["SYMBOL", "INTEGER"],
      ["SYMBOL", "INTEGER"]]),
    ("{foo 1, bar -2}",
     [["SYMBOL", "INTEGER"],
      ["SYMBOL", "INTEGER"]]),
    ("{foo 1, bar -2e0}",
     [["SYMBOL", "INTEGER"],
      ["SYMBOL", "FLOAT"]]),
    # Nested collections as values
    ("{foo ()}",
     [["SYMBOL", "LIST"]]),
    ("{foo []}",
     [["SYMBOL", "SQLIST"]]),
    ("{foo {}}",
     [["SYMBOL", "DICT"]]),
    ('{"foo" {}}',
     [["STRING", "DICT"]])
])
def test_parse_dict(text, element_pairs):
    """Test we can parse various mappings."""
    d_t = next(cp.parse_buffer(text, discard_whitespace=True))
    assert d_t.type == "DICT"
    # A DICT token's .value holds key/value pairs; compare their token types.
    assert [[t.type for t in pair] for pair in d_t.value] == element_pairs
@parametrize("text", [
"{1}",
"{1, 2, 3}",
"{:foo}",
"{:foo :bar :baz}"
])
def test_parse_bad_dict(text):
"""Assert that dicts with missmatched pairs don't parse."""
with pytest.raises(Exception):
next(cp.parse_buffer(text))
@parametrize("text", [
"()",
"(1 1.1 1e2 -2 foo :foo foo/bar :foo/bar [{},])",
"{:foo bar, :baz [:qux]}",
"'foo",
"'[foo bar :baz 'qux, {}]",
"#foo []",
"^{} bar",
])
def test_examples(text):
"""Shotgun examples showing we can parse some stuff."""
assert list(cp.parse_buffer(text))

View file

@ -0,0 +1,22 @@
"""
"""
from conftest import parametrize
from calf.reader import read_buffer
@parametrize('text', [
    "()",
    "[]",
    "[[[[[[[[[]]]]]]]]]",
    "{1 {2 {}}}",
    '"foo"',
    "foo",
    "'foo",
    "^foo bar",
    "^:foo bar",
    "{\"foo\" '([:bar ^:foo 'baz 3.14159e0])}",
    # NOTE(review): "1.2." has a trailing dot — presumably a typo for
    # "1.2," but the reader evidently accepts it; confirm intended.
    "[:foo bar 'baz lo/l, 1, 1.2. 1e-5 -1e2]",
])
def test_read(text):
    """Smoke test: each example must read to a non-empty sequence of values."""
    assert list(read_buffer(text))

View file

@ -0,0 +1,17 @@
"""
Tests covering the Calf types.
"""
from calf import types as t
def test_maps_check():
    """Map.of builds a value that is an instance of Map."""
    built = t.Map.of([(1, 2)])
    assert isinstance(built, t.Map)
def test_vectors_check():
    """Vector.of builds a value that is an instance of Vector."""
    built = t.Vector.of([(1, 2)])
    assert isinstance(built, t.Vector)
def test_sets_check():
    """Set.of builds a value that is an instance of Set."""
    built = t.Set.of([(1, 2)])
    assert isinstance(built, t.Set)