test(scripts): create measurement script tests

Umut
2021-11-12 16:46:58 +03:00
parent e7e7a02425
commit d244bcace9
13 changed files with 213 additions and 3 deletions

View File

@@ -285,6 +285,10 @@ jobs:
  if: ${{ github.event_name == 'schedule' && steps.conformance.outcome == 'success' && !cancelled() }}
  run: |
    make pytest_nb
- name: PyTest Progress Tracker
  if: ${{ steps.conformance.outcome == 'success' && !cancelled() }}
  run: |
    make pytest_progress_tracker
- name: Test coverage
  id: coverage
  if: ${{ always() && steps.pytest.outcome != 'skipped' && !cancelled() }}

View File

@@ -58,7 +58,9 @@ pylint_benchmarks:
.PHONY: pylint_benchmarks

pylint_script:
	find ./script/ -type f -name "*.py" | xargs poetry run pylint --rcfile=pylintrc
	@# disable linting of python files under the `progress_tracker_utils/test_scripts` folder
	@# because they are intentionally ill-formed so that the progress tracker can be tested
	find ./script/ -type f -name "*.py" -not -path "./script/progress_tracker_utils/test_scripts/*" | xargs poetry run pylint --rcfile=pylintrc
.PHONY: pylint_script

flake8:
@@ -93,6 +95,11 @@ pytest:
	--cov-report=term-missing:skip-covered tests/
.PHONY: pytest

pytest_progress_tracker:
	poetry run python script/progress_tracker_utils/extract_machine_info.py
	poetry run pytest -svv script/progress_tracker_utils/test_progress_tracker.py
.PHONY: pytest_progress_tracker

# Not a huge fan of ignoring missing imports, but some packages do not have typing stubs
mypy:
	poetry run mypy -p $(SRC_DIR) --ignore-missing-imports

View File

@@ -39,7 +39,7 @@ def register_alert(script, index, line, metrics, alerts):
    # Parse the alert and append it to the list of alerts
    supported_operators = ["==", "!=", "<=", ">=", "<", ">"]
    for operator in supported_operators:
        alert_details = alert_line.split(operator)
        alert_details = alert_line.split(f" {operator} ")
        # An alert should be of the form `{metric} {operator} {constant}`
        if len(alert_details) == 2:
@@ -59,7 +59,7 @@ def register_alert(script, index, line, metrics, alerts):
            except ValueError as error:
                raise SyntaxError(
                    f"An alert is not using a constant floating point for comparison "
                    f"(at line {index + 1} of {script})",
                    f"(it uses `{value_str}` at line {index + 1} of {script})",
                ) from error
            break
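
The fix above pads each operator with spaces before splitting, so an unsupported comparison such as `===` is no longer mistaken for `==`. A minimal sketch of the difference, assuming the `# bench: Alert: ` prefix has already been stripped from the line (the alert text matches the invalid-operator fixture further below):

alert_line = "Printing Time (ms) === 10"

# Old behaviour: a bare split on "==" also matches inside "===", so the line
# looks like a valid `{metric} == {constant}` alert and later fails with the
# misleading "constant floating point" error.
print(alert_line.split("=="))      # ['Printing Time (ms) ', '= 10']

# New behaviour: requiring " == " (spaces included) leaves the line unsplit,
# so the "not using any of the supported comparisons" error is raised instead.
print(alert_line.split(" == "))    # ['Printing Time (ms) === 10']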

View File

@@ -0,0 +1,137 @@
"""Test file for progress tracker"""
from typing import List
import measure
import pytest
class Args:
"""Class to mimic the command line arguments that can be passed to measurement script."""
base: str
files_to_benchmark: List[str]
samples: int
keep: bool
check: bool
def __init__(self, files_to_benchmark: List[str]):
self.base = "script/progress_tracker_utils/test_scripts"
self.files_to_benchmark = files_to_benchmark
self.samples = 30
self.keep = False
self.check = True


def test_alert_on_undefined_metric():
    """Test function for alert directive on undefined metric"""
    file = "script/progress_tracker_utils/test_scripts/alert_on_undefined_metric.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"An alert is using an undefined metric `Accuracy (%)` (at line 7 of {file})"
    )


def test_alert_invalid_comparison_constant():
    """Test function for alert directive with invalid constant"""
    file = "script/progress_tracker_utils/test_scripts/alert_invalid_comparison_constant.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"An alert is not using a constant floating point for comparison "
        f'(it uses `"abc"` at line 7 of {file})'
    )


def test_alert_invalid_comparison_operator():
    """Test function for alert directive that uses an invalid comparison operator"""
    file = "script/progress_tracker_utils/test_scripts/alert_invalid_comparison_operator.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"An alert is not using any of the supported comparisons ==, !=, <=, >=, <, > "
        f"(at line 7 of {file})"
    )


def test_measure_end_before_start():
    """Test function for measure end directive before measure directive"""
    file = "script/progress_tracker_utils/test_scripts/measure_end_before_start.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"Measurements cannot end before they are defined (at line 3 of {file})"
    )


def test_measure_invalid_indentation():
    """Test function for invalid indentation of measure directives"""
    file = "script/progress_tracker_utils/test_scripts/measure_invalid_indentation.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"Measurements should finish with the same indentation as they are defined "
        f"(at lines 4 and 6 of {file})"
    )


def test_measure_nested():
    """Test function for nested measure directives"""
    file = "script/progress_tracker_utils/test_scripts/measure_nested.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"Nested measurements are not supported (at lines 3 and 7 of {file})"
    )


def test_measure_unfinished():
    """Test function for measure directives without a measure end directive"""
    file = "script/progress_tracker_utils/test_scripts/measure_unfinished.py"
    args = Args([file])
    with pytest.raises(SyntaxError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == (
        f"Unfinished measurements are not supported (at line 3 of {file})"
    )


def test_two_targets_with_the_same_name():
    """Test function for target name collisions"""
    file1 = "script/progress_tracker_utils/test_scripts/two_targets_with_the_same_name.1.py"
    file2 = "script/progress_tracker_utils/test_scripts/two_targets_with_the_same_name.2.py"
    args = Args([file1, file2])
    with pytest.raises(RuntimeError) as excinfo:
        measure.main(args)
    assert str(excinfo.value) == "Target `X` is already registered"
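
The `Args` class above stands in for the parsed command-line arguments that `measure.main` expects. Assuming the measurement script parses its options with `argparse`, an equivalent stand-in could be built with `argparse.Namespace` directly; the attribute names are taken from the class, their exact meaning inside `measure.main` is inferred from this test file:

import argparse

# Equivalent stand-in built with argparse.Namespace instead of a custom class.
args = argparse.Namespace(
    base="script/progress_tracker_utils/test_scripts",
    files_to_benchmark=["script/progress_tracker_utils/test_scripts/measure_nested.py"],
    samples=30,
    keep=False,
    check=True,
)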

View File

@@ -0,0 +1,7 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
# bench: Alert: Printing Time (ms) != "abc"

View File

@@ -0,0 +1,7 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
# bench: Alert: Printing Time (ms) === 10

View File

@@ -0,0 +1,7 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
# bench: Alert: Accuracy (%) != 100

View File

@@ -0,0 +1,7 @@
# bench: Unit Target: X
# bench: Measure: End
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End

View File

@@ -0,0 +1,6 @@
# bench: Unit Target: X

if True:
    # bench: Measure: Printing Time (ms)
    print(42)
# bench: Measure: End

View File

@@ -0,0 +1,13 @@
# bench: Unit Target: X
# bench: Measure: Printing Time Of Three (ms)
print(42)
# bench: Measure: Printing Time Of One (ms)
print(42)
# bench: Measure: End
print(42)
# bench: Measure: End

View File

@@ -0,0 +1,5 @@
# bench: Unit Target: X
# bench: Measure: Printing Time Of Three (ms)
print(42)

View File

@@ -0,0 +1,5 @@
# bench: Full Target: X
# bench: Measure: Printing Time (ms)
print(420)
# bench: Measure: End

View File

@@ -0,0 +1,5 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End