refactor: Improve debug tooling

- Added print statements to aid debugging and make the code flow easier to follow.
- Updated log messages to include more detail about the data and variables involved in generating tests and handling challenges.
This commit is contained in:
Nicholas Tindle
2023-11-09 01:25:13 -06:00
parent 67d1e96415
commit 03da45d6e6
4 changed files with 18 additions and 1 deletion

View File

@@ -161,7 +161,9 @@ def check_regression(request: Any) -> None:
agent_benchmark_config = load_config_from_request(request)
with contextlib.suppress(Exception):
test = agent_benchmark_config.get_regression_reports_path()
print(f"Found a test {test}")
data = json.loads(test)
print(f"Got its data {data}")
challenge_location = getattr(request.node.parent.cls, "CHALLENGE_LOCATION", "")
skip_string = f"Skipping {test_name} at {challenge_location}"
@@ -188,6 +190,7 @@ def challenge_data(request: Any) -> None:
Returns:
None: The challenge data is directly passed to the test function and does not need to be returned.
"""
print(f"REQUEST DATA HERE {request}")
return request.param
@@ -240,6 +243,8 @@ def pytest_runtest_makereport(item: Any, call: Any) -> None:
"""
challenge_data = item.funcargs.get("challenge_data", None)
print(f"pytest_runtest_makereport Challenge data: {challenge_data}")
if not challenge_data:
# this will only happen for dummy dependency setup tests
return
@@ -357,10 +362,14 @@ def pytest_collection_modifyitems(items: Any, config: Any) -> None:
else {}
)
print(f"data??? {data}")
for item in items:
# Assuming item.cls is your test class
test_class_instance = item.cls()
print(f"item: {item!r}")
if "test_method" not in item.name:
continue
@@ -378,6 +387,7 @@ def pytest_collection_modifyitems(items: Any, config: Any) -> None:
# or config.getoption("--no_dep")
# or config.getoption("--maintain")
# ):
print(f"test_class_instance: {test_class_instance!r}")
dependencies = test_class_instance.dependencies
# Add depends marker dynamically

View File

@@ -34,7 +34,7 @@ def create_single_test(
# Define test class dynamically
challenge_class = types.new_class(f"Test{data['name']}", (Challenge,))
print(challenge_location)
print(f"challenge_class: {challenge_class}")
# clean_challenge_location = get_test_path(challenge_location)
setattr(challenge_class, "CHALLENGE_LOCATION", challenge_location)
@@ -112,6 +112,7 @@ def create_single_test(
)(test_method)
setattr(challenge_class, "test_method", test_method)
print(f"Challenge Class {challenge_class}")
# Attach the new class to a module so it can be discovered by pytest
module = importlib.import_module(__name__)
@@ -213,6 +214,7 @@ def generate_tests() -> None: # sourcery skip: invert-any-all
json_files, challenge_class = create_challenge(data, json_file, json_files)
print(f"Generated test for {data['name']}.")
print(f"- {data}")
print("Test generation complete.")

View File

@@ -30,6 +30,9 @@ class Challenge(ABC):
CHALLENGE_LOCATION: str = ""
scores: dict[str, Any] = {} # this is for suites
# def __repr__(self) -> str:
# return f"{self.__class__.__name__}(CHALLENGE_LOCATION={self.CHALLENGE_LOCATION}, _data_cache={self._data_cache!r})"
@property
def data(self) -> ChallengeData:
if self.CHALLENGE_LOCATION not in self._data_cache:
@@ -44,6 +47,7 @@ class Challenge(ABC):
@property
def dependencies(self) -> list:
print(f"got prop dependencies data: {self.data.dependencies}")
return self.data.dependencies
async def setup_challenge(self, config: Dict[str, Any], cutoff: int) -> None:

View File

@@ -212,6 +212,7 @@ class ChallengeData(BaseModel):
try:
return ChallengeData(**data)
except:
print(f"FAILED TO PARSE: {data}")
test = "ok"
def challenge_from_datum(self, file_datum: list[dict[str, Any]]) -> "ChallengeData":