feat(benchmark/report): Add and record TestResult.n_steps

- Added `n_steps` attribute to `TestResult` type
- Added logic to record the number of steps in `BuiltinChallenge.test_method` and `WebArenaChallenge.test_method`, and to write it to the report in `.reports.add_test_result_to_report`
Author: Reinier van der Leer
Date:   2024-02-16 17:53:19 +01:00
Parent: a5de79beb6
Commit: 752bac099b

4 changed files with 9 additions and 0 deletions

@@ -173,6 +173,7 @@ class BuiltinChallenge(BaseChallenge):
             timeout = int(cutoff)  # type: ignore
 
         task_id = ""
+        n_steps = 0
         timed_out = None
         try:
             async for step in self.run_challenge(
@@ -180,9 +181,11 @@ class BuiltinChallenge(BaseChallenge):
             ):
                 if not task_id:
                     task_id = step.task_id
+                n_steps += 1
                 timed_out = False
         except TimeoutError:
             timed_out = True
+        request.node.user_properties.append(("n_steps", n_steps))
         request.node.user_properties.append(("timed_out", timed_out))
 
         agent_client_config = ClientConfig(host=config.host)
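Note on the pattern above: because the counter is incremented inside the `try` block and read after the `except`, a run that hits the cutoff still reports however many steps it completed before the timeout. A minimal, self-contained sketch of that pattern (illustrative names; `asyncio.timeout` stands in here for the benchmark's own cutoff handling):

```python
import asyncio


async def fake_steps():
    """Stand-in for a challenge's step stream: yields one step every 0.1 s."""
    while True:
        await asyncio.sleep(0.1)
        yield object()


async def run_with_cutoff(cutoff: float) -> tuple[int, bool | None]:
    n_steps = 0
    timed_out = None
    try:
        async with asyncio.timeout(cutoff):  # Python 3.11+
            async for _ in fake_steps():
                n_steps += 1
                timed_out = False
    except TimeoutError:
        timed_out = True
    # n_steps keeps the count reached before the cutoff fired
    return n_steps, timed_out


print(asyncio.run(run_with_cutoff(0.35)))  # typically (3, True)
```

Three steps complete (at 0.1 s, 0.2 s, 0.3 s) before the 0.35 s cutoff cancels the loop, so the partial count survives the `TimeoutError`.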

@@ -393,6 +393,7 @@ class WebArenaChallenge(BaseChallenge):
         elif cutoff := request.config.getoption("--cutoff"):
             timeout = int(cutoff)
 
+        n_steps = 0
         timed_out = None
         eval_results_per_step: list[list[tuple[_Eval, EvalResult]]] = []
         try:
@@ -402,6 +403,7 @@ class WebArenaChallenge(BaseChallenge):
                 if not step.output:
                     logger.warn(f"Step has no output: {step}")
                     continue
+                n_steps += 1
                 step_eval_results = self.evaluate_step_result(
                     step, mock=request.config.getoption("--mock")
                 )
@@ -419,6 +421,7 @@ class WebArenaChallenge(BaseChallenge):
                 timed_out = False
         except TimeoutError:
             timed_out = True
+        request.node.user_properties.append(("n_steps", n_steps))
         request.node.user_properties.append(("timed_out", timed_out))
 
         # Get the column aggregate (highest score for each Eval)

@@ -20,6 +20,8 @@ class TestResult(BaseModel):
     fail_reason: str | None = None
     """If applicable, the reason why the run was not successful"""
     reached_cutoff: bool | None = None  # None if in progress
     """Whether the run had to be stopped due to reaching the timeout"""
+    n_steps: int | None = None
+    """The number of steps executed by the agent"""
     cost: float | None = None
     """The (known) cost incurred by the run, e.g. from using paid LLM APIs"""

@@ -92,6 +92,7 @@ def add_test_result_to_report(
             run_time=f"{str(round(call.duration, 3))} seconds",
             fail_reason=str(call.excinfo.value) if call.excinfo else None,
             reached_cutoff=user_properties.get("timed_out", False),
+            n_steps=user_properties.get("n_steps"),
         )
     )
     test_report.metrics.success_percentage = (
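The `user_properties.get(...)` calls imply that pytest's `(name, value)` tuple list has already been converted to a dict earlier in `add_test_result_to_report` (not visible in this hunk). A minimal sketch of the round trip between a test and a reporting hook; the hook body is illustrative, not the benchmark's actual reporting code:

```python
import pytest


def test_challenge(request):
    # Producer side: the challenge's test_method appends values to
    # request.node.user_properties, a list of (name, value) tuples.
    request.node.user_properties.append(("n_steps", 7))
    request.node.user_properties.append(("timed_out", False))


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Consumer side (conftest.py): turn the tuple list into a dict so
    # .get() lookups like the ones in the hunk above work.
    outcome = yield
    report = outcome.get_result()
    if report.when == "call":
        user_properties = dict(item.user_properties)
        print(item.nodeid, "n_steps:", user_properties.get("n_steps"))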