Cleanup report json in harnesses (#79)

* use format utils for report json in test harnesses

* correct start time in dota harness to be a timestamp rather than a time delta value (see the sketch below this list)

* dota2 util imports

* update dota 2 timestamps

* update error message

* update error message
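The second bullet is the substantive fix: run_benchmark() previously returned elapsed_setup_time, a duration in seconds, where the caller expected an absolute epoch timestamp. A minimal sketch of the distinction — values and comments are illustrative, the variable names mirror the diff further down:

import time

setup_start_time = time.time()    # epoch seconds, e.g. 1700087760.10
# ... launch game, copy replay/config, load into the replay ...
setup_end_time = time.time()
elapsed_setup_time = round(setup_end_time - setup_start_time, 2)   # a delta, e.g. 12.34

test_start_time = time.time()     # an absolute timestamp, which is what the report needs
# Old bug: returning elapsed_setup_time here meant the report's "start_time"
# field held a few thousand milliseconds instead of an epoch timestamp.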
Author: derek-hirotsu
Date: 2023-11-15 14:36:10 -08:00
Committed by: GitHub
Parent: e787600d81
Commit: 7e8c5dc71a
6 changed files with 35 additions and 21 deletions


@@ -4,12 +4,17 @@ import os
 import time
 import pydirectinput as user
 import sys
-from utils import console_command, get_resolution, copy_replay, copy_config, get_args
+from dota2_utils import console_command, get_resolution, copy_replay, copy_config, get_args
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 #pylint: disable=wrong-import-position
 from harness_utils.output import (
-    setup_log_directory, write_report_json, DEFAULT_LOGGING_FORMAT, DEFAULT_DATE_FORMAT)
+    setup_log_directory,
+    write_report_json,
+    format_resolution,
+    seconds_to_milliseconds,
+    DEFAULT_LOGGING_FORMAT,
+    DEFAULT_DATE_FORMAT)
 from harness_utils.process import terminate_processes
 from harness_utils.keras_service import KerasService
 from harness_utils.steam import exec_steam_game
@@ -68,6 +73,7 @@ def run_benchmark():
     setup_end_time = time.time()
     elapsed_setup_time = round(setup_end_time - setup_start_time, 2)
     logging.info("Harness setup took %f seconds", elapsed_setup_time)
+    test_start_time = time.time()
 
     # TODO -> Mark benchmark start time using video OCR by looking for a players name
     if kerasService.wait_for_word(word="directed", timeout=100, interval=0.5) is None:
@@ -83,18 +89,18 @@ def run_benchmark():
     logging.info("Run completed. Closing game.")
     test_end_time = time.time()
-    elapsed_test_time = round((test_end_time - elapsed_setup_time), 2)
+    elapsed_test_time = round((test_end_time - test_start_time), 2)
     logging.info("Benchmark took %f seconds", elapsed_test_time)
     terminate_processes(PROCESS_NAME)
-    return elapsed_setup_time, test_end_time
+    return test_start_time, test_end_time
 
 try:
     start_time, end_time = run_benchmark()
     height, width = get_resolution()
     report = {
-        "resolution": f"{width}x{height}",
-        "start_time": round((start_time * 1000)),
-        "end_time": round((end_time * 1000))
+        "resolution": format_resolution(width, height),
+        "start_time": seconds_to_milliseconds(start_time),
+        "end_time": seconds_to_milliseconds(end_time)
     }
     write_report_json(LOG_DIRECTORY, "report.json", report)
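format_resolution and seconds_to_milliseconds live in harness_utils/output.py, which this diff does not show. Judging from the inline expressions they replace, they plausibly reduce to the following — an inferred sketch, not the repo's actual code:

def format_resolution(width: int, height: int) -> str:
    """Render a resolution string such as '2560x1440'."""
    return f"{width}x{height}"

def seconds_to_milliseconds(seconds: float) -> int:
    """Convert a time.time() float in seconds to whole milliseconds."""
    return round(seconds * 1000)

Centralizing these keeps report.json fields uniform across harnesses, which is the point of the cleanup. The remaining hunks below touch the utility module — plausibly dota2_utils.py, given the corrected import above.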


@@ -76,7 +76,7 @@ def copy_replay() -> None:
     destination = os.path.join(root_dir, "benchmark.dem")
     shutil.copyfile(source, destination)
     if not os.path.isfile(src_file):
-        raise Exception(f"Can't find no intro: {src_file}")
+        raise Exception(f"Can't find intro: {src_file}")
     try:
         Path(replay_path).mkdir(parents=True, exist_ok=True)
     except FileExistsError as e:
@@ -97,7 +97,7 @@ def copy_config() -> None:
     destination = os.path.join(root_dir, "benchmark.cfg")
     shutil.copyfile(source, destination)
     if not os.path.isfile(src_file):
-        raise Exception(f"Can't find no config: {src_file}")
+        raise Exception(f"Can't find config: {src_file}")
     try:
         Path(config_path).mkdir(parents=True, exist_ok=True)
     except FileExistsError as e:
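Both hunks above fix the same double negative ("Can't find no ...") in two near-identical copy helpers. A condensed sketch of the shared pattern — copy_benchmark_file, src_file, dest_dir, and dest_name are hypothetical stand-ins, not the file's actual signatures:

import os
import shutil
from pathlib import Path

def copy_benchmark_file(src_file: str, dest_dir: str, dest_name: str) -> None:
    """Shared shape of copy_replay/copy_config: check the source exists,
    ensure the destination directory, then copy the file into place."""
    if not os.path.isfile(src_file):
        raise Exception(f"Can't find {dest_name}: {src_file}")
    Path(dest_dir).mkdir(parents=True, exist_ok=True)  # tolerates an existing directory
    shutil.copyfile(src_file, os.path.join(dest_dir, dest_name))

Note that with exist_ok=True, Path.mkdir raises FileExistsError only when the path already exists as a non-directory file, so the except FileExistsError handlers in these functions are nearly dead code.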