Non game harness update initial (#142)

Naming Scheme rework for non-game harnesses
j-lin-lmg authored 2025-05-27 11:09:21 -07:00, committed by GitHub
parent 6f47569db6
commit 9f48268433
20 changed files with 52 additions and 34 deletions
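The heart of the rework is the report dict each harness writes: the benchmark variant that used to be baked into the "test" string is split out into its own field, so "test" names only the suite. A minimal before/after sketch based on the 3DMark hunks below (the score value is illustrative):

# Before: the variant was part of the test name.
report_old = {
    "test": "3DMark Time Spy",    # came from BENCHMARK_CONFIG[...]["test_name"]
    "unit": "score",
    "score": 12345,               # illustrative value
}

# After: "test" names the suite, "test_parameter" carries the variant.
report_new = {
    "test": "3DMark",
    "test_parameter": "TimeSpy",  # the --benchmark argument
    "unit": "score",
    "score": 12345,               # illustrative value
}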

View File

@@ -20,7 +20,7 @@ from harness_utils.output import (
)
#####
-### Globals
+# Globals
#####
SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run"
@@ -31,25 +31,25 @@ CONFIG_DIR = SCRIPT_DIR / "config"
BENCHMARK_CONFIG = {
    "TimeSpy": {
        "config": CONFIG_DIR / "timespy.3dmdef",
        "process_name": "3DMarkTimeSpy.exe",
        "score_name": "TimeSpyPerformanceGraphicsScore",
        "test_name": "3DMark Time Spy"
    },
    "FireStrike": {
        "config": CONFIG_DIR / "firestrike.3dmdef",
        "process_name": "3DMarkICFWorkload.exe",
        "score_name": "firestrikegraphicsscorep",
        "test_name": "3DMark Fire Strike"
    },
    "PortRoyal": {
        "config": CONFIG_DIR / "portroyal.3dmdef",
        "process_name": "3DMarkPortRoyal.exe",
        "score_name": "PortRoyalPerformanceGraphicsScore",
        "test_name": "3DMark Port Royal"
    },
    "SolarBay": {
        "config": CONFIG_DIR / "solarbay.3dmdef",
        "process_name": "3DMarkSolarBay.exe",
        "score_name": "SolarBayPerformanceGraphicsScore",
        "test_name": "3DMark Solar Bay"
    }
@@ -57,9 +57,10 @@ BENCHMARK_CONFIG = {
RESULTS_FILENAME = "myresults.xml"
REPORT_PATH = LOG_DIR / RESULTS_FILENAME
def setup_logging():
    """setup logging"""
-    setup_log_directory(LOG_DIR)
+    setup_log_directory(str(LOG_DIR))
    logging.basicConfig(filename=LOG_DIR / "harness.log",
                        format=DEFAULT_LOGGING_FORMAT,
                        datefmt=DEFAULT_DATE_FORMAT,
@@ -73,8 +74,9 @@ def setup_logging():
def get_arguments():
    """get arguments"""
    parser = ArgumentParser()
-    parser.add_argument(
-        "--benchmark", dest="benchmark", help="Benchmark test type", required=True, choices=BENCHMARK_CONFIG.keys())
+    parser.add_argument("--benchmark", dest="benchmark",
+                        help="Benchmark test type", required=True,
+                        choices=BENCHMARK_CONFIG.keys())
    argies = parser.parse_args()
    return argies
@@ -94,16 +96,17 @@ def run_benchmark(process_name, command_to_run):
    while True:
        now = time.time()
        elapsed = now - start_time
-        if elapsed >= 30: #seconds
+        if elapsed >= 30:  # seconds
            raise ValueError("BenchMark subprocess did not start in time")
        process = is_process_running(process_name)
        if process is not None:
            process.nice(psutil.HIGH_PRIORITY_CLASS)
            break
        time.sleep(0.2)
    _, _ = proc.communicate()  # blocks until 3dmark exits
    return proc
try:
    setup_logging()
    args = get_arguments()
@@ -118,7 +121,9 @@ try:
logging.error("3DMark exited with return code %d", pr.returncode) logging.error("3DMark exited with return code %d", pr.returncode)
sys.exit(pr.returncode) sys.exit(pr.returncode)
score = get_score(BENCHMARK_CONFIG[args.benchmark]["score_name"], REPORT_PATH) score = get_score(
BENCHMARK_CONFIG[args.benchmark]["score_name"],
REPORT_PATH)
if score is None: if score is None:
logging.error("Could not find average FPS output!") logging.error("Could not find average FPS output!")
sys.exit(1) sys.exit(1)
@@ -129,7 +134,8 @@ try:
logging.info("Score was %s", score) logging.info("Score was %s", score)
report = { report = {
"test": BENCHMARK_CONFIG[args.benchmark]["test_name"], "test": "3DMark",
"test_parameter": args.benchmark,
"unit": "score", "unit": "score",
"score": score, "score": score,
"start_time": seconds_to_milliseconds(strt), "start_time": seconds_to_milliseconds(strt),

View File

@@ -3,17 +3,17 @@ from pathlib import Path
from blender_utils import BENCHMARK_CONFIG, find_blender, run_blender_render, download_scene
from argparse import ArgumentParser
import logging
-import os.path
import sys
import time
-sys.path.insert(1, os.path.join(sys.path[0], '..'))
+sys.path.insert(1, str(Path(sys.path[0]).parent))
from harness_utils.output import DEFAULT_DATE_FORMAT, DEFAULT_LOGGING_FORMAT, write_report_json, seconds_to_milliseconds
SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR.joinpath("run")
def setup_logging():
    """default logging config"""
    LOG_DIR.mkdir(exist_ok=True)
@@ -26,15 +26,18 @@ def setup_logging():
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
VALID_DEVICES = ["CPU", "CUDA", "OPTIX", "HIP", "ONEAPI", "METAL"]
def main():
    """entry point for test script"""
    parser = ArgumentParser()
    parser.add_argument("-d", "--device", dest="device",
                        help="device", metavar="device", required=True)
    parser.add_argument(
-        "--benchmark", dest="benchmark", help="Benchmark test type", metavar="benchmark", required=True)
+        "--benchmark", dest="benchmark", help="Benchmark test type",
+        metavar="benchmark", required=True)
    args = parser.parse_args()
    if args.device not in VALID_DEVICES:
        raise Exception(f"invalid device selection: {args.device}")
@@ -49,23 +52,25 @@ def main():
    score = run_blender_render(
        executable_path, LOG_DIR, args.device.upper(), benchmark)
    end_time = time.time()
-    logging.info(f'Finished rendering {args.benchmark} in %d seconds', (end_time - start_time))
+    logging.info(
+        f'Finished rendering {args.benchmark} in %d seconds',
+        (end_time - start_time))
    if score is None:
        raise Exception("no duration was found in the log to use as the score")
    report = {
-        "test": f"Blender {args.benchmark} Render {args.device.upper()}",
+        "test": "Blender Render",
+        "test_parameter": args.benchmark,
        "score": score,
        "unit": "seconds",
        "version": version,
        "device": args.device,
-        "benchmark": args.benchmark,
        "start_time": seconds_to_milliseconds(start_time),
        "end_time": seconds_to_milliseconds(end_time)
    }
-    write_report_json(LOG_DIR, "report.json", report)
+    write_report_json(str(LOG_DIR), "report.json", report)
if __name__ == "__main__":

View File

@@ -4,7 +4,7 @@ import getpass
import subprocess
import sys
from pathlib import Path
-from gravitymark_utils import friendly_test_name, get_args, get_score, create_gravitymark_command
+from gravitymark_utils import friendly_test_param, get_args, get_score, create_gravitymark_command
PARENT_DIR = str(Path(sys.path[0], ".."))
sys.path.append(PARENT_DIR)
@@ -19,7 +19,7 @@ GRAVITYMARK_PATH = Path("C:/", "Program Files", "GravityMark", "bin")
GRAVITYMARK_EXE = GRAVITYMARK_PATH / "GravityMark.exe"
args = get_args()
-api = f"-{args.api}"
+API = f"-{args.api}"
script_dir = Path(__file__).resolve().parent
log_dir = script_dir / "run"
@@ -36,9 +36,11 @@ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
-gravitymark_log_path = Path("C:/Users", getpass.getuser(), ".GravityMark", "GravityMark.log")
+gravitymark_log_path = Path(
+    "C:/Users", getpass.getuser(),
+    ".GravityMark", "GravityMark.log")
image_path = log_dir / "result.png"
-command = create_gravitymark_command(GRAVITYMARK_EXE, api, image_path)
+command = create_gravitymark_command(GRAVITYMARK_EXE, API, image_path)
try:
    logging.info('Starting benchmark!')
@@ -47,7 +49,8 @@ try:
    result = subprocess.run(command, check=True, cwd=GRAVITYMARK_PATH)
    if result.returncode > 0:
-        logging.error("GravityMark exited with return code %d", result.returncode)
+        logging.error("GravityMark exited with return code %d",
+                      result.returncode)
        sys.exit(1)
    score = get_score(gravitymark_log_path)
@@ -57,12 +60,13 @@ try:
        sys.exit(1)
    report = {
-        "test": friendly_test_name(args.api),
+        "test": "GravityMark",
+        "test_name": friendly_test_param(args.api),
        "score": score,
        "unit": "score"
    }
-    write_report_json(log_dir, "report.json", report)
+    write_report_json(str(log_dir), "report.json", report)
except Exception as e:
    logging.error("Something went wrong running the benchmark!")
    logging.exception(e)

View File

@@ -23,16 +23,16 @@ CLI_OPTIONS = {
"-status": "1" "-status": "1"
} }
def friendly_test_name(api: str) -> str: def friendly_test_param(api: str) -> str:
"""return a friendlier string given the API harness argument""" """return a friendlier string given the API harness argument"""
if api == "vulkan": if api == "vulkan":
return "GravityMark Vulkan" return "Vulkan"
if api == "opengl": if api == "opengl":
return "GravityMark OpenGL" return "OpenGL"
if api == "direct3d12": if api == "direct3d12":
return "GravityMark DX12" return "DX12"
if api == "direct3d11": if api == "direct3d11":
return "GravityMark DX11" return "DX11"
return api return api
def get_args() -> Namespace: def get_args() -> Namespace:
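With the rename, the helper returns only the API label rather than a full test title, and the harness above records that label under "test_name" next to a fixed "test": "GravityMark". A short usage sketch of the renamed helper (the score value is illustrative):

from gravitymark_utils import friendly_test_param

assert friendly_test_param("vulkan") == "Vulkan"    # previously "GravityMark Vulkan"
assert friendly_test_param("direct3d12") == "DX12"  # previously "GravityMark DX12"

report = {
    "test": "GravityMark",
    "test_name": friendly_test_param("vulkan"),  # "Vulkan"
    "score": 4321,                               # illustrative value
    "unit": "score"
}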

View File

@@ -13,7 +13,9 @@ def setup_log_directory(log_dir: str) -> None:
        os.mkdir(log_dir)
-def write_report_json(log_dir: str, report_name: str, report_json: any) -> None:
+# change in future, this any bothers me, should be dict
+def write_report_json(
+        log_dir: str, report_name: str, report_json: any) -> None:
    """Writes the json output of a harness to the log directory"""
    with open(os.path.join(log_dir, report_name), "w", encoding="utf-8") as file:
        file.write(json.dumps(report_json))
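The harness call sites in this commit now pass str(LOG_DIR), presumably to match the log_dir: str annotation, while the new inline comment flags that the any hint should eventually become dict. A minimal sketch of the call as the harnesses use it after this change (the path and report values are illustrative):

from pathlib import Path
from harness_utils.output import write_report_json

LOG_DIR = Path(__file__).resolve().parent / "run"  # as in the harnesses above

report = {"test": "3DMark", "test_parameter": "TimeSpy", "unit": "score", "score": 12345}
write_report_json(str(LOG_DIR), "report.json", report)  # Path converted to str before the call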

View File

Binary image file changed (165 KiB before, 165 KiB after).

View File

@@ -23,8 +23,8 @@ logging.getLogger('').addHandler(console)
executable = os.path.join(INSTALL_DIR, EXECUTABLE)
report_dest = os.path.join(log_dir, "report.xml")
-argstr = f"/GGBENCH {report_dest}"
-result = subprocess.run([executable, "/GGBENCH", report_dest], check=False)
+ARGSTR = f"/GGBENCH {report_dest}"
+result = subprocess.run([executable, ARGSTR], check=False)
if result.returncode > 0:
    logging.error("Aida failed with exit code {result.returncode}")

View File

@@ -0,0 +1 @@
+# This is a non-game harness template