Mirror of https://github.com/LTTLabsOSS/markbench-tests.git
Synced 2026-01-07 21:24:06 -05:00
Non game harness update initial (#142)
Naming Scheme rework for non-game harnesses
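The heart of the naming-scheme rework is visible in the report dictionaries changed below: each harness now reports a fixed test name plus a separate field carrying the benchmark variant, instead of baking the variant into the name. A minimal before/after sketch in Python, assembled from the hunks in this diff (field values illustrative):

# Before: the variant was folded into the "test" string.
report = {
    "test": "3DMark Time Spy",     # from BENCHMARK_CONFIG[...]["test_name"]
    "score": 12345,                # illustrative value
    "unit": "score",
}

# After: a fixed harness name plus a parameter field.
report = {
    "test": "3DMark",              # likewise "Blender Render", "GravityMark"
    "test_parameter": "TimeSpy",   # taken from the --benchmark argument
    "score": 12345,
    "unit": "score",
}

Note that the GravityMark harness stores its parameter under "test_name" rather than "test_parameter" (see its hunk further down).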
@@ -20,7 +20,7 @@ from harness_utils.output import (
 )

 #####
-### Globals
+# Globals
 #####
 SCRIPT_DIR = Path(__file__).resolve().parent
 LOG_DIR = SCRIPT_DIR / "run"
@@ -31,25 +31,25 @@ CONFIG_DIR = SCRIPT_DIR / "config"
 BENCHMARK_CONFIG = {
     "TimeSpy": {
         "config": CONFIG_DIR / "timespy.3dmdef",
-        "process_name": "3DMarkTimeSpy.exe",
+        "process_name": "3DMarkTimeSpy.exe",
         "score_name": "TimeSpyPerformanceGraphicsScore",
         "test_name": "3DMark Time Spy"
     },
     "FireStrike": {
         "config": CONFIG_DIR / "firestrike.3dmdef",
-        "process_name": "3DMarkICFWorkload.exe",
+        "process_name": "3DMarkICFWorkload.exe",
         "score_name": "firestrikegraphicsscorep",
         "test_name": "3DMark Fire Strike"
     },
     "PortRoyal": {
         "config": CONFIG_DIR / "portroyal.3dmdef",
-        "process_name": "3DMarkPortRoyal.exe",
+        "process_name": "3DMarkPortRoyal.exe",
         "score_name": "PortRoyalPerformanceGraphicsScore",
         "test_name": "3DMark Port Royal"
     },
     "SolarBay": {
         "config": CONFIG_DIR / "solarbay.3dmdef",
-        "process_name": "3DMarkSolarBay.exe",
+        "process_name": "3DMarkSolarBay.exe",
         "score_name": "SolarBayPerformanceGraphicsScore",
         "test_name": "3DMark Solar Bay"
     }
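For orientation, the entries above are consumed later in this file; a simplified sketch (not a verbatim excerpt) of how the fields are used, based on the hunks below:

cfg = BENCHMARK_CONFIG["TimeSpy"]   # selected via the --benchmark argument
# cfg["process_name"] is what run_benchmark() polls for before raising its priority,
# and cfg["score_name"] is handed to get_score() together with REPORT_PATH below;
# cfg["config"] is the .3dmdef definition file (its use is outside this diff).
score = get_score(cfg["score_name"], REPORT_PATH)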
@@ -57,9 +57,10 @@ BENCHMARK_CONFIG = {
 RESULTS_FILENAME = "myresults.xml"
 REPORT_PATH = LOG_DIR / RESULTS_FILENAME

+
 def setup_logging():
     """setup logging"""
-    setup_log_directory(LOG_DIR)
+    setup_log_directory(str(LOG_DIR))
     logging.basicConfig(filename=LOG_DIR / "harness.log",
                         format=DEFAULT_LOGGING_FORMAT,
                         datefmt=DEFAULT_DATE_FORMAT,
@@ -73,8 +74,9 @@ def setup_logging():
 def get_arguments():
     """get arguments"""
     parser = ArgumentParser()
-    parser.add_argument(
-        "--benchmark", dest="benchmark", help="Benchmark test type", required=True, choices=BENCHMARK_CONFIG.keys())
+    parser.add_argument("--benchmark", dest="benchmark",
+                        help="Benchmark test type", required=True,
+                        choices=BENCHMARK_CONFIG.keys())
     argies = parser.parse_args()
     return argies

@@ -94,16 +96,17 @@ def run_benchmark(process_name, command_to_run):
     while True:
         now = time.time()
         elapsed = now - start_time
-        if elapsed >= 30: #seconds
+        if elapsed >= 30: # seconds
             raise ValueError("BenchMark subprocess did not start in time")
         process = is_process_running(process_name)
         if process is not None:
             process.nice(psutil.HIGH_PRIORITY_CLASS)
             break
         time.sleep(0.2)
-    _, _ = proc.communicate() # blocks until 3dmark exits
+    _, _ = proc.communicate()  # blocks until 3dmark exits
     return proc

+
 try:
     setup_logging()
     args = get_arguments()
@@ -118,7 +121,9 @@ try:
         logging.error("3DMark exited with return code %d", pr.returncode)
         sys.exit(pr.returncode)

-    score = get_score(BENCHMARK_CONFIG[args.benchmark]["score_name"], REPORT_PATH)
+    score = get_score(
+        BENCHMARK_CONFIG[args.benchmark]["score_name"],
+        REPORT_PATH)
     if score is None:
         logging.error("Could not find average FPS output!")
         sys.exit(1)
@@ -129,7 +134,8 @@ try:
     logging.info("Score was %s", score)

     report = {
-        "test": BENCHMARK_CONFIG[args.benchmark]["test_name"],
+        "test": "3DMark",
+        "test_parameter": args.benchmark,
        "unit": "score",
         "score": score,
         "start_time": seconds_to_milliseconds(strt),
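The run_benchmark hunk above waits for the named 3DMark process via is_process_running() and then raises its priority. That helper comes from harness_utils and is not part of this diff; a hypothetical sketch of the usual psutil pattern such a helper relies on (the repository's real implementation may differ):

import psutil

def is_process_running(process_name):
    """Return the matching psutil.Process, or None if no such process exists.
    Hypothetical sketch -- not the repository's actual helper."""
    for proc in psutil.process_iter(["name"]):
        if proc.info["name"] == process_name:
            return proc
    return None

The caller can then invoke proc.nice(psutil.HIGH_PRIORITY_CLASS), a priority constant psutil only defines on Windows.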
@@ -3,17 +3,17 @@ from pathlib import Path
 from blender_utils import BENCHMARK_CONFIG, find_blender, run_blender_render, download_scene
 from argparse import ArgumentParser
 import logging
 import os.path
 import sys
 import time

-sys.path.insert(1, os.path.join(sys.path[0], '..'))
+sys.path.insert(1, str(Path(sys.path[0]).parent))
 from harness_utils.output import DEFAULT_DATE_FORMAT, DEFAULT_LOGGING_FORMAT, write_report_json, seconds_to_milliseconds

 SCRIPT_DIR = Path(__file__).resolve().parent
 LOG_DIR = SCRIPT_DIR.joinpath("run")


 def setup_logging():
     """default logging config"""
     LOG_DIR.mkdir(exist_ok=True)
@@ -26,15 +26,18 @@ def setup_logging():
     console.setFormatter(formatter)
     logging.getLogger('').addHandler(console)

+
 VALID_DEVICES = ["CPU", "CUDA", "OPTIX", "HIP", "ONEAPI", "METAL"]

+
 def main():
     """entry point for test script"""
     parser = ArgumentParser()
     parser.add_argument("-d", "--device", dest="device",
                         help="device", metavar="device", required=True)
     parser.add_argument(
-        "--benchmark", dest="benchmark", help="Benchmark test type", metavar="benchmark", required=True)
+        "--benchmark", dest="benchmark", help="Benchmark test type",
+        metavar="benchmark", required=True)
     args = parser.parse_args()
     if args.device not in VALID_DEVICES:
         raise Exception(f"invalid device selection: {args.device}")
@@ -49,23 +52,25 @@ def main():
     score = run_blender_render(
         executable_path, LOG_DIR, args.device.upper(), benchmark)
     end_time = time.time()
-    logging.info(f'Finished rendering {args.benchmark} in %d seconds', (end_time - start_time))
+    logging.info(
+        f'Finished rendering {args.benchmark} in %d seconds',
+        (end_time - start_time))

     if score is None:
         raise Exception("no duration was found in the log to use as the score")

     report = {
-        "test": f"Blender {args.benchmark} Render {args.device.upper()}",
+        "test": "Blender Render",
+        "test_parameter": args.benchmark,
         "score": score,
         "unit": "seconds",
         "version": version,
         "device": args.device,
         "benchmark": args.benchmark,
         "start_time": seconds_to_milliseconds(start_time),
         "end_time": seconds_to_milliseconds(end_time)
     }

-    write_report_json(LOG_DIR, "report.json", report)
+    write_report_json(str(LOG_DIR), "report.json", report)


 if __name__ == "__main__":
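Under the reworked scheme the Blender harness separates the fixed test name from the scene being rendered. A hypothetical invocation and the resulting report shape (the script name and scene name below are illustrative assumptions, not taken from this diff):

# python blender.py --device OPTIX --benchmark monster   (hypothetical command line)
report = {
    "test": "Blender Render",
    "test_parameter": "monster",   # previously folded into the "test" string
    "score": 123.4,                # render duration in seconds (illustrative)
    "unit": "seconds",
    "device": "OPTIX",
    # plus version, benchmark, start_time and end_time as in the hunk above
}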
@@ -4,7 +4,7 @@ import getpass
 import subprocess
 import sys
 from pathlib import Path
-from gravitymark_utils import friendly_test_name, get_args, get_score, create_gravitymark_command
+from gravitymark_utils import friendly_test_param, get_args, get_score, create_gravitymark_command

 PARENT_DIR = str(Path(sys.path[0], ".."))
 sys.path.append(PARENT_DIR)
@@ -19,7 +19,7 @@ GRAVITYMARK_PATH = Path("C:/", "Program Files", "GravityMark", "bin")
 GRAVITYMARK_EXE = GRAVITYMARK_PATH / "GravityMark.exe"

 args = get_args()
-api = f"-{args.api}"
+API = f"-{args.api}"

 script_dir = Path(__file__).resolve().parent
 log_dir = script_dir / "run"
@@ -36,9 +36,11 @@ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
 console.setFormatter(formatter)
 logging.getLogger("").addHandler(console)

-gravitymark_log_path = Path("C:/Users", getpass.getuser(), ".GravityMark", "GravityMark.log")
+gravitymark_log_path = Path(
+    "C:/Users", getpass.getuser(),
+    ".GravityMark", "GravityMark.log")
 image_path = log_dir / "result.png"
-command = create_gravitymark_command(GRAVITYMARK_EXE, api, image_path)
+command = create_gravitymark_command(GRAVITYMARK_EXE, API, image_path)

 try:
     logging.info('Starting benchmark!')
@@ -47,7 +49,8 @@ try:
     result = subprocess.run(command, check=True, cwd=GRAVITYMARK_PATH)

     if result.returncode > 0:
-        logging.error("GravityMark exited with return code %d", result.returncode)
+        logging.error("GravityMark exited with return code %d",
+                      result.returncode)
         sys.exit(1)

     score = get_score(gravitymark_log_path)
@@ -57,12 +60,13 @@ try:
         sys.exit(1)

     report = {
-        "test": friendly_test_name(args.api),
+        "test": "GravityMark",
+        "test_name": friendly_test_param(args.api),
         "score": score,
         "unit": "score"
     }

-    write_report_json(log_dir, "report.json", report)
+    write_report_json(str(log_dir), "report.json", report)
 except Exception as e:
     logging.error("Something went wrong running the benchmark!")
     logging.exception(e)
@@ -23,16 +23,16 @@ CLI_OPTIONS = {
     "-status": "1"
 }

-def friendly_test_name(api: str) -> str:
+def friendly_test_param(api: str) -> str:
     """return a friendlier string given the API harness argument"""
     if api == "vulkan":
-        return "GravityMark Vulkan"
+        return "Vulkan"
     if api == "opengl":
-        return "GravityMark OpenGL"
+        return "OpenGL"
     if api == "direct3d12":
-        return "GravityMark DX12"
+        return "DX12"
     if api == "direct3d11":
-        return "GravityMark DX11"
+        return "DX11"
     return api

 def get_args() -> Namespace:
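After the rename, the helper returns only the API label and the harness composes the full report around it; a short sketch of how the pieces fit together (score value illustrative):

friendly_test_param("vulkan")       # -> "Vulkan"   (previously "GravityMark Vulkan")
friendly_test_param("direct3d12")   # -> "DX12"

report = {
    "test": "GravityMark",
    "test_name": friendly_test_param("vulkan"),   # this harness uses "test_name" rather than "test_parameter"
    "score": 123456,                              # illustrative value
    "unit": "score",
}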
@@ -13,7 +13,9 @@ def setup_log_directory(log_dir: str) -> None:
         os.mkdir(log_dir)


-def write_report_json(log_dir: str, report_name: str, report_json: any) -> None:
+# change in future, this any bothers me, should be dict
+def write_report_json(
+        log_dir: str, report_name: str, report_json: any) -> None:
     """Writes the json output of a harness to the log directory"""
     with open(os.path.join(log_dir, report_name), "w", encoding="utf-8") as file:
         file.write(json.dumps(report_json))
(An image file, 165 KiB both before and after, is also touched by this commit; the diff viewer shows no further detail.)
@@ -23,8 +23,8 @@ logging.getLogger('').addHandler(console)

 executable = os.path.join(INSTALL_DIR, EXECUTABLE)
 report_dest = os.path.join(log_dir, "report.xml")
-argstr = f"/GGBENCH {report_dest}"
-result = subprocess.run([executable, "/GGBENCH", report_dest], check=False)
+ARGSTR = f"/GGBENCH {report_dest}"
+result = subprocess.run([executable, ARGSTR], check=False)

 if result.returncode > 0:
     logging.error("Aida failed with exit code {result.returncode}")
zz_non_game_harness_template/harness.py (new file, 1 line added)
@@ -0,0 +1 @@
+# This is a non-game harness template