Mirror of https://github.com/LTTLabsOSS/markbench-tests.git (synced 2026-01-09 14:07:56 -05:00)
Support sequential Cinebench runs (#29)
Update Cinebench test harness to allow for sequential runs of different Cinebench performance tests
@@ -8,6 +8,7 @@ Changes are grouped by the date they are merged to the main branch of the repository.
 
 - Update strategies for marking start and end time in Returnal test harness
 - Minor changes to logging output in DOTA 2 and Total War: Warhammer III test harnesses
+- Update Cinebench test harness to allow for sequential runs of different Cinebench performance tests
 
 ## 2023-12-07
@@ -19,10 +19,15 @@ from harness_utils.output import (
 )
 
 CINEBENCH_PATH = r"C:\Cinebench2024\Cinebench.exe"
+GPU_TEST = "g_CinebenchGpuTest=true"
+CPU_1_TEST = "g_CinebenchCpu1Test=true"
+CPU_X_TEST = "g_CinebenchCpuXTest=true"
 TEST_OPTIONS = {
-    "cpu-single-core": "g_CinebenchCpu1Test=true",
-    "cpu-multi-core": "g_CinebenchCpuXTest=true",
-    "gpu": "g_CinebenchGpuTest=true"
+    "cpu-single-core": [CPU_1_TEST],
+    "cpu-multi-core": [CPU_X_TEST],
+    "cpu-both": [CPU_X_TEST, CPU_1_TEST],
+    "gpu": [GPU_TEST],
+    "all": [GPU_TEST, CPU_X_TEST, CPU_1_TEST]
 }
 DURATION_OPTION = "g_CinebenchMinimumTestDuration=1"
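With the new mapping, a single selection can fan out into several Cinebench invocations run back to back. The snippet below is a minimal standalone sketch (not part of the harness; constants copied from the hunk above) of how the "cpu-both" selection expands into two command lines:

CINEBENCH_PATH = r"C:\Cinebench2024\Cinebench.exe"
CPU_1_TEST = "g_CinebenchCpu1Test=true"
CPU_X_TEST = "g_CinebenchCpuXTest=true"
TEST_OPTIONS = {"cpu-both": [CPU_X_TEST, CPU_1_TEST]}  # multi-core run first, then single-core
DURATION_OPTION = "g_CinebenchMinimumTestDuration=1"

# One subprocess command per flag in the selected list.
for test_type in TEST_OPTIONS["cpu-both"]:
    print([CINEBENCH_PATH, test_type, DURATION_OPTION])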
@@ -46,51 +51,53 @@ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
 console.setFormatter(formatter)
 logging.getLogger("").addHandler(console)
 
-test_option = TEST_OPTIONS[args.test]
+test_types = TEST_OPTIONS[args.test]
 
 try:
     logging.info('Starting benchmark!')
-    setup_start_time = time.time()
-    with subprocess.Popen(
-        [CINEBENCH_PATH, test_option, DURATION_OPTION],
-        stdout=subprocess.PIPE,
-        stderr=subprocess.STDOUT,
-        bufsize=1,
-        universal_newlines=True) as proc:
-        logging.info("Cinebench started. Waiting for setup to finish to set process priority.")
-        for line in proc.stdout:
-            if "BEFORERENDERING" in line:
-                elapsed_setup_time = round(time.time() - setup_start_time, 2)
-                logging.info("Setup took %.2f seconds", elapsed_setup_time)
-                logging.info("Setting Cinebench process priority to high (PID: %s)", proc.pid)
-                process = psutil.Process(proc.pid)
-                process.nice(psutil.HIGH_PRIORITY_CLASS)
-                start_time = time.time()
-                break
-        out, _ = proc.communicate()
+    session_report = []
+    for test_type in test_types:
+        setup_start_time = time.time()
+        with subprocess.Popen(
+            [CINEBENCH_PATH, test_type, DURATION_OPTION],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            bufsize=1,
+            universal_newlines=True) as proc:
+            logging.info("Cinebench started. Waiting for setup to finish to set process priority.")
+            for line in proc.stdout:
+                if "BEFORERENDERING" in line:
+                    elapsed_setup_time = round(time.time() - setup_start_time, 2)
+                    logging.info("Setup took %.2f seconds", elapsed_setup_time)
+                    logging.info("Setting Cinebench process priority to high (PID: %s)", proc.pid)
+                    process = psutil.Process(proc.pid)
+                    process.nice(psutil.HIGH_PRIORITY_CLASS)
+                    start_time = time.time()
+                    break
+            out, _ = proc.communicate()
 
-    if proc.returncode > 0:
-        logging.error("Cinebench exited with return code %d", proc.returncode)
-        sys.exit(proc.returncode)
+        if proc.returncode > 0:
+            logging.error("Cinebench exited with return code %d", proc.returncode)
+            sys.exit(proc.returncode)
 
-    score = get_score(out)
-    if score is None:
-        logging.error("Could not find score in Cinebench output!")
-        sys.exit(1)
+        score = get_score(out)
+        if score is None:
+            logging.error("Could not find score in Cinebench output!")
+            sys.exit(1)
 
-    end_time = time.time()
-    elapsed_test_time = round(end_time - start_time, 2)
-    logging.info("Benchmark took %.2f seconds", elapsed_test_time)
+        end_time = time.time()
+        elapsed_test_time = round(end_time - start_time, 2)
+        logging.info("Benchmark took %.2f seconds", elapsed_test_time)
 
-    report = {
-        "test": args.test,
-        "score": score,
-        "start_time": seconds_to_milliseconds(start_time),
-        "end_time": seconds_to_milliseconds(end_time)
-    }
+        report = {
+            "test_arg": test_type,
+            "score": score,
+            "start_time": seconds_to_milliseconds(start_time),
+            "end_time": seconds_to_milliseconds(end_time)
+        }
+        session_report.append(report)
 
-    write_report_json(log_dir, "report.json", report)
+    write_report_json(log_dir, "report.json", session_report)
 except Exception as e:
     logging.error("Something went wrong running the benchmark!")
     logging.exception(e)
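Because every iteration appends its own entry, report.json is now a list with one object per test that was run rather than a single object. Below is a standalone sketch of reading it back; the file location is an assumption (the harness writes "report.json" into its own log directory via write_report_json), so adjust the path to your run's log directory:

import json
from pathlib import Path

report_path = Path("run") / "report.json"  # assumed location, not guaranteed by the harness

with report_path.open(encoding="utf-8") as f:
    session_report = json.load(f)

for entry in session_report:
    # Keys written by the harness: test_arg, score, start_time, end_time (milliseconds).
    duration_ms = entry["end_time"] - entry["start_time"]
    print(entry["test_arg"], entry["score"], duration_ms)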
@@ -6,8 +6,5 @@ output_dir: "run"
 options:
   - name: test
     type: select
-    values:
-      - "cpu-single-core"
-      - "cpu-multi-core"
-      - "gpu"
+    values: ["cpu-single-core", "cpu-multi-core", "cpu-both", "gpu", "all"]
     tooltip: Select which Cinebench test to run
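The select values offered in the manifest are expected to mirror the keys of TEST_OPTIONS in the harness script. A quick standalone sanity check, with both lists copied from the hunks above:

# Keys of TEST_OPTIONS (harness) and the manifest's select values, copied from this diff.
TEST_OPTION_KEYS = {"cpu-single-core", "cpu-multi-core", "cpu-both", "gpu", "all"}
MANIFEST_VALUES = ["cpu-single-core", "cpu-multi-core", "cpu-both", "gpu", "all"]

# Every selectable value must resolve to a sequence of Cinebench flags.
assert set(MANIFEST_VALUES) == TEST_OPTION_KEYS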