Merge branch 'main' into stress-automate

j-lin-lmg committed 2025-11-03 11:36:15 -08:00 (committed by GitHub)

97 changed files with 1625 additions and 581 deletions

.gitignore (vendored, 4 changes)
View File

@@ -14,6 +14,8 @@ flac-1.4.3-win.zip
 primesieve-12.3*
 y-cruncher v0.8.2.9522/
 y-cruncher v0.8.2.9522.zip
+y-cruncher v0.8.6.9545/
+y-cruncher.v0.8.6.9545b.zip
 basegame_no_intro_videos.archive
 *.blend
 0001.png
@@ -31,6 +33,8 @@ godot-4.2.1-stable/
 godot-4.2.1-stable.zip
 godot-4.3-stable/
 godot-4.3-stable.zip
+godot-4.4.1-stable/
+godot-4.4.1-stable.zip
 mingw64/
 # python

View File

@@ -20,7 +20,7 @@ from harness_utils.output import (
 )

 #####
-### Globals
+# Globals
 #####
 SCRIPT_DIR = Path(__file__).resolve().parent
 LOG_DIR = SCRIPT_DIR / "run"
@@ -31,25 +31,25 @@ CONFIG_DIR = SCRIPT_DIR / "config"
 BENCHMARK_CONFIG = {
     "TimeSpy": {
         "config": CONFIG_DIR / "timespy.3dmdef",
         "process_name": "3DMarkTimeSpy.exe",
         "score_name": "TimeSpyPerformanceGraphicsScore",
         "test_name": "3DMark Time Spy"
     },
     "FireStrike": {
         "config": CONFIG_DIR / "firestrike.3dmdef",
         "process_name": "3DMarkICFWorkload.exe",
         "score_name": "firestrikegraphicsscorep",
         "test_name": "3DMark Fire Strike"
     },
     "PortRoyal": {
         "config": CONFIG_DIR / "portroyal.3dmdef",
         "process_name": "3DMarkPortRoyal.exe",
         "score_name": "PortRoyalPerformanceGraphicsScore",
         "test_name": "3DMark Port Royal"
     },
     "SolarBay": {
         "config": CONFIG_DIR / "solarbay.3dmdef",
         "process_name": "3DMarkSolarBay.exe",
         "score_name": "SolarBayPerformanceGraphicsScore",
         "test_name": "3DMark Solar Bay"
     }
@@ -57,9 +57,10 @@ BENCHMARK_CONFIG = {
 RESULTS_FILENAME = "myresults.xml"
 REPORT_PATH = LOG_DIR / RESULTS_FILENAME

 def setup_logging():
     """setup logging"""
-    setup_log_directory(LOG_DIR)
+    setup_log_directory(str(LOG_DIR))
     logging.basicConfig(filename=LOG_DIR / "harness.log",
                         format=DEFAULT_LOGGING_FORMAT,
                         datefmt=DEFAULT_DATE_FORMAT,
@@ -73,8 +74,9 @@ def setup_logging():
 def get_arguments():
     """get arguments"""
     parser = ArgumentParser()
-    parser.add_argument(
-        "--benchmark", dest="benchmark", help="Benchmark test type", required=True, choices=BENCHMARK_CONFIG.keys())
+    parser.add_argument("--benchmark", dest="benchmark",
+                        help="Benchmark test type", required=True,
+                        choices=BENCHMARK_CONFIG.keys())
     argies = parser.parse_args()
     return argies
@@ -94,16 +96,17 @@ def run_benchmark(process_name, command_to_run):
     while True:
         now = time.time()
         elapsed = now - start_time
-        if elapsed >= 30: #seconds
+        if elapsed >= 30:  # seconds
             raise ValueError("BenchMark subprocess did not start in time")
         process = is_process_running(process_name)
         if process is not None:
             process.nice(psutil.HIGH_PRIORITY_CLASS)
             break
         time.sleep(0.2)
     _, _ = proc.communicate()  # blocks until 3dmark exits
     return proc

 try:
     setup_logging()
     args = get_arguments()
@@ -118,7 +121,9 @@ try:
logging.error("3DMark exited with return code %d", pr.returncode) logging.error("3DMark exited with return code %d", pr.returncode)
sys.exit(pr.returncode) sys.exit(pr.returncode)
score = get_score(BENCHMARK_CONFIG[args.benchmark]["score_name"], REPORT_PATH) score = get_score(
BENCHMARK_CONFIG[args.benchmark]["score_name"],
REPORT_PATH)
if score is None: if score is None:
logging.error("Could not find average FPS output!") logging.error("Could not find average FPS output!")
sys.exit(1) sys.exit(1)
@@ -129,7 +134,8 @@ try:
logging.info("Score was %s", score) logging.info("Score was %s", score)
report = { report = {
"test": BENCHMARK_CONFIG[args.benchmark]["test_name"], "test": "3DMark",
"test_parameter": args.benchmark,
"unit": "score", "unit": "score",
"score": score, "score": score,
"start_time": seconds_to_milliseconds(strt), "start_time": seconds_to_milliseconds(strt),

View File

@@ -26,7 +26,7 @@ formatter = logging.Formatter(LOGGING_FORMAT)
 console.setFormatter(formatter)
 logging.getLogger('').addHandler(console)

-EXECUTABLE = "7zr_24.07.exe"
+EXECUTABLE = "7zr_25.00.exe"
 ABS_EXECUTABLE_PATH = os.path.join(
     os.path.dirname(os.path.realpath(__file__)), EXECUTABLE)
@@ -35,18 +35,18 @@ if os.path.isfile(ABS_EXECUTABLE_PATH) is False:
"7-Zip executable not found, downloading from network drive") "7-Zip executable not found, downloading from network drive")
copy_from_network_drive() copy_from_network_drive()
command = f'{ABS_EXECUTABLE_PATH}' COMMAND = f'{ABS_EXECUTABLE_PATH}'
command = command.rstrip() COMMAND = COMMAND.rstrip()
t1 = time.time() t1 = time.time()
logging.info("Starting 7-Zip benchmark! This may take a minute or so...") logging.info("Starting 7-Zip benchmark! This may take a minute or so...")
with Popen([command, "b", "3"], cwd=os.path.dirname( with Popen([COMMAND, "b", "3"], cwd=os.path.dirname(
os.path.realpath(__file__)), stdout=subprocess.PIPE) as process: os.path.realpath(__file__)), stdout=subprocess.PIPE) as process:
stdout_data, stderr = process.communicate() stdout_data, stderr = process.communicate()
list_of_strings = stdout_data.decode('utf-8').splitlines() list_of_strings = stdout_data.decode('utf-8').splitlines()
SPEED_PATTERN = r'^Avr:\s*([0-9]*)\s.*\|\s*([0-9]*)\s.*$' SPEED_PATTERN = r'^Avr:\s*([0-9]*)\s.*\|\s*([0-9]*)\s.*$'
VERSION_PATTERN = r'7-Zip (\d+\.\d+).*' VERSION_PATTERN = r'7-Zip \(r\) (\d+\.\d+).*'
VERSION = "" VERSION = ""
SPEED_C = "" SPEED_C = ""

View File

@@ -6,7 +6,7 @@ import shutil
 def copy_from_network_drive():
     """Download 7zip from network drive"""
-    source = r"\\Labs\labs\01_Installers_Utilities\7ZIP\7zr_24.07.exe"
+    source = r"\\labs.lmg.gg\labs\01_Installers_Utilities\7ZIP\7zr_25.00.exe"
     root_dir = os.path.dirname(os.path.realpath(__file__))
-    destination = os.path.join(root_dir, "7zr_24.07.exe")
+    destination = os.path.join(root_dir, "7zr_25.00.exe")
     shutil.copyfile(source, destination)

View File

@@ -4,15 +4,20 @@ All notable changes to this project will be documented in this file.
 Changes are grouped by the date they are merged to the main branch of the repository and are ordered from newest to oldest. Dates use the ISO 8601 extended calendar date format, i.e. YYYY-MM-DD.

+## 2025-07-15
+- Updated 7-Zip to 25.00
+- Updated Y-Cruncher to v0.8.6.9545
+- Updated Godot compile to 4.4.1-stable
+
 ## 2025-04-02
 - Fixed Keras not finding the FPS in Shadow of the Tomb Raider
 - Added a screenshot function for Vulkan games for Keras-OCR via DXcam
 - Added Keras functionality to Red Dead Redemption 2
 - Added Strange Brigade (VK) to the team
-- Updated PugetBench harness to include Davinci and After Effects
+- Updated PugetBench harness to include DaVinci and After Effects
 - Updated PugetBench to more consistently find version numbers and include them in the report.json
 - Updated Rocket League harness to check what camera it is on and keep flipping through till it's on the correct one
-- Updated Procyon AI harnesses to have verison numbers in report.json
+- Updated Procyon AI harnesses to have version numbers in report.json
 - Replaced the hardcoded path for Cyberpunk2077 and instead use the get_app_install_location instead
 - Added DOTA 2 screenshotting for video config
 - Added beta harness of Marvel Rivals

View File

@@ -40,6 +40,7 @@ intro_videos = [
     os.path.join(VIDEO_PATH, "attract.bk2"),
     os.path.join(VIDEO_PATH, "cm_f1_sting.bk2")
 ]
+user.FAILSAFE = False

 def find_latest_result_file(base_path):

View File

@@ -24,8 +24,9 @@ LOG_DIRECTORY = SCRIPT_DIRECTORY.joinpath("run")
PROCESS_NAME = "alanwake2.exe" PROCESS_NAME = "alanwake2.exe"
EXECUTABLE_PATH = find_epic_executable() EXECUTABLE_PATH = find_epic_executable()
GAME_ID = "c4763f236d08423eb47b4c3008779c84%3A93f2a8c3547846eda966cb3c152a026e%3Adc9d2e595d0e4650b35d659f90d41059?action=launch&silent=true" GAME_ID = "c4763f236d08423eb47b4c3008779c84%3A93f2a8c3547846eda966cb3c152a026e%3Adc9d2e595d0e4650b35d659f90d41059?action=launch&silent=true"
gamefoldername = "AlanWake2" GAMEFOLDERNAME = "AlanWake2"
user.FAILSAFE = False
def setup_logging(): def setup_logging():
"""default logging config""" """default logging config"""
@@ -169,7 +170,7 @@ try:
"resolution": f"{width}x{height}", "resolution": f"{width}x{height}",
"start_time": round((start_time * 1000)), "start_time": round((start_time * 1000)),
"end_time": round((end_time * 1000)), "end_time": round((end_time * 1000)),
"game_version": find_eg_game_version(gamefoldername) "game_version": find_eg_game_version(GAMEFOLDERNAME)
} }
am.create_manifest() am.create_manifest()

View File

@@ -6,13 +6,13 @@ import time
 import getpass
 import glob
 import os
-from aotse_utils import read_current_resolution, find_score_in_log, delete_old_scores, get_args
+from aotse_utils import read_current_resolution, find_score_in_log, delete_old_scores, get_args, replace_exe, restore_exe

 PARENT_DIR = str(Path(sys.path[0], ".."))
 sys.path.append(PARENT_DIR)

 from harness_utils.keras_service import KerasService
-from harness_utils.steam import get_app_install_location, get_build_id, exec_steam_game
+from harness_utils.steam import get_build_id, exec_steam_game
 from harness_utils.output import (
     DEFAULT_DATE_FORMAT,
     DEFAULT_LOGGING_FORMAT,
@@ -32,7 +32,7 @@ CONFIG_FILENAME = "settings.ini"
 STEAM_GAME_ID = 507490
 SCRIPT_DIR = Path(__file__).resolve().parent
 LOG_DIR = SCRIPT_DIR / "run"
-EXECUTABLE = "AshesEscalation_DX12.exe"
+EXECUTABLE = "StardockLauncher.exe"
 CONFIG_DIR = SCRIPT_DIR / "config"
 BENCHMARK_CONFIG = {
     "GPU_Benchmark": {
@@ -49,7 +49,6 @@ BENCHMARK_CONFIG = {
     }
 }
 CFG = f"{CONFIG_PATH}\\{CONFIG_FILENAME}"
-GAME_DIR = get_app_install_location(STEAM_GAME_ID)

 def start_game():
     """Launch the game with no launcher or start screen"""
@@ -60,6 +59,7 @@ def run_benchmark():
"""Start the benchmark""" """Start the benchmark"""
# Start game via Steam and enter fullscreen mode # Start game via Steam and enter fullscreen mode
setup_start_time = time.time() setup_start_time = time.time()
replace_exe()
start_game() start_game()
time.sleep(10) time.sleep(10)
@@ -85,16 +85,14 @@ def run_benchmark():
logging.info("Benchmark started. Waiting for benchmark to complete.") logging.info("Benchmark started. Waiting for benchmark to complete.")
time.sleep(180) time.sleep(180)
# result = kerasService.wait_for_word("complete", timeout=240, interval=0.5)
# if not result:
# logging.info("Did not see the Benchmark Complete pop up. Did it run?")
# sys.exit(1)
test_end_time = time.time() - 2 test_end_time = time.time()
time.sleep(2) time.sleep(2)
elapsed_test_time = round((test_end_time - test_start_time), 2) elapsed_test_time = round((test_end_time - test_start_time), 2)
logging.info("Benchmark took %f seconds", elapsed_test_time) logging.info("Benchmark took %f seconds", elapsed_test_time)
time.sleep(3) time.sleep(3)
restore_exe()
return test_start_time, test_end_time return test_start_time, test_end_time
setup_log_directory(LOG_DIR) setup_log_directory(LOG_DIR)

View File

@@ -8,6 +8,7 @@ from pathlib import Path
 import psutil
 import glob
 import time
+import shutil
 from argparse import ArgumentParser

 PARENT_DIR = str(Path(sys.path[0], ".."))
@@ -15,14 +16,16 @@ sys.path.append(PARENT_DIR)
 sys.path.insert(1, os.path.join(sys.path[0], '..'))

+from harness_utils.steam import get_app_install_location
+
 USERNAME = getpass.getuser()
 SCRIPT_DIR = Path(__file__).resolve().parent
 LOG_DIR = SCRIPT_DIR.joinpath("run")
-PROCESS_NAME = "stellaris.exe"
-STEAM_GAME_ID = 281990
+STEAM_GAME_ID = 507490
 CONFIG_FILENAME = "settings.ini"
 USERNAME = getpass.getuser()
 CONFIG_PATH = Path(f"C:\\Users\\{USERNAME}\\Documents\\My Games\\Ashes of the Singularity - Escalation")
+EXE_PATH = Path(get_app_install_location(STEAM_GAME_ID))
 BENCHMARK_CONFIG = {
     "GPU_Benchmark": {
         "hardware": "GPU",
@@ -119,3 +122,36 @@ def wait_for_benchmark_process(test_name, process_name, timeout=60):
         # Wait for 1 second before checking again
         time.sleep(1)

+def replace_exe():
+    """Replaces the Ashes of the Singularity launcher exe with the DX12 exe
+    for immediate launching
+    """
+    check_backup = Path(f"{EXE_PATH}\\StardockLauncher_launcher.exe")
+    launcher_exe = Path(f"{EXE_PATH}\\StardockLauncher.exe")
+    dx12_exe = Path(f"{EXE_PATH}\\AshesEscalation_DX12.exe")
+    if not os.path.exists(check_backup):
+        os.rename(launcher_exe, check_backup)
+        shutil.copy(dx12_exe, launcher_exe)
+        logging.info("Replacing launcher file in %s", EXE_PATH)
+    elif os.path.exists(check_backup):
+        if not os.path.exists(launcher_exe):
+            shutil.copy(dx12_exe, launcher_exe)
+            logging.info("Replacing launcher file in %s", EXE_PATH)
+        else:
+            logging.info("Launcher already replaced with DX12 exe.")
+
+def restore_exe():
+    """Restores the launcher exe back to the original exe name to close the loop.
+    """
+    check_backup = Path(f"{EXE_PATH}\\StardockLauncher_launcher.exe")
+    launcher_exe = Path(f"{EXE_PATH}\\StardockLauncher.exe")
+    if not os.path.exists(check_backup):
+        logging.info("Launcher already restored or file does not exist.")
+    elif os.path.exists(check_backup):
+        if not os.path.exists(launcher_exe):
+            os.rename(check_backup, launcher_exe)
+            logging.info("Restoring launcher file in %s", EXE_PATH)
+        else:
+            os.remove(launcher_exe)
+            os.rename(check_backup, launcher_exe)
+            logging.info("Restoring launcher file in %s", EXE_PATH)

View File

@@ -1,6 +1,6 @@
 friendly_name: "Ashes of the Singularity: Escalation"
 executable: "aotse.py"
-process_name: "AshesEscalation_DX12.exe"
+process_name: "StardockLauncher.exe"
 output_dir: "run"
 options:
   - name: kerasHost

View File

@@ -1,39 +1,42 @@
-#pylint: disable=missing-module-docstring
-from argparse import ArgumentParser
+# pylint: disable=missing-module-docstring
 import logging
-import os
 from pathlib import Path
 import time
 import sys
 import re
 import pydirectinput as user
+import getpass

-sys.path.insert(1, os.path.join(sys.path[0], '..'))
-#pylint: disable=wrong-import-position
+sys.path.insert(1, str(Path(sys.path[0]).parent))
+# pylint: disable=wrong-import-position
 from harness_utils.process import terminate_processes
 from harness_utils.output import (
     format_resolution,
-    setup_log_directory,
+    setup_logging,
     write_report_json,
     seconds_to_milliseconds,
-    DEFAULT_LOGGING_FORMAT,
-    DEFAULT_DATE_FORMAT
 )
 from harness_utils.steam import get_build_id, exec_steam_game
 from harness_utils.keras_service import KerasService
 from harness_utils.artifacts import ArtifactManager, ArtifactType
-from harness_utils.misc import press_n_times
+from harness_utils.misc import (
+    press_n_times,
+    int_time,
+    find_word,
+    keras_args)

-SCRIPT_DIR = Path(__file__).resolve().parent
-LOG_DIR = SCRIPT_DIR.joinpath("run")
-PROCESS_NAME = "ACShadows.exe"
+USERNAME = getpass.getuser()
 STEAM_GAME_ID = 3159330
-CONFIG_LOCATION = "C:\\Users\\Administrator\\Documents\\Assassin's Creed Shadows"
+SCRIPT_DIR = Path(__file__).resolve().parent
+LOG_DIR = SCRIPT_DIR / "run"
+PROCESS_NAME = "ACShadows.exe"
+CONFIG_LOCATION = f"C:\\Users\\{USERNAME}\\Documents\\Assassin's Creed Shadows"
 CONFIG_FILENAME = "ACShadows.ini"
 user.FAILSAFE = False

 def read_current_resolution():
     """Reads resolutions settings from local game file"""
     height_pattern = re.compile(r"FullscreenWidth=(\d+)")
@@ -52,115 +55,132 @@ def read_current_resolution():
         width_value = width_match.group(1)
     return (height_value, width_value)

-def find_word(keras_service, word, msg, timeout = 30, interval = 1):
-    """function to call keras """
-    if keras_service.wait_for_word(word = word, timeout = timeout, interval = interval) is None:
-        logging.info(msg)
-        sys.exit(1)
-
-def int_time():
-    """rounds time to int"""
-    return int(time.time())
-
 def delete_videos():
     """deletes intro videos"""
-    base_dir = r"C:\Program Files (x86)\Steam\steamapps\common\Assassin's Creed Shadows"
-    videos_dir = os.path.join(base_dir, "videos")
-    videos_en_dir = os.path.join(videos_dir, "en")
+    base_dir = Path(
+        r"C:\Program Files (x86)\Steam\steamapps\common\Assassin's Creed Shadows")
+    videos_dir = base_dir / "videos"
+    videos_en_dir = videos_dir / "en"

     # List of video files to delete
     videos_to_delete = [
-        os.path.join(videos_dir, "ANVIL_Logo.webm"),
-        os.path.join(videos_dir, "INTEL_Logo.webm"),
-        os.path.join(videos_dir, "HUB_Bootflow_FranchiseIntro.webm"),
-        os.path.join(videos_dir, "UbisoftLogo.webm"),
-        os.path.join(videos_en_dir, "Epilepsy.webm"),
-        os.path.join(videos_en_dir, "warning_disclaimer.webm"),
-        os.path.join(videos_en_dir, "WarningSaving.webm")
+        videos_dir / "ANVIL_Logo.webm",
+        videos_dir / "INTEL_Logo.webm",
+        videos_dir / "HUB_Bootflow_FranchiseIntro.webm",
+        videos_dir / "HUB_Bootflow_AbstergoIntro.webm",
+        videos_dir / "UbisoftLogo.webm",
+        videos_en_dir / "Epilepsy.webm",
+        videos_en_dir / "warning_disclaimer.webm",
+        videos_en_dir / "WarningSaving.webm"
     ]

     for file_path in videos_to_delete:
-        if os.path.exists(file_path):
+        if file_path.exists():
             try:
-                os.remove(file_path)
-                logging.info("Deleted: %f", file_path)
+                file_path.unlink()
+                logging.info("Deleted: %s", file_path)
             except Exception as e:
-                logging.error("Error deleting %f: %e", file_path, e)
+                logging.error("Error deleting %s: %s", file_path, e)

 def move_benchmark_file():
     """moves html benchmark results to log folder"""
-    src_dir = r"C:\Users\Administrator\Documents\Assassin's Creed Shadows\benchmark_reports"
+    src_dir = Path(
+        f"C:\\Users\\{USERNAME}\\Documents\\Assassin's Creed Shadows\\benchmark_reports")

-    for filename in os.listdir(src_dir):
-        src_path = os.path.join(src_dir, filename)
-        dest_path = os.path.join(LOG_DIR, filename)
+    for src_path in src_dir.iterdir():
+        dest_path = LOG_DIR / src_path.name

-        if os.path.isfile(src_path):
+        if src_path.is_file():
             try:
-                os.rename(src_path, dest_path)
+                src_path.rename(dest_path)
                 logging.info("Benchmark HTML moved")
             except Exception as e:
-                logging.error("Failed to move %s: %e", src_path, e)
+                logging.error("Failed to move %s: %s", src_path, e)
         else:
             logging.error("Benchmark HTML not found.")

 def start_game():
     """Starts the game process"""
     exec_steam_game(STEAM_GAME_ID)
     logging.info("Launching Game from Steam")

 def navi_settings(am):
     """navigates and takes pictures of settings"""
     user.press("space")
     time.sleep(1)
-    am.take_screenshot("display1.png", ArtifactType.CONFIG_IMAGE, "display settings 1")
+    am.take_screenshot(
+        "display1.png", ArtifactType.CONFIG_IMAGE, "display settings 1")
     press_n_times("down", 13, 0.3)
-    am.take_screenshot("display2.png", ArtifactType.CONFIG_IMAGE, "display settings 2")
+    am.take_screenshot(
+        "display2.png", ArtifactType.CONFIG_IMAGE, "display settings 2")
     press_n_times("down", 4, 0.3)
-    am.take_screenshot("display3.png", ArtifactType.CONFIG_IMAGE, "display settings 3")
+    am.take_screenshot(
+        "display3.png", ArtifactType.CONFIG_IMAGE, "display settings 3")
     user.press("c")
     time.sleep(1)
-    am.take_screenshot("scalability1.png", ArtifactType.CONFIG_IMAGE, "scalability settings 1")
+    am.take_screenshot(
+        "scalability1.png", ArtifactType.CONFIG_IMAGE,
+        "scalability settings 1")
     press_n_times("down", 10, 0.3)
-    am.take_screenshot("scalability2.png", ArtifactType.CONFIG_IMAGE, "scalability settings 2")
+    am.take_screenshot(
+        "scalability2.png", ArtifactType.CONFIG_IMAGE,
+        "scalability settings 2")
     press_n_times("down", 6, 0.3)
-    am.take_screenshot("scalability3.png", ArtifactType.CONFIG_IMAGE, "scalability settings 3")
+    am.take_screenshot(
+        "scalability3.png", ArtifactType.CONFIG_IMAGE,
+        "scalability settings 3")
     press_n_times("down", 5, 0.3)
-    am.take_screenshot("scalability4.png", ArtifactType.CONFIG_IMAGE, "scalability settings 4")
+    am.take_screenshot(
+        "scalability4.png", ArtifactType.CONFIG_IMAGE,
+        "scalability settings 4")
     user.press("esc")

 def run_benchmark(keras_service):
     """runs the benchmark"""
     delete_videos()
     start_game()
     setup_start_time = int_time()
     am = ArtifactManager(LOG_DIR)
-    time.sleep(20)
+    time.sleep(15)

-    if keras_service.wait_for_word(word="animus", timeout=30, interval = 1) is None:
+    if keras_service.wait_for_word(
+            word="hardware", timeout=30, interval=1) is None:
+        logging.info("did not find hardware")
+    else:
+        user.mouseDown()
+        time.sleep(0.2)
+        user.press("space")
+
+    if keras_service.wait_for_word(
+            word="animus", timeout=130, interval=1) is None:
         logging.info("did not find main menu")
         sys.exit(1)

     user.press("f1")
-    find_word(keras_service, "system", "couldn't find system")
+    find_word(keras_service, "system", "Couldn't find 'System' button")
     user.press("down")
@@ -168,10 +188,16 @@ def run_benchmark(keras_service):
user.press("space") user.press("space")
find_word(keras_service, "benchmark", "couldn't find benchmark") find_word(
keras_service, "benchmark",
"couldn't find 'benchmark' on screen before settings")
navi_settings(am) navi_settings(am)
find_word(
keras_service, "benchmark",
"couldn't find 'benchmark' on screen after settings")
user.press("down") user.press("down")
time.sleep(1) time.sleep(1)
@@ -180,9 +206,10 @@ def run_benchmark(keras_service):
     setup_end_time = int_time()
     elapsed_setup_time = setup_end_time - setup_start_time
-    logging.info("Setup took %f seconds", elapsed_setup_time)
+    logging.info("Setup took %d seconds", elapsed_setup_time)

-    if keras_service.wait_for_word(word = "benchmark", timeout = 30, interval = 1) is None:
+    if keras_service.wait_for_word(
+            word="benchmark", timeout=50, interval=1) is None:
         logging.info("did not find benchmark")
         sys.exit(1)
@@ -190,16 +217,16 @@ def run_benchmark(keras_service):
     time.sleep(100)

-    if keras_service.wait_for_word(word = "results", timeout = 30, interval = 1) is None:
-        logging.info("did not find end screen")
-        sys.exit(1)
+    find_word(keras_service, "results", "did not find results screen", 60)

-    test_end_time = int_time()
+    test_end_time = int_time() - 2
     elapsed_test_time = test_end_time - test_start_time
-    logging.info("Benchmark took %f seconds", elapsed_test_time)
+    logging.info("Benchmark took %d seconds", elapsed_test_time)

-    am.take_screenshot("benchmark_results.png", ArtifactType.RESULTS_IMAGE, "benchmark results")
+    am.take_screenshot(
+        "benchmark_results.png", ArtifactType.RESULTS_IMAGE,
+        "benchmark results")

     user.press("x")
@@ -217,28 +244,11 @@ def run_benchmark(keras_service):
     return test_start_time, test_end_time

-def setup_logging():
-    """setup logging"""
-    setup_log_directory(LOG_DIR)
-    logging.basicConfig(filename=f'{LOG_DIR}/harness.log',
-                        format=DEFAULT_LOGGING_FORMAT,
-                        datefmt=DEFAULT_DATE_FORMAT,
-                        level=logging.DEBUG)
-    console = logging.StreamHandler()
-    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
-    console.setFormatter(formatter)
-    logging.getLogger('').addHandler(console)
-
 def main():
     """entry point"""
-    parser = ArgumentParser()
-    parser.add_argument("--kerasHost", dest="keras_host",
-                        help="Host for Keras OCR service", required=True)
-    parser.add_argument("--kerasPort", dest="keras_port",
-                        help="Port for Keras OCR service", required=True)
-    args = parser.parse_args()
-    keras_service = KerasService(args.keras_host, args.keras_port)
+    keras_service = KerasService(
+        keras_args().keras_host, keras_args().keras_port)
     start_time, endtime = run_benchmark(keras_service)
     height, width = read_current_resolution()
     report = {
@@ -249,9 +259,10 @@ def main():
     }
     write_report_json(LOG_DIR, "report.json", report)

 if __name__ == "__main__":
     try:
-        setup_logging()
+        setup_logging(LOG_DIR)
         main()
     except Exception as ex:
         logging.error("Something went wrong running the benchmark!")

View File

@@ -3,17 +3,17 @@ from pathlib import Path
 from blender_utils import BENCHMARK_CONFIG, find_blender, run_blender_render, download_scene
 from argparse import ArgumentParser
 import logging
-import os.path
 import sys
 import time

-sys.path.insert(1, os.path.join(sys.path[0], '..'))
+sys.path.insert(1, str(Path(sys.path[0]).parent))
 from harness_utils.output import DEFAULT_DATE_FORMAT, DEFAULT_LOGGING_FORMAT, write_report_json, seconds_to_milliseconds

 SCRIPT_DIR = Path(__file__).resolve().parent
 LOG_DIR = SCRIPT_DIR.joinpath("run")

 def setup_logging():
     """default logging config"""
     LOG_DIR.mkdir(exist_ok=True)
@@ -26,15 +26,18 @@ def setup_logging():
     console.setFormatter(formatter)
     logging.getLogger('').addHandler(console)

 VALID_DEVICES = ["CPU", "CUDA", "OPTIX", "HIP", "ONEAPI", "METAL"]

 def main():
     """entry point for test script"""
     parser = ArgumentParser()
     parser.add_argument("-d", "--device", dest="device",
                         help="device", metavar="device", required=True)
     parser.add_argument(
-        "--benchmark", dest="benchmark", help="Benchmark test type", metavar="benchmark", required=True)
+        "--benchmark", dest="benchmark", help="Benchmark test type",
+        metavar="benchmark", required=True)
     args = parser.parse_args()
     if args.device not in VALID_DEVICES:
         raise Exception(f"invalid device selection: {args.device}")
@@ -49,23 +52,25 @@ def main():
     score = run_blender_render(
         executable_path, LOG_DIR, args.device.upper(), benchmark)
     end_time = time.time()
-    logging.info(f'Finished rendering {args.benchmark} in %d seconds', (end_time - start_time))
+    logging.info(
+        f'Finished rendering {args.benchmark} in %d seconds',
+        (end_time - start_time))

     if score is None:
         raise Exception("no duration was found in the log to use as the score")

     report = {
-        "test": f"Blender {args.benchmark} Render {args.device.upper()}",
+        "test": "Blender Render",
+        "test_parameter": args.benchmark,
         "score": score,
         "unit": "seconds",
         "version": version,
         "device": args.device,
-        "benchmark": args.benchmark,
         "start_time": seconds_to_milliseconds(start_time),
         "end_time": seconds_to_milliseconds(end_time)
     }
-    write_report_json(LOG_DIR, "report.json", report)
+    write_report_json(str(LOG_DIR), "report.json", report)

 if __name__ == "__main__":

View File

@@ -80,7 +80,7 @@ def download_scene(scene: BlenderScene) -> None:
 def copy_scene_from_network_drive(file_name, destination):
     """copy blend file from network drive"""
-    network_dir = Path("\\\\Labs\\labs\\03_ProcessingFiles\\Blender Render")
+    network_dir = Path("\\\\labs.lmg.gg\\labs\\03_ProcessingFiles\\Blender Render")
     source_path = network_dir.joinpath(file_name)
     logging.info("Copying %s from %s", file_name, source_path)
     shutil.copyfile(source_path, destination)
@@ -97,16 +97,17 @@ def time_to_seconds(time_string):
     return seconds

-def run_blender_render(executable_path: Path, log_directory: Path, device: str, benchmark: BlenderScene) -> str:
+def run_blender_render(executable_path: Path, log_directory: Path, device: str,
+                       benchmark: BlenderScene) -> str:
     """Execute the blender render of barbershop, returns the duration as string"""
     blend_log = log_directory.joinpath("blender.log")
     blend_path = SCRIPT_DIR.joinpath(benchmark.file_name)
     cmd_line = f'"{str(executable_path)}" -b -E CYCLES -y "{str(blend_path)}" -f 1 -- --cycles-device {device} --cycles-print-stats'
-    with open(blend_log,'w' , encoding="utf-8") as f_obj:
+    with open(blend_log, 'w', encoding="utf-8") as f_obj:
         subprocess.run(cmd_line, stdout=f_obj, text=True, check=True)

     # example: Time: 02:59.57 (Saving: 00:00.16)
-    time_regex = r"Time: (.*) \(Saving.*\)"
+    time_regex = r".*Time:\s+([\d:.]+)\s+\(Saving.*\)"
     time = None
     with open(blend_log, 'r', encoding="utf-8") as file:
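For context, the loosened regex still matches the example line from the comment while tolerating text before "Time:" and restricting the capture to digits, colons, and dots. A quick illustrative check (the input line here is made up to resemble a Cycles log line):

    import re

    line = "| Time: 02:59.57 (Saving: 00:00.16) | remainder"  # hypothetical render log line
    match = re.match(r".*Time:\s+([\d:.]+)\s+\(Saving.*\)", line)
    if match:
        print(match.group(1))  # -> 02:59.57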

View File

@@ -117,10 +117,12 @@ for report in json_array:
     scene_report = {
         "timestamp": report['timestamp'],
         "version": blender_version,
-        "test": f"Blender Benchmark {report['scene']['label']} {DEVICE_TYPE}",
+        "test": "Blender Benchmark",
+        "test_parameter": f"{report['scene']['label']} ",
         "score": round(report['stats']['samples_per_minute'], 2),
         "unit": "samples per minute",
-        "device": report['device_info']['compute_devices'][0]['name']
+        "device": report['device_info']['compute_devices'][0]['name'],
+        "device_type": DEVICE_TYPE,
     }
     logging.info(json.dumps(scene_report, indent=2))

View File

@@ -33,7 +33,8 @@ DURATION_OPTION = "g_CinebenchMinimumTestDuration=1"
 parser = ArgumentParser()
 parser.add_argument(
-    "-t", "--test", dest="test", help="Cinebench test type", required=True, choices=TEST_OPTIONS.keys())
+    "-t", "--test", dest="test", help="Cinebench test type", required=True,
+    choices=TEST_OPTIONS.keys())
 args = parser.parse_args()

 script_dir = Path(__file__).resolve().parent
@@ -63,21 +64,30 @@ try:
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=1,
            universal_newlines=True) as proc:
-        logging.info("Cinebench started. Waiting for setup to finish to set process priority.")
+        logging.info(
+            "Cinebench started. Waiting for setup to finish to set process priority.")
+        START_TIME = 0
+        if proc.stdout is None:
+            logging.error("Cinebench process did not start correctly!")
+            sys.exit(1)
         for line in proc.stdout:
             if "BEFORERENDERING" in line:
-                elapsed_setup_time = round(time.time() - setup_start_time, 2)
+                elapsed_setup_time = round(
+                    time.time() - setup_start_time, 2)
                 logging.info("Setup took %.2f seconds", elapsed_setup_time)
-                logging.info("Setting Cinebench process priority to high (PID: %s)", proc.pid)
+                logging.info(
+                    "Setting Cinebench process priority to high (PID: %s)",
+                    proc.pid)
                 process = psutil.Process(proc.pid)
                 process.nice(psutil.HIGH_PRIORITY_CLASS)
-                start_time = time.time()
+                START_TIME = time.time()
                 break
         out, _ = proc.communicate()

     if proc.returncode > 0:
-        logging.warning("Cinebench exited with return code %d", proc.returncode)
+        logging.warning(
+            "Cinebench exited with return code %d", proc.returncode)

     score = get_score(out)
     if score is None:
@@ -85,19 +95,20 @@ try:
         sys.exit(1)

     end_time = time.time()
-    elapsed_test_time = round(end_time - start_time, 2)
+    elapsed_test_time = round(end_time - START_TIME, 2)
     logging.info("Benchmark took %.2f seconds", elapsed_test_time)

     report = {
-        "test": friendly_test_name(test_type),
+        "test": "Cinebench 2024",
+        "test_parameter": friendly_test_name(test_type),
         "score": score,
         "unit": "score",
-        "start_time": seconds_to_milliseconds(start_time),
+        "start_time": seconds_to_milliseconds(START_TIME),
         "end_time": seconds_to_milliseconds(end_time)
     }
     session_report.append(report)
-    write_report_json(log_dir, "report.json", session_report)
+    write_report_json(str(log_dir), "report.json", session_report)
 except Exception as e:
     logging.error("Something went wrong running the benchmark!")
     logging.exception(e)

View File

@@ -20,7 +20,7 @@ from harness_utils.output import (
     DEFAULT_DATE_FORMAT
 )
 from harness_utils.steam import exec_steam_game, get_build_id
-from harness_utils.keras_service import KerasService
+from harness_utils.keras_service import KerasService, ScreenSplitConfig, ScreenShotDivideMethod, ScreenShotQuadrant
 from harness_utils.artifacts import ArtifactManager, ArtifactType
 from harness_utils.misc import mouse_scroll_n_times
@@ -29,6 +29,10 @@ SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR.joinpath("run") LOG_DIR = SCRIPT_DIR.joinpath("run")
PROCESS_NAME = "cities2.exe" PROCESS_NAME = "cities2.exe"
STEAM_GAME_ID = 949230 STEAM_GAME_ID = 949230
top_left_keras = ScreenSplitConfig(
divide_method=ScreenShotDivideMethod.QUADRANT,
quadrant=ScreenShotQuadrant.TOP_LEFT)
launcher_files = [ launcher_files = [
"bootstrapper-v2.exe", "bootstrapper-v2.exe",
"launcher.exe", "launcher.exe",
@@ -39,7 +43,6 @@ save_files = [
"Benchmark.cok.cid" "Benchmark.cok.cid"
] ]
config_files = [ config_files = [
"continue_game.json",
"UserState.coc" "UserState.coc"
] ]
@@ -89,12 +92,41 @@ def run_benchmark(keras_service):
result = keras_service.wait_for_word("paradox", interval=0.5, timeout=100) result = keras_service.wait_for_word("paradox", interval=0.5, timeout=100)
if not result: if not result:
logging.info("Could not find the paused notification. Unable to mark start time!") logging.info("Could not find the Paradox logo. Did the game launch?")
sys.exit(1) sys.exit(1)
user.press("esc") user.press("esc")
user.press("esc") user.press("esc")
user.press("esc") user.press("esc")
time.sleep(20) time.sleep(15)
result = keras_service.wait_for_word("new", interval=0.5, timeout=100)
if not result:
logging.info("Did not find the main menu. Did the game crash?")
sys.exit(1)
result = keras_service.look_for_word("load", attempts=10, interval=1)
if not result:
logging.info("Did not find the load game option. Did the save game copy?")
sys.exit(1)
# Navigate to load save menu
gui.moveTo(result["x"], result["y"])
time.sleep(0.2)
gui.click()
time.sleep(0.2)
result = keras_service.look_for_word("benchmark", attempts=10, interval=1, split_config=top_left_keras)
if not result:
logging.info("Did not find the save game original date. Did the keras click correctly?")
sys.exit(1)
# Loading the game
gui.moveTo(result["x"], result["y"])
time.sleep(0.2)
gui.click()
time.sleep(0.2)
user.press("enter")
time.sleep(10)
result = keras_service.wait_for_word("grand", interval=0.5, timeout=100) result = keras_service.wait_for_word("grand", interval=0.5, timeout=100)
if not result: if not result:
@@ -102,6 +134,8 @@ def run_benchmark(keras_service):
         sys.exit(1)
     elapsed_setup_time = round(int(time.time()) - setup_start_time, 2)
     logging.info("Setup took %f seconds", elapsed_setup_time)
+    gui.moveTo(result["x"], result["y"])
+    time.sleep(0.2)
     time.sleep(2)
     logging.info('Starting benchmark')
     user.press("3")

View File

@@ -18,7 +18,7 @@ LAUNCHCONFIG_LOCATION = Path(f"{LOCALAPPDATA}\\Paradox Interactive")
 INSTALL_LOCATION = Path(get_app_install_location(STEAM_GAME_ID))
 APPDATA = os.getenv("APPDATA")
 CONFIG_LOCATION = Path(f"{APPDATA}\\..\\LocalLow\\Colossal Order\\Cities Skylines II")
-SAVE_LOCATION = Path(f"{CONFIG_LOCATION}\\Saves")
+SAVE_LOCATION = Path(f"{CONFIG_LOCATION}\\Saves\\76561199517889423")
 CONFIG_FILENAME = "launcher-settings.json"
@@ -37,7 +37,7 @@ def read_current_resolution():
 def copy_continuegame(config_files: list[str]) -> None:
-    """Copy launcher files to game directory"""
+    """Copy continue game files to config directory"""
     for file in config_files:
         try:
             src_path = SCRIPT_DIRECTORY / "config" / file
@@ -84,7 +84,7 @@ def copy_launcherpath():
             shutil.copy(src_path, dest_path)
             #os.chmod(dest_path, stat.S_IREAD)
         except OSError as err:
-            logging.error("Could not copy the launcherpath file. %s", e)
+            logging.error("Could not copy the launcherpath file. %s", err)
             raise err
@@ -98,5 +98,5 @@ def copy_benchmarksave(save_files: list[str]) -> None:
logging.info("Copying: %s -> %s", file, dest_path) logging.info("Copying: %s -> %s", file, dest_path)
shutil.copy(src_path, dest_path) shutil.copy(src_path, dest_path)
except OSError as err: except OSError as err:
logging.error("Could not copy launcher files. %s", err) logging.error("Could not copy the save game. %s", err)
raise err raise err

View File

@@ -1,5 +1,5 @@
 User Settings
 {
-    "lastSaveGameMetadata": "4d6c3ccb7ecaebb43efcf0913bb053b0",
+    "lastSaveGameMetadata": "e284bbf6f86bd366ca930783985b849c",
     "naturalDisasters": false
 }

View File

@@ -1,6 +0,0 @@
-{
-    "title": "Benchmark",
-    "desc": "Population: 99766 Money: \u00a280542653",
-    "date": "2024-08-20T14:35:47",
-    "rawGameVersion": "1.1.7f1"
-}

View File

@@ -1,5 +1,5 @@
 {
-    "continuelastsave": true,
+    "continuelastsave": false,
     "noworkshop": false,
     "disablemods": false,
     "nolog": false,

Binary file not shown.

View File

@@ -1 +1 @@
-9a03f3dfd36a585d8397c021d92d4184
+582572b506bb32028036437e27f475ae

View File

@@ -29,7 +29,14 @@ PROCESS_NAME = "cs2.exe"
 STEAM_GAME_ID = 730
 STEAM_USER_ID = get_registry_active_user()
-cfg = Path(get_steam_folder_path(), "userdata", str(STEAM_USER_ID), str(STEAM_GAME_ID), "local", "cfg", "cs2_video.txt")
+cfg = Path(
+    get_steam_folder_path(),
+    "userdata", str(STEAM_USER_ID),
+    str(STEAM_GAME_ID),
+    "local", "cfg", "cs2_video.txt")
+
+user.FAILSAFE = False

 def setup_logging():
     """default logging config"""
@@ -66,7 +73,7 @@ def run_benchmark(keras_service):
result = keras_service.wait_for_word("play", timeout=30, interval=0.1) result = keras_service.wait_for_word("play", timeout=30, interval=0.1)
if not result: if not result:
logging.info("Did not find the play menu. Did the game load?") logging.info("Did not find the play menu. Did the game load?")
sys.exit(1) raise RuntimeError
height, width = get_resolution() height, width = get_resolution()
location = None location = None
@@ -74,14 +81,25 @@ def run_benchmark(keras_service):
     # We check the resolution so we know which screenshot to use for the locate on screen function
     match width:
         case "1920":
-            location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\settings_1080.png")
+            location = gui.locateOnScreen(
+                f"{SCRIPT_DIR}\\screenshots\\settings_1080.png", minSearchTime=5, confidence=0.6)
         case "2560":
-            location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\settings_1440.png")
+            location = gui.locateOnScreen(
+                f"{SCRIPT_DIR}\\screenshots\\settings_1440.png", minSearchTime=5, confidence=0.6)
         case "3840":
-            location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\settings_2160.png")
+            location = gui.locateOnScreen(
+                f"{SCRIPT_DIR}\\screenshots\\settings_2160.png", minSearchTime=5, confidence=0.6)
         case _:
-            logging.error("Could not find the settings cog. The game resolution is currently %s, %s. Are you using a standard resolution?", height, width)
-            sys.exit(1)
+            logging.error(
+                "Could not find the settings cog. The game resolution is currently %s, %s. Are you using a standard resolution?",
+                height, width)
+            raise RuntimeError
+
+    if location is None:
+        logging.error(
+            "Could not find the settings cog. The game resolution is currently %s, %s. Are you using a standard resolution?",
+            height, width)
+        raise RuntimeError

     click_me = gui.center(location)
     gui.moveTo(click_me.x, click_me.y)
@@ -93,7 +111,7 @@ def run_benchmark(keras_service):
result = keras_service.look_for_word(word="video", attempts=10, interval=1) result = keras_service.look_for_word(word="video", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the video menu button. Did Keras enter settings correctly?") logging.info("Did not find the video menu button. Did Keras enter settings correctly?")
sys.exit(1) raise RuntimeError
gui.moveTo(result["x"], result["y"]) gui.moveTo(result["x"], result["y"])
gui.mouseDown() gui.mouseDown()
@@ -103,14 +121,14 @@ def run_benchmark(keras_service):
if keras_service.wait_for_word(word="brightness", timeout=30, interval=1) is None: if keras_service.wait_for_word(word="brightness", timeout=30, interval=1) is None:
logging.info("Did not find the video settings menu. Did the menu get stuck?") logging.info("Did not find the video settings menu. Did the menu get stuck?")
sys.exit(1) raise RuntimeError
am.take_screenshot("video.png", ArtifactType.CONFIG_IMAGE, "picture of video settings") am.take_screenshot("video.png", ArtifactType.CONFIG_IMAGE, "picture of video settings")
result = keras_service.look_for_word(word="advanced", attempts=10, interval=1) result = keras_service.look_for_word(word="advanced", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the advanced video menu. Did Keras click correctly?") logging.info("Did not find the advanced video menu. Did Keras click correctly?")
sys.exit(1) raise RuntimeError
gui.moveTo(result["x"], result["y"]) gui.moveTo(result["x"], result["y"])
gui.mouseDown() gui.mouseDown()
@@ -118,12 +136,14 @@ def run_benchmark(keras_service):
     gui.mouseUp()
     time.sleep(0.2)
-    am.take_screenshot("advanced_video_1.png", ArtifactType.CONFIG_IMAGE, "first picture of advanced video settings")
+    am.take_screenshot("advanced_video_1.png", ArtifactType.CONFIG_IMAGE,
+                       "first picture of advanced video settings")

     result = keras_service.look_for_word(word="boost", attempts=10, interval=1)
     if not result:
-        logging.info("Did not find the keyword 'Boost' in the advanced video menu. Did Keras click correctly?")
-        sys.exit(1)
+        logging.info(
+            "Did not find the keyword 'Boost' in the advanced video menu. Did Keras click correctly?")
+        raise RuntimeError

     gui.moveTo(result["x"], result["y"])
     time.sleep(1)
@@ -131,9 +151,11 @@ def run_benchmark(keras_service):
     time.sleep(1)

     if keras_service.wait_for_word(word="particle", timeout=30, interval=1) is None:
-        logging.info("Did not find the keyword 'Particle' in advanced video menu. Did Keras scroll correctly?")
-        sys.exit(1)
-    am.take_screenshot("advanced_video_2.png", ArtifactType.CONFIG_IMAGE, "second picture of advanced video settings")
+        logging.info(
+            "Did not find the keyword 'Particle' in advanced video menu. Did Keras scroll correctly?")
+        raise RuntimeError
+    am.take_screenshot("advanced_video_2.png", ArtifactType.CONFIG_IMAGE,
+                       "second picture of advanced video settings")

     logging.info('Starting benchmark')
     user.press("`")
@@ -149,7 +171,7 @@ def run_benchmark(keras_service):
     time.sleep(3)
     if keras_service.wait_for_word(word="benchmark", timeout=30, interval=0.1) is None:
         logging.error("Didn't see the title of the benchmark. Did the map load?")
-        sys.exit(1)
+        raise RuntimeError

     setup_end_time = int(time.time())
     elapsed_setup_time = round(setup_end_time - setup_start_time, 2)
@@ -166,7 +188,7 @@ def run_benchmark(keras_service):
     test_start_time = int(time.time())
     logging.info("Saw 'lets roll'! Marking the time.")
     time.sleep(112)  # sleep duration during gameplay

+    # Default fallback end time
     test_end_time = int(time.time())
@@ -174,10 +196,10 @@ def run_benchmark(keras_service):
result = keras_service.wait_for_word(word="console", timeout=30, interval=0.1) result = keras_service.wait_for_word(word="console", timeout=30, interval=0.1)
if result is None: if result is None:
logging.error("The console didn't open. Please check settings and try again.") logging.error("The console didn't open. Please check settings and try again.")
sys.exit(1) raise RuntimeError
else:
test_end_time = int(time.time()) test_end_time = int(time.time())
logging.info("The console opened. Marking end time.") logging.info("The console opened. Marking end time.")
# allow time for result screen to populate # allow time for result screen to populate
time.sleep(8) time.sleep(8)
@@ -227,3 +249,5 @@ if __name__ == "__main__":
logging.error("something went wrong running the benchmark!") logging.error("something went wrong running the benchmark!")
logging.exception(ex) logging.exception(ex)
sys.exit(1) sys.exit(1)
finally:
terminate_processes(PROCESS_NAME)

View File

@@ -46,7 +46,8 @@
"Stellaris", "Stellaris",
"shusaura", "shusaura",
"wukong", "wukong",
"vgamepad" "vgamepad",
"DaVinci"
], ],
"ignoreRegExpList": [ "ignoreRegExpList": [
"import .*" "import .*"

View File

@@ -27,6 +27,8 @@ LOG_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, "run")
PROCESS_NAME = "cyberpunk2077.exe" PROCESS_NAME = "cyberpunk2077.exe"
user.FAILSAFE = False
def start_game(): def start_game():
"""Launch the game with no launcher or start screen""" """Launch the game with no launcher or start screen"""
return exec_steam_game(STEAM_GAME_ID, game_params=["--launcher-skip", "-skipStartScreen"]) return exec_steam_game(STEAM_GAME_ID, game_params=["--launcher-skip", "-skipStartScreen"])
@@ -85,10 +87,9 @@ def navigate_settings() -> None:
am.take_screenshot("graphics_1.png", ArtifactType.CONFIG_IMAGE, "graphics menu 1") am.take_screenshot("graphics_1.png", ArtifactType.CONFIG_IMAGE, "graphics menu 1")
user.press("down") user.press("down")
time.sleep(0.5)
rast = kerasService.wait_for_word("view", interval=1, timeout=2) user.press("down") #gets you to film grain
if rast: time.sleep(0.5)
press_n_times("up", 2, 0.2) #gets you to film grain
dlss = kerasService.wait_for_word("dlss", interval=1, timeout=2) dlss = kerasService.wait_for_word("dlss", interval=1, timeout=2)
if dlss: if dlss:

View File

@@ -27,7 +27,8 @@ def get_args() -> any:
 def copy_from_network_drive():
     """Copies mod file from network drive to harness folder"""
-    src_path = Path(r"\\Labs\labs\03_ProcessingFiles\Cyberpunk 2077\basegame_no_intro_videos.archive")
+    src_path = Path(
+        r"\\labs.lmg.gg\labs\03_ProcessingFiles\Cyberpunk 2077\basegame_no_intro_videos.archive")
     dest_path = SCRIPT_DIRECTORY / "basegame_no_intro_videos.archive"
     shutil.copyfile(src_path, dest_path)

doomdarkages/README.md (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
# Doom: The Dark Ages
This script navigates through the in-game menus to the built-in benchmark and runs the Abyssal Forest benchmark with the current settings, then waits for the results screen.
## Prerequisites
- Python 3.10+
- Doom: The Dark Ages installed
- Keras OCR service
## Options
- `kerasHost`: string representing the IP address of the Keras service, e.g. `0.0.0.0`
- `kerasPort`: string representing the port of the Keras service, e.g. `8080`
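A typical invocation looks like the following (illustrative; the harness entry-point name `doomdarkages.py` is assumed here, and the host/port values are placeholders):

```
python doomdarkages.py --kerasHost 0.0.0.0 --kerasPort 8080
```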
## Output
report.json
- `resolution`: string representing the resolution the test was run at, formatted as "[width]x[height]", e.g. `1920x1080`
- `start_time`: number representing a timestamp of the test's start time in milliseconds
- `end_time`: number representing a timestamp of the test's end time in milliseconds
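For illustration, a report.json with these fields might look like the following (all values are made up):

```json
{
  "resolution": "1920x1080",
  "start_time": 1730667375000,
  "end_time": 1730667575000
}
```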

View File

@@ -0,0 +1,256 @@
"""Doom: The Dark Ages test script"""
import logging
from argparse import ArgumentParser
from pathlib import Path
import os.path
import time
import sys
import pydirectinput as user
from doomdarkages_utils import get_resolution
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from doomdarkages_utils import copy_launcher_config
from harness_utils.steam import exec_steam_game, get_build_id
from harness_utils.keras_service import KerasService
from harness_utils.misc import press_n_times, mouse_scroll_n_times
from harness_utils.process import terminate_processes
from harness_utils.output import (
format_resolution,
seconds_to_milliseconds,
setup_log_directory,
write_report_json,
DEFAULT_LOGGING_FORMAT,
DEFAULT_DATE_FORMAT,
)
from harness_utils.artifacts import ArtifactManager, ArtifactType
SCRIPT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
LOG_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, "run")
PROCESS_NAME = "DOOMTheDarkAges"
STEAM_GAME_ID = 3017860
username = os.getlogin()
BENCHMARK_RESULTS_PATH = f"C:\\Users\\{username}\\Saved Games\\id Software\\DOOMTheDarkAges\\base\\benchmark"
user.FAILSAFE = False
def start_game():
"""Launch the game with no launcher or start screen"""
copy_launcher_config()
return exec_steam_game(STEAM_GAME_ID, game_params=["+com_skipIntroVideo", "1"])
def find_latest_result_file(base_path):
"""Look for files in the benchmark results path that match the pattern.
Returns the most recent benchmark file."""
base_path = Path(base_path)
files = list(base_path.glob("benchmark-*.json"))
if not files:
raise ValueError(f"No benchmark-*.json files found in {base_path}")
return max(files, key=lambda p: p.stat().st_mtime)
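# Illustration (not part of the harness): with several benchmark-*.json files
# present, the one with the newest st_mtime wins, e.g.:
#   latest = find_latest_result_file(BENCHMARK_RESULTS_PATH)
#   latest.name  # e.g. "benchmark-0002.json", the most recently written run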
def run_benchmark():
"""Runs the actual benchmark."""
start_game()
am = ArtifactManager(LOG_DIRECTORY)
setup_start_time = int(time.time())
time.sleep(25)
# Press space to proceed to the main menu
result = kerasService.wait_for_word_vulkan("press", timeout=80)
if not result:
logging.info("Didn't see title screen. Check settings and try again.")
sys.exit(1)
logging.info("Hit the title screen. Continuing")
time.sleep(2)
user.press("space")
time.sleep(4)
# Navigate menus and take screenshots using the artifact manager
result = kerasService.wait_for_word_vulkan("campaign", interval=3, timeout=60)
if not result:
logging.info("Didn't land on the main menu!")
sys.exit(1)
logging.info("Saw the main menu. Proceeding.")
time.sleep(1)
press_n_times("down", 3, 0.2)
user.press("enter")
time.sleep(1)
result = kerasService.wait_for_word_vulkan("daze", interval=3, timeout=15)
if not result:
logging.info("Didn't see the game settings. Did it navigate correctly?")
sys.exit(1)
logging.info("Saw the game settings. Proceeding.")
press_n_times("q", 2, 0.2)
time.sleep(1)
# Screenshotting the display settings
result = kerasService.wait_for_word_vulkan("display", interval=3, timeout=15)
if not result:
logging.info("Didn't find the video settings. Did it navigate correctly?")
sys.exit(1)
am.take_screenshot_vulkan("video1.png", ArtifactType.CONFIG_IMAGE, "1st screenshot of video settings menu")
mouse_scroll_n_times(6, -200, 0.2)
time.sleep(1)
result = kerasService.wait_for_word_vulkan("nvidia", interval=3, timeout=15)
if not result:
logging.info("Didn't find the NVIDIA Reflex setting. Did it navigate correctly?")
sys.exit(1)
am.take_screenshot_vulkan("video2.png", ArtifactType.CONFIG_IMAGE, "2nd screenshot of video settings menu")
mouse_scroll_n_times(6, -200, 0.2)
time.sleep(1)
result = kerasService.wait_for_word_vulkan("advanced", interval=3, timeout=15)
if not result:
logging.info("Didn't find the advanced heading. Did it navigate correctly?")
sys.exit(1)
am.take_screenshot_vulkan("video3.png", ArtifactType.CONFIG_IMAGE, "3rd screenshot of video settings menu")
mouse_scroll_n_times(5, -200, 0.2)
time.sleep(1)
result = kerasService.wait_for_word_vulkan("shading", interval=3, timeout=15)
if not result:
logging.info("Didn't find the shading quality setting. Did it navigate correctly?")
sys.exit(1)
am.take_screenshot_vulkan("video4.png", ArtifactType.CONFIG_IMAGE, "4th screenshot of video settings menu")
mouse_scroll_n_times(5, -220, 0.2)
time.sleep(0.2)
result = kerasService.wait_for_word_vulkan("brightness", interval=3, timeout=15)
if not result:
logging.info("Didn't find the brightness setting. Did it navigate correctly?")
sys.exit(1)
am.take_screenshot_vulkan("video5.png", ArtifactType.CONFIG_IMAGE, "5th screenshot of video settings menu")
user.press("escape")
time.sleep(0.2)
# Navigating to the benchmark
result = kerasService.wait_for_word_vulkan("campaign", interval=3, timeout=20)
if not result:
logging.info("Didn't land on the main menu!")
sys.exit(1)
logging.info("Saw the main menu. Proceeding.")
time.sleep(1)
user.press("up")
user.press("enter")
time.sleep(1)
result = kerasService.wait_for_word_vulkan("benchmarks", interval=3, timeout=15)
if not result:
logging.info("Didn't navigate to the extras menu. Did it navigate properly?")
sys.exit(1)
logging.info("Saw the extras menu. Proceeding.")
time.sleep(1)
user.press("up")
user.press("enter")
time.sleep(1)
result = kerasService.wait_for_word_vulkan("abyssal", interval=3, timeout=15)
if not result:
logging.info("Don't see the Abyssal Forest benchmark option. Did it navigate properly?")
sys.exit(1)
logging.info("See the benchmarks. Starting the Abyssal Forest benchmark level.")
time.sleep(1)
press_n_times("down", 2, 0.2)
user.press("enter")
time.sleep(1)
elapsed_setup_time = round(int(time.time()) - setup_start_time, 2)
logging.info("Setup took %f seconds", elapsed_setup_time)
result = kerasService.wait_for_word_vulkan("frame", interval=0.5, timeout=90)
if not result:
logging.info("Benchmark didn't start. Did the game crash?")
sys.exit(1)
logging.info("Benchmark started. Waiting for benchmark to complete.")
test_start_time = int(time.time()) + 8
# Sleeping for the duration of the benchmark
time.sleep(110)
test_end_time = None
result = kerasService.wait_for_word_vulkan("results", interval=0.5, timeout=90)
if result:
logging.info("Found the results screen. Marking the out time.")
test_end_time = int(time.time()) - 2
time.sleep(2)
else:
logging.info("Results screen was not found!" +
"Did harness not wait long enough? Or test was too long?")
sys.exit(1)
logging.info("Results screen was found! Finishing benchmark.")
results_file = find_latest_result_file(BENCHMARK_RESULTS_PATH)
am.take_screenshot_vulkan("result.png", ArtifactType.RESULTS_IMAGE, "screenshot of results")
am.copy_file(results_file, ArtifactType.RESULTS_TEXT, "benchmark results json file")
elapsed_test_time = round(test_end_time - test_start_time, 2)
logging.info("Benchmark took %f seconds", elapsed_test_time)
terminate_processes(PROCESS_NAME)
am.create_manifest()
return test_start_time, test_end_time
setup_log_directory(LOG_DIRECTORY)
logging.basicConfig(
filename=f"{LOG_DIRECTORY}/harness.log",
format=DEFAULT_LOGGING_FORMAT,
datefmt=DEFAULT_DATE_FORMAT,
level=logging.DEBUG,
)
console = logging.StreamHandler()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
parser = ArgumentParser()
parser.add_argument(
"--kerasHost", dest="keras_host", help="Host for Keras OCR service", required=True
)
parser.add_argument(
"--kerasPort", dest="keras_port", help="Port for Keras OCR service", required=True
)
args = parser.parse_args()
kerasService = KerasService(args.keras_host, args.keras_port)
try:
start_time, end_time = run_benchmark()
width, height = get_resolution()
report = {
"resolution": format_resolution(width, height),
"start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time),
"version": get_build_id(STEAM_GAME_ID)
}
write_report_json(LOG_DIRECTORY, "report.json", report)
except Exception as e:
logging.error("Something went wrong running the benchmark!")
logging.exception(e)
terminate_processes(PROCESS_NAME)
sys.exit(1)

View File

@@ -0,0 +1,62 @@
"""Utility functions supporting Doom: The Dark Ages test script."""
import os
import re
from pathlib import Path
import sys
import logging
import shutil
import json
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from harness_utils.steam import get_app_install_location
SCRIPT_DIRECTORY = Path(__file__).resolve().parent
RUN_DIR = SCRIPT_DIRECTORY / "run"
STEAM_GAME_ID = 3017860
username = os.getlogin()
BENCHMARK_PATH = f"C:\\Users\\{username}\\Saved Games\\id Software\\DOOMTheDarkAges\\base\\benchmark"
RES_REGEX = re.compile(r'\s*(\d+)\s*[x×]\s*(\d+)')
def get_resolution() -> tuple[int, int]:
"""Gets resolution width and height from local xml file created by game."""
try:
bench_file = max(
RUN_DIR.glob("benchmark-*.json"),
key=lambda p: p.stat().st_mtime
)
except ValueError as exc:
# No files matched, propagate as a clearer FileNotFoundError
raise FileNotFoundError(
f"No benchmark-*.json files in {RUN_DIR}"
) from exc
if not bench_file.is_file():
raise FileNotFoundError("Benchmark file not found.")
with bench_file.open(encoding="utf-8") as f:
data = json.load(f)
res_string = data.get("resolution", "")
m = RES_REGEX.search(res_string)
if not m:
raise ValueError(
f"Cannot parse 'resolution' in {bench_file.name}: {res_string!r}"
)
width, height = map(int, m.groups())
return width, height
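# Illustrative inputs (assumed shapes only): RES_REGEX tolerates whitespace and
# accepts either "x" or the Unicode multiplication sign:
#   RES_REGEX.search("2560 x 1440").groups()  -> ('2560', '1440')
#   RES_REGEX.search("1920×1080").groups()    -> ('1920', '1080')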
def copy_launcher_config() -> None:
"""Copy launcher config to doom launcher config folder"""
try:
launcherconfig_path = Path(get_app_install_location(STEAM_GAME_ID), "launcherData\\base\\configs")
launcherconfig_path.mkdir(parents=True, exist_ok=True)
src_path = SCRIPT_DIRECTORY / "launcher.cfg"
dest_path = launcherconfig_path / "launcher.cfg"
logging.info("Copying: %s -> %s", src_path, dest_path)
shutil.copy(src_path, dest_path)
except OSError as err:
logging.error("Could not copy config file.")
raise err

doomdarkages/launcher.cfg Normal file
View File

@@ -0,0 +1,16 @@
rgl_driverMustBeExactMatch 0
rgl_minNvidiaDriverVersion 57680
rgl_minAMDDriverMajorVersion 25
rgl_minAMDDriverMinorVersion 5
rgl_minAMDDriverPatchVersion 1
rgl_minAMDDriverMajorVersionWin8 25
rgl_minAMDDriverMinorVersionWin8 6
rgl_minAMDDriverPatchVersionWin8 1
rgl_minAMDDriverMajorVersionWin7 25
rgl_minAMDDriverMinorVersionWin7 6
rgl_minAMDDriverPatchVersionWin7 1
rgl_minIntelDriverMajorVersion 101
rgl_minIntelDriverMinorVersion 6732
rgl_showAMDStartupWarning 0
rgl_showIntelStartupWarning 0
rgl_showNvidiaStartupWarning 0
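
The launcher config is a flat list of `key value` pairs. A minimal sketch for reading it back, assuming only that simple format (e.g. to verify the vendor startup warnings stay disabled):

```python
# Sketch (assumed format): parse launcher.cfg into a dict of key -> value strings.
from pathlib import Path

def read_launcher_cfg(path: Path) -> dict[str, str]:
    settings = {}
    for line in path.read_text(encoding="utf-8").splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            settings[key] = value.strip()
    return settings

assert read_launcher_cfg(Path("launcher.cfg"))["rgl_showNvidiaStartupWarning"] == "0"
```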

View File

@@ -0,0 +1,10 @@
friendly_name: "Doom: The Dark Ages"
executable: "doomdarkages.py"
process_name: "DOOMTheDarkAges.exe"
hidden: 0
output_dir: "run"
options:
- name: kerasHost
type: input
- name: kerasPort
type: input

View File

@@ -1,13 +1,13 @@
"""Dota 2 test script""" """Dota 2 test script"""
import logging import logging
import os from pathlib import Path
import time import time
import pyautogui as gui import pyautogui as gui
import pydirectinput as user import pydirectinput as user
import sys import sys
from dota2_utils import get_resolution, copy_replay, copy_config, get_args from dota2_utils import get_resolution, copy_replay, copy_config, get_args
sys.path.insert(1, os.path.join(sys.path[0], '..')) sys.path.insert(1, str(Path(sys.path[0]).parent))
from harness_utils.output import ( from harness_utils.output import (
setup_log_directory, setup_log_directory,
@@ -22,12 +22,12 @@ from harness_utils.steam import exec_steam_game
from harness_utils.artifacts import ArtifactManager, ArtifactType from harness_utils.artifacts import ArtifactManager, ArtifactType
SCRIPT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) SCRIPT_DIRECTORY = Path(__file__).resolve().parent
LOG_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, "run") LOG_DIRECTORY = SCRIPT_DIRECTORY / "run"
PROCESS_NAME = "dota2.exe" PROCESS_NAME = "dota2.exe"
STEAM_GAME_ID = 570 STEAM_GAME_ID = 570
setup_log_directory(LOG_DIRECTORY) setup_log_directory(str(LOG_DIRECTORY))
logging.basicConfig(filename=f'{LOG_DIRECTORY}/harness.log', logging.basicConfig(filename=f'{LOG_DIRECTORY}/harness.log',
format=DEFAULT_LOGGING_FORMAT, format=DEFAULT_LOGGING_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
@@ -40,10 +40,12 @@ logging.getLogger('').addHandler(console)
args = get_args() args = get_args()
kerasService = KerasService(args.keras_host, args.keras_port) kerasService = KerasService(args.keras_host, args.keras_port)
user.FAILSAFE = False
def start_game(): def start_game():
"""Launch the game with console enabled and FPS unlocked""" """Launch the game with console enabled and FPS unlocked"""
return exec_steam_game(STEAM_GAME_ID, game_params=["-console", "+fps_max 0"]) return exec_steam_game(
STEAM_GAME_ID, game_params=["-console", "+fps_max 0"])
def console_command(command): def console_command(command):
@@ -68,25 +70,43 @@ def run_benchmark():
time.sleep(1) time.sleep(1)
# waiting about a minute for the main menu to appear # waiting about a minute for the main menu to appear
if kerasService.wait_for_word(word="heroes", timeout=80, interval=1) is None: if kerasService.wait_for_word(
logging.error("Game didn't start in time. Check settings and try again.") word="heroes", timeout=80, interval=1) is None:
logging.error(
"Game didn't start in time. Check settings and try again.")
sys.exit(1) sys.exit(1)
height, width = get_resolution() time.sleep(15) # wait for main menu
location = None
screen_height, screen_width = get_resolution()
location = None
click_multiple = 0
# We check the resolution so we know which screenshot to use for the locate on screen function # We check the resolution so we know which screenshot to use for the locate on screen function
match width: match screen_width:
case "1280": case "1280":
location = gui.locateOnScreen(f"{SCRIPT_DIRECTORY}\\screenshots\\settings_720.png", confidence=0.9) location = gui.locateOnScreen(
f"{SCRIPT_DIRECTORY}\\screenshots\\settings_720.png",
confidence=0.9)
click_multiple = 0.8
case "1920": case "1920":
location = gui.locateOnScreen(f"{SCRIPT_DIRECTORY}\\screenshots\\settings_1080.png") location = gui.locateOnScreen(
f"{SCRIPT_DIRECTORY}\\screenshots\\settings_1080.png",
confidence=0.9)
click_multiple = 1
case "2560": case "2560":
location = gui.locateOnScreen(f"{SCRIPT_DIRECTORY}\\screenshots\\settings_1440.png") location = gui.locateOnScreen(
f"{SCRIPT_DIRECTORY}\\screenshots\\settings_1440.png",
confidence=0.9)
click_multiple = 1.5
case "3840": case "3840":
location = gui.locateOnScreen(f"{SCRIPT_DIRECTORY}\\screenshots\\settings_2160.png") location = gui.locateOnScreen(
f"{SCRIPT_DIRECTORY}\\screenshots\\settings_2160.png",
confidence=0.9)
click_multiple = 2
case _: case _:
logging.error("Could not find the settings cog. The game resolution is currently %s, %s. Are you using a standard resolution?", height, width) logging.error(
"Could not find the settings cog. The game resolution is currently %s, %s. Are you using a standard resolution?",
screen_height, screen_width)
sys.exit(1) sys.exit(1)
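# Scaling sketch (illustrative numbers): click offsets below are authored for
# 1080p and multiplied by click_multiple, so at 1440p (click_multiple = 1.5)
# an authored (50, 20) offset becomes (75, 30):
#   gui.moveTo(result["x"] + int(50 * 1.5), result["y"] + int(20 * 1.5))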
# navigating to the video config section # navigating to the video config section
@@ -99,21 +119,47 @@ def run_benchmark():
result = kerasService.look_for_word(word="video", attempts=10, interval=1) result = kerasService.look_for_word(word="video", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the video menu button. Did Keras enter settings correctly?") logging.info(
"Did not find the video menu button. Did Keras enter settings correctly?")
sys.exit(1) sys.exit(1)
gui.moveTo(result["x"] + 10, result["y"] + 8) gui.moveTo(result["x"] + int(50 * click_multiple),
result["y"] + int(20 * click_multiple))
gui.mouseDown() gui.mouseDown()
time.sleep(0.2) time.sleep(0.2)
gui.mouseUp() gui.mouseUp()
time.sleep(0.2) time.sleep(0.2)
if kerasService.wait_for_word(word="resolution", timeout=30, interval=1) is None: if kerasService.wait_for_word(
logging.info("Did not find the video settings menu. Did the menu get stuck?") word="resolution", timeout=30, interval=1) is None:
logging.info(
"Did not find the video settings menu. Did the menu get stuck?")
sys.exit(1) sys.exit(1)
am.take_screenshot("video.png", ArtifactType.CONFIG_IMAGE, "picture of video settings") am.take_screenshot("video1.png", ArtifactType.CONFIG_IMAGE,
"picture of video settings")
user.press("down")
if kerasService.wait_for_word(
word="api", timeout=30, interval=1) is None:
logging.info(
"Did not find the video settings menu. Did the menu get stuck?")
sys.exit(1)
am.take_screenshot("video2.png", ArtifactType.CONFIG_IMAGE,
"picture of video settings")
user.press("down")
if kerasService.wait_for_word(
word="direct", timeout=30, interval=1) is None:
logging.info(
"Did not find the video settings menu. Did the menu get stuck?")
sys.exit(1)
am.take_screenshot("video3.png", ArtifactType.CONFIG_IMAGE,
"picture of video settings")
# starting the benchmark # starting the benchmark
user.press("escape") user.press("escape")
logging.info('Starting benchmark') logging.info('Starting benchmark')
@@ -124,7 +170,8 @@ def run_benchmark():
user.press("\\") user.press("\\")
time.sleep(5) time.sleep(5)
if kerasService.wait_for_word(word="directed", timeout=30, interval=0.1) is None: if kerasService.wait_for_word(
word="directed", timeout=30, interval=0.1) is None:
logging.error("Didn't see directed camera. Did the replay load?") logging.error("Didn't see directed camera. Did the replay load?")
sys.exit(1) sys.exit(1)
@@ -138,26 +185,29 @@ def run_benchmark():
result = kerasService.wait_for_word(word="2560", timeout=30, interval=0.1) result = kerasService.wait_for_word(word="2560", timeout=30, interval=0.1)
if result is None: if result is None:
logging.error("Unable to find Leshrac's HP. Using default start time value.") logging.error(
"Unable to find Leshrac's HP. Using default start time value.")
else: else:
test_start_time = int(time.time()) test_start_time = int(time.time())
logging.info("Found Leshrac's HP! Marking the start time accordingly.") logging.info("Found Leshrac's HP! Marking the start time accordingly.")
time.sleep(73) # sleep duration during gameplay time.sleep(73) # sleep duration during gameplay
# Default fallback end time # Default fallback end time
test_end_time = int(time.time()) test_end_time = int(time.time())
result = kerasService.wait_for_word(word="1195", timeout=30, interval=0.1) result = kerasService.wait_for_word(word="1195", timeout=30, interval=0.1)
if result is None: if result is None:
logging.error("Unable to find gold count of 1195. Using default end time value.") logging.error(
"Unable to find gold count of 1195. Using default end time value.")
else: else:
test_end_time = int(time.time()) test_end_time = int(time.time())
logging.info("Found the gold. Marking end time.") logging.info("Found the gold. Marking end time.")
time.sleep(2) time.sleep(2)
if kerasService.wait_for_word(word="heroes", timeout=25, interval=1) is None: if kerasService.wait_for_word(
word="heroes", timeout=25, interval=1) is None:
logging.error("Main menu after running benchmark not found, exiting") logging.error("Main menu after running benchmark not found, exiting")
sys.exit(1) sys.exit(1)
@@ -173,14 +223,14 @@ def run_benchmark():
try: try:
start_time, end_time = run_benchmark() start_time, end_time = run_benchmark()
height, width = get_resolution() res_height, res_width = get_resolution()
report = { report = {
"resolution": format_resolution(width, height), "resolution": format_resolution(int(res_width), int(res_height)),
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time) "end_time": seconds_to_milliseconds(end_time)
} }
write_report_json(LOG_DIRECTORY, "report.json", report) write_report_json(str(LOG_DIRECTORY), "report.json", report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)

View File

@@ -37,7 +37,7 @@ def get_install_path():
def copy_replay_from_network_drive(): def copy_replay_from_network_drive():
"""Copies replay file from network drive to harness folder""" """Copies replay file from network drive to harness folder"""
src_path = Path(r"\\Labs\labs\03_ProcessingFiles\Dota2\benchmark.dem") src_path = Path(r"\\labs.lmg.gg\labs\03_ProcessingFiles\Dota2\benchmark.dem")
dest_path = SCRIPT_DIRECTORY / "benchmark.dem" dest_path = SCRIPT_DIRECTORY / "benchmark.dem"
shutil.copyfile(src_path, dest_path) shutil.copyfile(src_path, dest_path)
@@ -84,13 +84,18 @@ def copy_config() -> None:
def read_config() -> list[str] | None: def read_config() -> list[str] | None:
"""Looks for config file and returns contents if found""" """Looks for config file and returns contents if found"""
userdata_path = Path(get_steam_folder_path(), "userdata", str(STEAM_USER_ID), str(STEAM_GAME_ID), "local", "cfg", "video.txt") userdata_path = Path(
get_steam_folder_path(),
"userdata", str(STEAM_USER_ID),
str(STEAM_GAME_ID),
"local", "cfg", "video.txt")
install_path = Path(get_install_path(), "game", "dota", "cfg", "video.txt") install_path = Path(get_install_path(), "game", "dota", "cfg", "video.txt")
try: try:
with open(userdata_path, encoding="utf-8") as f: with open(userdata_path, encoding="utf-8") as f:
return f.readlines() return f.readlines()
except OSError: except OSError:
logging.error("Did not find config file at path %s. Trying path %s", userdata_path, install_path) logging.error("Did not find config file at path %s. Trying path %s",
userdata_path, install_path)
try: try:
with open(install_path, encoding="utf-8") as f: with open(install_path, encoding="utf-8") as f:
return f.readlines() return f.readlines()
@@ -118,6 +123,6 @@ def get_resolution():
height = height_match.group(1) height = height_match.group(1)
if width_match is not None: if width_match is not None:
width = width_match.group(1) width = width_match.group(1)
if height != 0 and width !=0: if height != 0 and width != 0:
return (height, width) return (height, width)
return (height, width) return (height, width)

[Binary files not shown: four screenshot assets updated; all slightly smaller than before]
View File

@@ -1,4 +1,6 @@
"""Far Cry 6 test script""" """Far Cry 6 test script"""
# pylint: disable = C0116, W0621
import os import os
import logging import logging
import time import time
@@ -30,25 +32,31 @@ LOG_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, "run")
PROCESS_NAME = "FarCry6.exe" PROCESS_NAME = "FarCry6.exe"
GAME_ID = 5266 GAME_ID = 5266
username = os.getlogin() username = os.getlogin()
xml_file = rf"C:\Users\{username}\Documents\My Games\Far Cry 6\gamerprofile.xml" XML_FILE = rf"C:\Users\{username}\Documents\My Games\Far Cry 6\gamerprofile.xml"
user.FAILSAFE = False
def start_game(): def start_game():
subprocess.run(f'start uplay://launch/{GAME_ID}/0', shell=True) subprocess.run(f'start uplay://launch/{GAME_ID}/0', shell=True, check=True)
def skip_logo_screens() -> None: def skip_logo_screens() -> None:
"""Simulate input to skip logo screens""" """Simulate input to skip logo screens"""
logging.info("Skipping logo screens") logging.info("Skipping logo screens")
#skipping the logo screens # skipping the logo screens
press_n_times("escape", 8, 1) press_n_times("escape", 8, 1)
def run_benchmark(): def run_benchmark():
am = ArtifactManager(LOG_DIRECTORY) am = ArtifactManager(LOG_DIRECTORY)
start_game() start_game()
setup_start_time = int(time.time()) setup_start_time = int(time.time())
time.sleep(25) time.sleep(25)
#skipping game intros # skipping game intros
result = kerasService.look_for_word("warning", attempts=20, interval=1) result = kerasService.look_for_word("warning", attempts=20, interval=1)
if not result: if not result:
logging.info("Did not see warnings. Did the game start?") logging.info("Did not see warnings. Did the game start?")
@@ -66,7 +74,7 @@ def run_benchmark():
time.sleep(2) time.sleep(2)
#navigating the menus to get to the video settings # navigating the menus to get to the video settings
result = kerasService.look_for_word("later", attempts=5, interval=1) result = kerasService.look_for_word("later", attempts=5, interval=1)
if result: if result:
user.press("escape") user.press("escape")
@@ -95,10 +103,11 @@ def run_benchmark():
gui.mouseUp() gui.mouseUp()
time.sleep(2) time.sleep(2)
#grabbing screenshots of all the video settings # grabbing screenshots of all the video settings
result = kerasService.look_for_word("adapter", attempts=10, interval=1) result = kerasService.look_for_word("adapter", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the Video Adapter setting in the monitor options. Did keras navigate wrong?") logging.info(
"Did not find the Video Adapter setting in the monitor options. Did keras navigate wrong?")
sys.exit(1) sys.exit(1)
am.take_screenshot("video.png", ArtifactType.CONFIG_IMAGE, "picture of video settings") am.take_screenshot("video.png", ArtifactType.CONFIG_IMAGE, "picture of video settings")
@@ -109,18 +118,20 @@ def run_benchmark():
result = kerasService.look_for_word("filtering", attempts=10, interval=1) result = kerasService.look_for_word("filtering", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the Texture Filtering setting in the quality options. Did keras navigate wrong?") logging.info(
"Did not find the Texture Filtering setting in the quality options. Did keras navigate wrong?")
sys.exit(1) sys.exit(1)
am.take_screenshot("quality1.png", ArtifactType.CONFIG_IMAGE, "1st picture of quality settings") am.take_screenshot("quality1.png", ArtifactType.CONFIG_IMAGE, "1st picture of quality settings")
time.sleep(2) time.sleep(2)
mouse_scroll_n_times(8, -800, 0.2) mouse_scroll_n_times(8, -800, 0.2)
result = kerasService.look_for_word("shading", attempts=10, interval=1) result = kerasService.look_for_word("shading", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the FidelityFX Variable Shading setting in the quality options. Did keras navigate wrong?") logging.info(
"Did not find the FidelityFX Variable Shading setting in the quality options. Did keras navigate wrong?")
sys.exit(1) sys.exit(1)
am.take_screenshot("quality2.png", ArtifactType.CONFIG_IMAGE, "2nd picture of quality settings") am.take_screenshot("quality2.png", ArtifactType.CONFIG_IMAGE, "2nd picture of quality settings")
@@ -131,12 +142,13 @@ def run_benchmark():
result = kerasService.look_for_word("lock", attempts=10, interval=1) result = kerasService.look_for_word("lock", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the Enable Framerate Lock setting in the advanced options. Did keras navigate wrong?") logging.info(
"Did not find the Enable Framerate Lock setting in the advanced options. Did keras navigate wrong?")
sys.exit(1) sys.exit(1)
am.take_screenshot("advanced.png", ArtifactType.CONFIG_IMAGE, "picture of advanced settings") am.take_screenshot("advanced.png", ArtifactType.CONFIG_IMAGE, "picture of advanced settings")
#starting the benchmark # starting the benchmark
time.sleep(2) time.sleep(2)
user.press("f5") user.press("f5")
elapsed_setup_time = round(int(time.time()) - setup_start_time, 2) elapsed_setup_time = round(int(time.time()) - setup_start_time, 2)
@@ -148,7 +160,7 @@ def run_benchmark():
sys.exit(1) sys.exit(1)
test_start_time = int(time.time()) test_start_time = int(time.time())
time.sleep(60) # wait for benchmark to complete time.sleep(60) # wait for benchmark to complete
result = kerasService.wait_for_word("results", interval=0.5, timeout=100) result = kerasService.wait_for_word("results", interval=0.5, timeout=100)
if not result: if not result:
@@ -165,11 +177,12 @@ def run_benchmark():
# Exit # Exit
terminate_processes(PROCESS_NAME) terminate_processes(PROCESS_NAME)
am.copy_file(xml_file, ArtifactType.CONFIG_TEXT, "config file") am.copy_file(XML_FILE, ArtifactType.CONFIG_TEXT, "config file")
am.create_manifest() am.create_manifest()
return test_start_time, test_end_time return test_start_time, test_end_time
setup_log_directory(LOG_DIRECTORY) setup_log_directory(LOG_DIRECTORY)
logging.basicConfig(filename=f'{LOG_DIRECTORY}/harness.log', logging.basicConfig(filename=f'{LOG_DIRECTORY}/harness.log',
@@ -204,4 +217,4 @@ except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)
terminate_processes(PROCESS_NAME) terminate_processes(PROCESS_NAME)
sys.exit(1) sys.exit(1)

View File

@@ -34,6 +34,7 @@ CONFIG_LOCATION = (
CONFIG_FILENAME = "UserConfigSelections" CONFIG_FILENAME = "UserConfigSelections"
PROCESSES = ["ForzaHorizon5.exe", "RTSS.exe"] PROCESSES = ["ForzaHorizon5.exe", "RTSS.exe"]
user.FAILSAFE = False
def start_rtss(): def start_rtss():
"""Sets up the RTSS process""" """Sets up the RTSS process"""

View File

@@ -78,11 +78,11 @@ def main():
report = { report = {
"start_time": start_time, "start_time": start_time,
"version": "4.3-stable", "version": "4.4.1-stable",
"end_time": end_time, "end_time": end_time,
"score": score, "score": score,
"unit": "seconds", "unit": "seconds",
"test": "Godot 4.3 Compile" "test": "Godot 4.4.1 Compile"
} }
write_report_json(LOG_DIR, "report.json", report) write_report_json(LOG_DIR, "report.json", report)

View File

@@ -14,7 +14,7 @@ MINGW_ZIP = "x86_64-13.2.0-release-posix-seh-msvcrt-rt_v11-rev1.zip"
MINGW_FOLDER = SCRIPT_DIR.joinpath("mingw64") MINGW_FOLDER = SCRIPT_DIR.joinpath("mingw64")
MINICONDA_EXECUTABLE_PATH = Path("C:\\ProgramData\\miniconda3\\_conda.exe") MINICONDA_EXECUTABLE_PATH = Path("C:\\ProgramData\\miniconda3\\_conda.exe")
CONDA_ENV_NAME = "godotbuild" CONDA_ENV_NAME = "godotbuild"
GODOT_DIR = "godot-4.3-stable" GODOT_DIR = "godot-4.4.1-stable"
def install_mingw() -> str: def install_mingw() -> str:
@@ -24,7 +24,7 @@ def install_mingw() -> str:
if str(MINGW_FOLDER) not in original_path: if str(MINGW_FOLDER) not in original_path:
os.environ['PATH'] = str(MINGW_FOLDER.joinpath('bin')) + os.pathsep + original_path os.environ['PATH'] = str(MINGW_FOLDER.joinpath('bin')) + os.pathsep + original_path
return "existing mingw installation detected" return "existing mingw installation detected"
source = Path("\\\\Labs\\labs\\01_Installers_Utilities\\MinGW\\").joinpath(MINGW_ZIP) source = Path("\\\\labs.lmg.gg\\labs\\01_Installers_Utilities\\MinGW\\").joinpath(MINGW_ZIP)
destination = SCRIPT_DIR.joinpath(MINGW_ZIP) destination = SCRIPT_DIR.joinpath(MINGW_ZIP)
shutil.copyfile(source, destination) shutil.copyfile(source, destination)
with ZipFile(destination, 'r') as zip_object: with ZipFile(destination, 'r') as zip_object:
@@ -36,7 +36,8 @@ def install_mingw() -> str:
def copy_miniconda_from_network_drive(): def copy_miniconda_from_network_drive():
"""copies miniconda installer from network drive""" """copies miniconda installer from network drive"""
source = Path("\\\\Labs\\labs\\01_Installers_Utilities\\Miniconda\\").joinpath(MINICONDA_INSTALLER) source = Path("\\\\labs.lmg.gg\\labs\\01_Installers_Utilities\\Miniconda\\").joinpath(
MINICONDA_INSTALLER)
destination = SCRIPT_DIR.joinpath(MINICONDA_INSTALLER) destination = SCRIPT_DIR.joinpath(MINICONDA_INSTALLER)
shutil.copyfile(source, destination) shutil.copyfile(source, destination)
@@ -49,15 +50,15 @@ def install_miniconda() -> str:
copy_miniconda_from_network_drive() copy_miniconda_from_network_drive()
except Exception as err: except Exception as err:
raise Exception("could not copy miniconda from network drive") from err raise Exception("could not copy miniconda from network drive") from err
command =[ command = [
"powershell", "powershell",
"start-process", "start-process",
"-FilePath", "-FilePath",
f'"{str(SCRIPT_DIR.joinpath(MINICONDA_INSTALLER))}"', f'"{str(SCRIPT_DIR.joinpath(MINICONDA_INSTALLER))}"',
"-ArgumentList", "-ArgumentList",
'"/S"', '"/S"',
"-Wait" "-Wait"
] ]
try: try:
output = subprocess.check_output(command, stderr=subprocess.PIPE, text=True) output = subprocess.check_output(command, stderr=subprocess.PIPE, text=True)
except Exception as err: except Exception as err:
@@ -71,14 +72,14 @@ def copy_godot_source_from_network_drive() -> str:
if SCRIPT_DIR.joinpath(GODOT_DIR).is_dir(): if SCRIPT_DIR.joinpath(GODOT_DIR).is_dir():
return "existing godot source directory detected" return "existing godot source directory detected"
zip_name = f"{GODOT_DIR}.zip" zip_name = f"{GODOT_DIR}.zip"
source = Path("\\\\Labs\\labs\\03_ProcessingFiles\\Godot Files\\").joinpath(zip_name) source = Path("\\\\labs.lmg.gg\\labs\\03_ProcessingFiles\\Godot Files\\").joinpath(zip_name)
destination = SCRIPT_DIR.joinpath(zip_name) destination = SCRIPT_DIR.joinpath(zip_name)
shutil.copyfile(source, destination) shutil.copyfile(source, destination)
with ZipFile(destination, 'r') as zip_object: with ZipFile(destination, 'r') as zip_object:
try: try:
zip_object.extractall(path=SCRIPT_DIR) zip_object.extractall(path=SCRIPT_DIR)
except Exception as ex: except Exception as ex:
raise Exception ("error extracting godot zip") from ex raise Exception("error extracting godot zip") from ex
return "godot source copied and unpacked from network drive" return "godot source copied and unpacked from network drive"
@@ -90,7 +91,8 @@ def check_conda_environment_exists() -> bool:
"-n", "-n",
CONDA_ENV_NAME CONDA_ENV_NAME
] ]
process = subprocess.run(" ".join(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=False) process = subprocess.run(" ".join(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, text=True, check=False)
if process.returncode == 1: if process.returncode == 1:
return False return False
return True return True
@@ -131,6 +133,6 @@ def convert_duration_string_to_seconds(duration: str) -> int:
hours=int(duration.split(':')[0]), hours=int(duration.split(':')[0]),
minutes=int(duration.split(':')[1]), minutes=int(duration.split(':')[1]),
seconds=float(duration.split('.')[0].split(':')[2]), seconds=float(duration.split('.')[0].split(':')[2]),
milliseconds=int(float('0.' + duration.split('.')[1])*1000)) milliseconds=int(float('0.' + duration.split('.')[1]) * 1000))
return round(time_obj.total_seconds()) return round(time_obj.total_seconds())
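# Worked example (illustrative): "0:05:23.774" becomes
# timedelta(hours=0, minutes=5, seconds=23.0, milliseconds=774),
# i.e. 323.774 total seconds, which round() reports as 324.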

View File

@@ -4,7 +4,7 @@ import getpass
import subprocess import subprocess
import sys import sys
from pathlib import Path from pathlib import Path
from gravitymark_utils import friendly_test_name, get_args, get_score, create_gravitymark_command from gravitymark_utils import friendly_test_param, get_args, get_score, create_gravitymark_command
PARENT_DIR = str(Path(sys.path[0], "..")) PARENT_DIR = str(Path(sys.path[0], ".."))
sys.path.append(PARENT_DIR) sys.path.append(PARENT_DIR)
@@ -19,7 +19,7 @@ GRAVITYMARK_PATH = Path("C:/", "Program Files", "GravityMark", "bin")
GRAVITYMARK_EXE = GRAVITYMARK_PATH / "GravityMark.exe" GRAVITYMARK_EXE = GRAVITYMARK_PATH / "GravityMark.exe"
args = get_args() args = get_args()
api = f"-{args.api}" API = f"-{args.api}"
script_dir = Path(__file__).resolve().parent script_dir = Path(__file__).resolve().parent
log_dir = script_dir / "run" log_dir = script_dir / "run"
@@ -36,9 +36,11 @@ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
console.setFormatter(formatter) console.setFormatter(formatter)
logging.getLogger("").addHandler(console) logging.getLogger("").addHandler(console)
gravitymark_log_path = Path("C:/Users", getpass.getuser(), ".GravityMark", "GravityMark.log") gravitymark_log_path = Path(
"C:/Users", getpass.getuser(),
".GravityMark", "GravityMark.log")
image_path = log_dir / "result.png" image_path = log_dir / "result.png"
command = create_gravitymark_command(GRAVITYMARK_EXE, api, image_path) command = create_gravitymark_command(GRAVITYMARK_EXE, API, image_path)
try: try:
logging.info('Starting benchmark!') logging.info('Starting benchmark!')
@@ -47,7 +49,8 @@ try:
result = subprocess.run(command, check=True, cwd=GRAVITYMARK_PATH) result = subprocess.run(command, check=True, cwd=GRAVITYMARK_PATH)
if result.returncode > 0: if result.returncode > 0:
logging.error("GravityMark exited with return code %d", result.returncode) logging.error("GravityMark exited with return code %d",
result.returncode)
sys.exit(1) sys.exit(1)
score = get_score(gravitymark_log_path) score = get_score(gravitymark_log_path)
@@ -57,12 +60,13 @@ try:
sys.exit(1) sys.exit(1)
report = { report = {
"test": friendly_test_name(args.api), "test": "GravityMark",
"test_parameter": friendly_test_param(args.api),
"score": score, "score": score,
"unit": "score" "unit": "score"
} }
write_report_json(log_dir, "report.json", report) write_report_json(str(log_dir), "report.json", report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)

View File

@@ -23,16 +23,16 @@ CLI_OPTIONS = {
"-status": "1" "-status": "1"
} }
def friendly_test_name(api: str) -> str: def friendly_test_param(api: str) -> str:
"""return a friendlier string given the API harness argument""" """return a friendlier string given the API harness argument"""
if api == "vulkan": if api == "vulkan":
return "GravityMark Vulkan" return "Vulkan"
if api == "opengl": if api == "opengl":
return "GravityMark OpenGL" return "OpenGL"
if api == "direct3d12": if api == "direct3d12":
return "GravityMark DX12" return "DX12"
if api == "direct3d11": if api == "direct3d11":
return "GravityMark DX11" return "DX11"
return api return api
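# Usage sketch: friendly_test_param("direct3d12") -> "DX12"; unknown API
# strings pass through unchanged, e.g. friendly_test_param("metal") -> "metal".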
def get_args() -> Namespace: def get_args() -> Namespace:

View File

@@ -33,6 +33,8 @@ CONFIG_PATH = f"C:\\Users\\{username}\\Documents\\My Games\\GRID Legends\\hardwa
CONFIG_FILENAME = "hardware_settings_config.xml" CONFIG_FILENAME = "hardware_settings_config.xml"
CONFIG_FULL_PATH = f"{CONFIG_PATH}\\{CONFIG_FILENAME}" CONFIG_FULL_PATH = f"{CONFIG_PATH}\\{CONFIG_FILENAME}"
user.FAILSAFE = False
def get_resolution() -> tuple[int]: def get_resolution() -> tuple[int]:
"""Gets resolution width and height from local xml file created by game.""" """Gets resolution width and height from local xml file created by game."""
resolution = re.compile(r"<resolution width=\"(\d+)\" height=\"(\d+)\"") resolution = re.compile(r"<resolution width=\"(\d+)\" height=\"(\d+)\"")

View File

@@ -32,51 +32,63 @@ logging.basicConfig(
ENCODER_TO_PRESET = { ENCODER_TO_PRESET = {
"h264_cpu": { "h264_cpu": {
"file": f"{SCRIPT_DIR}\\presets\\h264_bigbuckbunny_1080p_cpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\h264_bigbuckbunny_1080p_cpu_test.json",
"name": "\"CPU 1080p BBB H264\"" "name": "\"CPU 1080p BBB H264\"",
"api": "cpu"
}, },
"h265_cpu": { "h265_cpu": {
"file": f"{SCRIPT_DIR}\\presets\\h265_bigbuckbunny_1080p_cpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\h265_bigbuckbunny_1080p_cpu_test.json",
"name": "\"CPU 1080p BBB H265\"" "name": "\"CPU 1080p BBB H265\"",
"api": "cpu"
}, },
"av1_cpu": { "av1_cpu": {
"file": f"{SCRIPT_DIR}\\presets\\av1-svt_bigbuckbunny_1080p_cpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\av1-svt_bigbuckbunny_1080p_cpu_test.json",
"name": "\"CPU 1080p BBB AV1\"" "name": "\"CPU 1080p BBB AV1\"",
"api": "cpu"
}, },
"h264_nvenc": { "h264_nvenc": {
"file": f"{SCRIPT_DIR}\\presets\\h264_nvenc_bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\h264_nvenc_bigbuckbunny_1080p_gpu_test.json",
"name": "\"NVENC 1080p BBB H264\"" "name": "\"NVENC 1080p BBB H264\"",
"api": "nvenc"
}, },
"h265_nvenc": { "h265_nvenc": {
"file": f"{SCRIPT_DIR}\\presets\\h265_nvenc_bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\h265_nvenc_bigbuckbunny_1080p_gpu_test.json",
"name": "\"NVENC 1080p BBB H265\"" "name": "\"NVENC 1080p BBB H265\"",
"api": "nvenc"
}, },
"av1_nvenc": { "av1_nvenc": {
"file": f"{SCRIPT_DIR}\\presets\\av1-nvenc_bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\av1-nvenc_bigbuckbunny_1080p_gpu_test.json",
"name": "\"NVENC 1080p BBB AV1\"" "name": "\"NVENC 1080p BBB AV1\"",
"api": "nvenc"
}, },
"h264_vce": { "h264_vce": {
"file": f"{SCRIPT_DIR}\\presets\\h264-vce-bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\h264-vce-bigbuckbunny_1080p_gpu_test.json",
"name": "\"AMD VCE 1080p BBB H264\"" "name": "\"AMD VCE 1080p BBB H264\"",
"api": "vce"
}, },
"av1_vce": { "av1_vce": {
"file": f"{SCRIPT_DIR}\\presets\\av1-vce-bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\av1-vce-bigbuckbunny_1080p_gpu_test.json",
"name": "\"AMD VCE 1080p BBB AV1\"" "name": "\"AMD VCE 1080p BBB AV1\"",
"api": "vce"
}, },
"h264_quicksync": { "h264_quicksync": {
"file": f"{SCRIPT_DIR}\\presets\\h264-quicksync_bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\h264-quicksync_bigbuckbunny_1080p_gpu_test.json",
"name": "\"QUICKSYNC 1080p BBB H264\"" "name": "\"QUICKSYNC 1080p BBB H264\"",
"api": "quicksync"
}, },
"av1_quicksync": { "av1_quicksync": {
"file": f"{SCRIPT_DIR}\\presets\\av1-quicksync_bigbuckbunny_1080p_gpu_test.json", "file": f"{SCRIPT_DIR}\\presets\\av1-quicksync_bigbuckbunny_1080p_gpu_test.json",
"name": "\"QUICKSYNC 1080p BBB AV1\"" "name": "\"QUICKSYNC 1080p BBB AV1\"",
"api": "quicksync"
} }
} }
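# Illustrative lookup (assuming args.encoder == "av1_nvenc"):
#   preset = ENCODER_TO_PRESET["av1_nvenc"]
#   preset["api"]   -> "nvenc"
#   preset["name"]  -> '"NVENC 1080p BBB AV1"'  (pre-quoted for the CLI command line)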
console = logging.StreamHandler() console = logging.StreamHandler()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
console.setFormatter(formatter) console.setFormatter(formatter)
logging.getLogger("").addHandler(console) logging.getLogger("").addHandler(console)
def main(): def main():
"""entrypoint""" """entrypoint"""
parser = ArgumentParser() parser = ArgumentParser()
@@ -133,7 +145,9 @@ def main():
end_time = current_time_ms() end_time = current_time_ms()
report = { report = {
"test": f"HandBrake Encoding BBB {args.encoder.upper()}", "test": "HandBrake Encoding",
"test_parameter": f"{ENCODER_TO_PRESET[args.encoder]['name']}",
"api": ENCODER_TO_PRESET[args.encoder]['api'],
"score": score, "score": score,
"unit": "frames per second", "unit": "frames per second",
"version": "1.9.1", "version": "1.9.1",
@@ -141,11 +155,12 @@ def main():
"end_time": end_time "end_time": end_time
} }
write_report_json(LOG_DIR, "report.json", report) write_report_json(str(LOG_DIR), "report.json", report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)
sys.exit(1) sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@@ -17,7 +17,8 @@ def handbrake_present() -> bool:
def copy_handbrake_from_network_drive(): def copy_handbrake_from_network_drive():
"""copy handbrake cli from network drive""" """copy handbrake cli from network drive"""
source = Path("\\\\Labs\\labs\\01_Installers_Utilities\\Handbrake\\X86\\HandBrakeCLI-1.9.1-win-x86_64\\") source = Path(
"\\\\labs.lmg.gg\\labs\\01_Installers_Utilities\\Handbrake\\X86\\HandBrakeCLI-1.9.1-win-x86_64\\")
copy_source = source / HANDBRAKE_EXECUTABLE copy_source = source / HANDBRAKE_EXECUTABLE
destination = SCRIPT_DIR / HANDBRAKE_EXECUTABLE destination = SCRIPT_DIR / HANDBRAKE_EXECUTABLE
shutil.copyfile(copy_source, destination) shutil.copyfile(copy_source, destination)
@@ -30,7 +31,7 @@ def is_video_source_present() -> bool:
def copy_video_source(): def copy_video_source():
"""copy big buck bunny source video to local from network drive""" """copy big buck bunny source video to local from network drive"""
source = r"\\Labs\labs\03_ProcessingFiles\Handbrake Test\big_buck_bunny_1080p24.y4m" source = r"\\labs.lmg.gg\labs\03_ProcessingFiles\Handbrake Test\big_buck_bunny_1080p24.y4m"
root_dir = os.path.dirname(os.path.realpath(__file__)) root_dir = os.path.dirname(os.path.realpath(__file__))
destination = os.path.join(root_dir, SOURCE_VIDEO_NAME) destination = os.path.join(root_dir, SOURCE_VIDEO_NAME)
shutil.copyfile(source, destination) shutil.copyfile(source, destination)

View File

@@ -81,7 +81,7 @@ class ArtifactManager:
The newly created artifact's `type` and `description` fields are set to the given The newly created artifact's `type` and `description` fields are set to the given
`artifact_type` and `description` arguments respectively while the artifact's `filename` `artifact_type` and `description` arguments respectively while the artifact's `filename`
is set to the basename of `src`. is set to the basename of `src`.
Raises a `ValueError` if `src` points to a directory instead of a file. Raises a `ValueError` if `src` points to a directory instead of a file.
""" """
src_path = Path(src) src_path = Path(src)
@@ -108,7 +108,7 @@ class ArtifactManager:
The newly created artifact's `filename`, `type` and `description` fields are set to the The newly created artifact's `filename`, `type` and `description` fields are set to the
given `filename`, `artifact_type` and `description` arguments respectively. given `filename`, `artifact_type` and `description` arguments respectively.
Raises a `ValueError` if `artifact_type` is not one of the `ArtifactType` values which represents an image. Raises a `ValueError` if `artifact_type` is not one of the `ArtifactType` values which represents an image.
""" """
if artifact_type not in _IMAGE_ARTIFACT_TYPES: if artifact_type not in _IMAGE_ARTIFACT_TYPES:
raise ValueError("artifact_type should be a type that represents an image artifact") raise ValueError("artifact_type should be a type that represents an image artifact")

View File

@@ -1,4 +1,5 @@
"""Misc utility functions""" """Misc utility functions"""
from argparse import ArgumentParser
import logging import logging
import os import os
from pathlib import Path from pathlib import Path
@@ -10,6 +11,9 @@ import requests
import vgamepad as vg import vgamepad as vg
import json import json
import re import re
import sys
user.FAILSAFE = False
class LTTGamePad360(vg.VX360Gamepad): class LTTGamePad360(vg.VX360Gamepad):
""" """
@@ -19,7 +23,8 @@ class LTTGamePad360(vg.VX360Gamepad):
This class extension provides some useful functions to make your code look a little cleaner when This class extension provides some useful functions to make your code look a little cleaner when
implemented in our harnesses. implemented in our harnesses.
""" """
def single_press(self, button = vg.XUSB_BUTTON.XUSB_GAMEPAD_DPAD_DOWN, pause = 0.1):
def single_press(self, button=vg.XUSB_BUTTON.XUSB_GAMEPAD_DPAD_DOWN, pause=0.1):
""" """
Custom function to perform a single press of a specified gamepad button Custom function to perform a single press of a specified gamepad button
@@ -59,6 +64,7 @@ class LTTGamePad360(vg.VX360Gamepad):
self.single_press(button) self.single_press(button)
time.sleep(pause) time.sleep(pause)
class LTTGamePadDS4(vg.VDS4Gamepad): class LTTGamePadDS4(vg.VDS4Gamepad):
""" """
Class extension for the virtual game pad library Class extension for the virtual game pad library
@@ -67,7 +73,8 @@ class LTTGamePadDS4(vg.VDS4Gamepad):
This class extension provides some useful functions to make your code look a little cleaner when This class extension provides some useful functions to make your code look a little cleaner when
implemented in our harnesses. implemented in our harnesses.
""" """
def single_button_press(self, button = vg.DS4_BUTTONS.DS4_BUTTON_CROSS, fastpause = 0.05):
def single_button_press(self, button=vg.DS4_BUTTONS.DS4_BUTTON_CROSS, fastpause=0.05):
""" """
Custom function to perform a single press of a specified gamepad digital button Custom function to perform a single press of a specified gamepad digital button
@@ -95,7 +102,6 @@ class LTTGamePadDS4(vg.VDS4Gamepad):
time.sleep(fastpause) time.sleep(fastpause)
self.release_button(button=button) self.release_button(button=button)
self.update() self.update()
def button_press_n_times(self, button: vg.DS4_BUTTONS, n: int, pause: float): def button_press_n_times(self, button: vg.DS4_BUTTONS, n: int, pause: float):
""" """
@@ -105,7 +111,7 @@ class LTTGamePadDS4(vg.VDS4Gamepad):
self.single_button_press(button) self.single_button_press(button)
time.sleep(pause) time.sleep(pause)
def single_dpad_press(self, direction = vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH, pause = 0.1): def single_dpad_press(self, direction=vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH, pause=0.1):
""" """
Custom function to perform a single press of a specified gamepad button Custom function to perform a single press of a specified gamepad button
@@ -139,6 +145,7 @@ class LTTGamePadDS4(vg.VDS4Gamepad):
self.single_dpad_press(direction) self.single_dpad_press(direction)
time.sleep(pause) time.sleep(pause)
def clickme(x: int, y: int): def clickme(x: int, y: int):
"""Pyautogui's click function sucks, this should do the trick""" """Pyautogui's click function sucks, this should do the trick"""
gui.moveTo(x, y) gui.moveTo(x, y)
@@ -147,10 +154,11 @@ def clickme(x: int, y: int):
time.sleep(0.2) time.sleep(0.2)
gui.mouseUp() gui.mouseUp()
def mouse_scroll_n_times(n: int, scroll_amount: int, pause: float): def mouse_scroll_n_times(n: int, scroll_amount: int, pause: float):
""" """
Pyautogui's mouse scroll function often fails to actually scroll in game menus; this function solves that problem Pyautogui's mouse scroll function often fails to actually scroll in game menus; this function solves that problem
n --> the number of times you want to scroll, should be a positive integer n --> the number of times you want to scroll, should be a positive integer
scroll_amount --> positive is scroll up, negative is scroll down scroll_amount --> positive is scroll up, negative is scroll down
pause --> the amount of time to pause between subsequent scrolls pause --> the amount of time to pause between subsequent scrolls
@@ -159,12 +167,19 @@ def mouse_scroll_n_times(n: int, scroll_amount: int, pause: float):
gui.vscroll(scroll_amount) gui.vscroll(scroll_amount)
time.sleep(pause) time.sleep(pause)
def press_n_times(key: str, n: int, pause: float):
def int_time() -> int:
"""Returns the current time in seconds since epoch as an integer"""
return int(time.time())
def press_n_times(key: str, n: int, pause: float = 0.5):
"""A helper function press the same button multiple times""" """A helper function press the same button multiple times"""
for _ in range(n): for _ in range(n):
user.press(key) user.press(key)
time.sleep(pause) time.sleep(pause)
def remove_files(paths: list[str]) -> None: def remove_files(paths: list[str]) -> None:
"""Removes files specified by provided list of file paths. """Removes files specified by provided list of file paths.
Does nothing for a path that does not exist. Does nothing for a path that does not exist.
@@ -199,7 +214,7 @@ def extract_file_from_archive(zip_file: Path, member_path: str, destination_dir:
def find_eg_game_version(gamefoldername: str) -> str: def find_eg_game_version(gamefoldername: str) -> str:
"""Find the version of the specific game (e.g., AlanWake2) from the launcher installed data.""" """Find the version of the specific game (e.g., AlanWake2) from the launcher installed data."""
installerdat = r"C:\ProgramData\Epic\UnrealEngineLauncher\LauncherInstalled.dat" installerdat = r"C:\ProgramData\Epic\UnrealEngineLauncher\LauncherInstalled.dat"
try: try:
# Open the file and read its entire content # Open the file and read its entire content
with open(installerdat, encoding="utf-8") as file: with open(installerdat, encoding="utf-8") as file:
@@ -213,7 +228,7 @@ def find_eg_game_version(gamefoldername: str) -> str:
# Extract the InstallationList part from the file # Extract the InstallationList part from the file
installation_list_json = installation_list_match.group(1) installation_list_json = installation_list_match.group(1)
# Load the installation list as JSON # Load the installation list as JSON
installation_list = json.loads(installation_list_json) installation_list = json.loads(installation_list_json)
@@ -228,3 +243,20 @@ def find_eg_game_version(gamefoldername: str) -> str:
print(f"Error: {e}") print(f"Error: {e}")
return None return None
def find_word(keras_service, word, msg, timeout=30, interval=1):
"""Function to call Keras service to find a word in the screen"""
if keras_service.wait_for_word(word=word, timeout=timeout, interval=interval) is None:
logging.error(msg)
sys.exit(1)
def keras_args():
"""helper function to get args for keras"""
parser = ArgumentParser()
parser.add_argument("--kerasHost", dest="keras_host",
help="Host for Keras OCR service", required=True)
parser.add_argument("--kerasPort", dest="keras_port",
help="Port for Keras OCR service", required=True)
return parser.parse_args()
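# Typical harness usage (sketch; assumes KerasService from harness_utils.keras_service):
#   args = keras_args()
#   keras = KerasService(args.keras_host, args.keras_port)
#   find_word(keras, "results", "Results screen not found", timeout=90)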

View File

@@ -1,17 +1,20 @@
"""Functions related to logging and formatting output from test harnesses.""" """Functions related to logging and formatting output from test harnesses."""
import json import json
import os import os
import logging
DEFAULT_LOGGING_FORMAT = '%(asctime)s %(levelname)-s %(message)s' DEFAULT_LOGGING_FORMAT = '%(asctime)s %(levelname)-s %(message)s'
DEFAULT_DATE_FORMAT = '%m-%d %H:%M' DEFAULT_DATE_FORMAT = '%m-%d %H:%M'
def setup_log_directory(log_dir: str) -> None: def setup_log_directory(log_dir: str) -> None:
"""Creates the log directory for a harness if it does not already exist""" """Creates the log directory for a harness if it does not already exist"""
if not os.path.isdir(log_dir): if not os.path.isdir(log_dir):
os.mkdir(log_dir) os.mkdir(log_dir)
def write_report_json(log_dir: str, report_name: str, report_json: any) -> None: def write_report_json(
log_dir: str, report_name: str, report_json: dict) -> None:
"""Writes the json output of a harness to the log directory""" """Writes the json output of a harness to the log directory"""
with open(os.path.join(log_dir, report_name), "w", encoding="utf-8") as file: with open(os.path.join(log_dir, report_name), "w", encoding="utf-8") as file:
file.write(json.dumps(report_json)) file.write(json.dumps(report_json))
@@ -25,3 +28,17 @@ def format_resolution(width: int, height: int) -> str:
def seconds_to_milliseconds(seconds: float | int) -> int: def seconds_to_milliseconds(seconds: float | int) -> int:
"""Convert seconds to milliseconds""" """Convert seconds to milliseconds"""
return round((seconds * 1000)) return round((seconds * 1000))
def setup_logging(log_directory: str) -> None:
"""Sets up logging for the harness"""
setup_log_directory(log_directory)
logging.basicConfig(filename=f'{log_directory}/harness.log',
format=DEFAULT_LOGGING_FORMAT,
datefmt=DEFAULT_DATE_FORMAT,
level=logging.DEBUG)
console = logging.StreamHandler()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
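# Usage sketch: a harness can now replace its local logging boilerplate with:
#   from harness_utils.output import setup_logging
#   setup_logging(str(LOG_DIRECTORY))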

View File

@@ -1,12 +1,11 @@
# Marvel Rivals # Marvel Rivals
This benchmark runs a replay of a Season 1 tournament Double Elimination round game between SendHelp and BeerLovers This harness runs the canned benchmark built into the Marvel Rivals settings.
## Prerequisites ## Prerequisites
- Python 3.10+ - Python 3.10+
- Marvel Rivals installed on Steam - Marvel Rivals installed on Steam
- Keras OCR service - Keras OCR service
- Favoriting replay ID 10518740076
## Options ## Options

View File

@@ -7,7 +7,7 @@ import time
import pyautogui as gui import pyautogui as gui
import pydirectinput as user import pydirectinput as user
import sys import sys
from marvelrivals_utils import read_resolution from marvelrivals_utils import read_resolution, find_latest_benchmarkcsv
import subprocess import subprocess
sys.path.insert(1, os.path.join(sys.path[0], '..')) sys.path.insert(1, os.path.join(sys.path[0], '..'))
@@ -33,7 +33,9 @@ LAUNCHER_NAME = "MarvelRivals_Launcher.exe"
APPDATA = os.getenv("LOCALAPPDATA") APPDATA = os.getenv("LOCALAPPDATA")
CONFIG_LOCATION = f"{APPDATA}\\Marvel\\Saved\\Config\\Windows" CONFIG_LOCATION = f"{APPDATA}\\Marvel\\Saved\\Config\\Windows"
CONFIG_FILENAME = "GameUserSettings.ini" CONFIG_FILENAME = "GameUserSettings.ini"
cfg = f"{CONFIG_LOCATION}\\{CONFIG_FILENAME}" CFG = f"{CONFIG_LOCATION}\\{CONFIG_FILENAME}"
user.FAILSAFE = False
am = ArtifactManager(LOG_DIR) am = ArtifactManager(LOG_DIR)
@@ -49,19 +51,20 @@ def setup_logging():
console.setFormatter(formatter) console.setFormatter(formatter)
logging.getLogger('').addHandler(console) logging.getLogger('').addHandler(console)
def start_game(): def start_game():
"""Starts the game process""" """Starts the game process"""
game_path = get_app_install_location(STEAM_GAME_ID) game_path = get_app_install_location(STEAM_GAME_ID)
process_path = os.path.join(game_path, LAUNCHER_NAME) # Full path to the executable process_path = os.path.join(game_path, LAUNCHER_NAME) # Full path to the executable
logging.info(f"Starting game: {process_path}") logging.info("Starting game: %s", process_path)
process = subprocess.Popen([process_path], cwd=game_path) process = subprocess.Popen([process_path], cwd=game_path) # pylint: disable=R1732
return process return process
def run_benchmark(keras_service): def run_benchmark(keras_service):
"""Run Marvel Rivals benchmark""" """Run Marvel Rivals benchmark"""
setup_start_time = int(time.time()) setup_start_time = int(time.time())
start_game() start_game()
# wait for the launcher to open, then click its Launch button to start the game # wait for the launcher to open, then click its Launch button to start the game
time.sleep(20) time.sleep(20)
@@ -84,7 +87,13 @@ def run_benchmark(keras_service):
gui.mouseDown() gui.mouseDown()
time.sleep(0.2) time.sleep(0.2)
gui.mouseUp() gui.mouseUp()
time.sleep(0.5) time.sleep(20)
#checking if a marketing notification has come up
result = keras_service.wait_for_word("view", timeout=15, interval=1)
if result:
user.press("escape")
time.sleep(0.5)
#navigating to the video settings and taking screenshots #navigating to the video settings and taking screenshots
result = keras_service.wait_for_word("play", timeout=30, interval=1) result = keras_service.wait_for_word("play", timeout=30, interval=1)
@@ -125,42 +134,14 @@ def run_benchmark(keras_service):
time.sleep(1) time.sleep(1)
#navigate to the player profile #navigate to the player profile
user.press("escape") mouse_scroll_n_times(10, 800, 0.2)
time.sleep(1) time.sleep(1)
result = keras_service.wait_for_word("play", timeout=30, interval=1)
result = keras_service.wait_for_word("run", timeout=30, interval=1)
if not result: if not result:
logging.info("Did not find the play menu. Did it press escape?") logging.info("Did not find the Performance Test. Did it scroll back up properly?")
sys.exit(1) sys.exit(1)
time.sleep(1)
height, width = read_resolution()
location = None
# We check the resolution so we know which screenshot to use for the locate on screen function
match width:
case "1280":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\profile_720.png", confidence=0.9)
case "1920":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\profile_1080.png", confidence=0.9)
case "2560":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\profile_1440.png", confidence=0.9)
case "3840":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\profile_2160.png", confidence=0.9)
case _:
logging.error("Could not find the profile icon. The game resolution is currently %s, %s. Are you using a standard resolution?", height, width)
sys.exit(1)
click_me = gui.center(location)
gui.moveTo(click_me.x, click_me.y)
gui.mouseDown()
time.sleep(0.2)
gui.mouseUp()
time.sleep(0.5)
#navigate to the replays section
result = keras_service.wait_for_word("favorites", timeout=30, interval=1)
if not result:
logging.info("Did not find the favorites menu. Did it navigate properly to it?")
sys.exit(1)
gui.moveTo(result["x"], result["y"]) gui.moveTo(result["x"], result["y"])
time.sleep(0.2) time.sleep(0.2)
gui.mouseDown() gui.mouseDown()
@@ -168,69 +149,42 @@ def run_benchmark(keras_service):
gui.mouseUp() gui.mouseUp()
time.sleep(1) time.sleep(1)
result = keras_service.wait_for_word("match", timeout=30, interval=1) result = keras_service.wait_for_word("start", timeout=30, interval=1)
if not result: if not result:
logging.info("Did not find the match replays menu. Did it click correctly?") logging.info("Did not find the Start Test button. Keras click correctly?")
sys.exit(1) sys.exit(1)
gui.moveTo(result["x"], result["y"]) gui.moveTo(result["x"], result["y"])
time.sleep(0.2) time.sleep(0.2)
gui.mouseDown() gui.mouseDown()
time.sleep(0.2) time.sleep(0.2)
gui.mouseUp() gui.mouseUp()
time.sleep(1) time.sleep(1)
#starting the benchmark replay
result = keras_service.wait_for_word("shibuya", timeout=30, interval=1)
if not result:
logging.info("Did not find the replay we were looking for. Is it not saved in the favorites?")
sys.exit(1)
match width:
case "1280":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\play_720.png", confidence=0.9)
case "1920":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\play_1080.png", confidence=0.9)
case "2560":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\play_1440.png", confidence=0.9)
case "3840":
location = gui.locateOnScreen(f"{SCRIPT_DIR}\\screenshots\\play_2160.png", confidence=0.9)
case _:
logging.error("Could not find the play button. The game resolution is currently %s, %s. Are you using a standard resolution?", height, width)
sys.exit(1)
click_me = gui.center(location)
gui.moveTo(click_me.x, click_me.y)
gui.mouseDown()
time.sleep(0.2)
gui.mouseUp()
time.sleep(0.5)
#marking the in-time #marking the end time
setup_end_time = int(time.time()) setup_end_time = int(time.time())
elapsed_setup_time = round(setup_end_time - setup_start_time, 2) elapsed_setup_time = round(setup_end_time - setup_start_time, 2)
logging.info("Harness setup took %f seconds", elapsed_setup_time) logging.info("Harness setup took %f seconds", elapsed_setup_time)
time.sleep(2) time.sleep(2)
#looking for the player name to start wait timer till we get into the actual game
result = keras_service.wait_for_word("dluo", timeout=30, interval=1)
if not result:
logging.info("Did not find the player Dluo. Did the replay start?")
sys.exit(1)
time.sleep(90)
#looking for landmark to mark benchmark start time and then wait for first round to finish #looking for the FPS data graph
if keras_service.wait_for_word(word="defend", timeout=30, interval=1) is None: result = keras_service.wait_for_word("fps", timeout=30, interval=1)
logging.info("Didn't see the defend waypoint. Did the game crash?") if not result:
logging.info("Did not find the FPS graph. Did the replay start?")
sys.exit(1) sys.exit(1)
test_start_time = int(time.time()) + 2
time.sleep(460) test_start_time = int(time.time())
time.sleep(98)
#checking that first round has finished #checking that first round has finished
result = keras_service.wait_for_word("complete", timeout=30, interval=1) result = keras_service.wait_for_word("again", timeout=30, interval=1)
if not result: if not result:
logging.info("First round doesn't appear to have finished. Did the replay start?") logging.info("Didn't see the results screen. Did the test crash?")
sys.exit(1) sys.exit(1)
test_end_time = int(time.time()) test_end_time = int(time.time())
am.copy_file(Path(cfg), ArtifactType.CONFIG_TEXT, "Marvel Rivals Video Config") am.copy_file(Path(CFG), ArtifactType.CONFIG_TEXT, "Marvel Rivals Video Config")
am.copy_file(Path(find_latest_benchmarkcsv()), ArtifactType.CONFIG_TEXT, "Marvel Rivals Benchmark CSV")
logging.info("Run completed. Closing game.") logging.info("Run completed. Closing game.")
time.sleep(2) time.sleep(2)
@@ -274,4 +228,4 @@ if __name__ == "__main__":
except Exception as ex: except Exception as ex:
logging.error("something went wrong running the benchmark!") logging.error("something went wrong running the benchmark!")
logging.exception(ex) logging.exception(ex)
sys.exit(1) sys.exit(1)
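The optional-popup check above (wait briefly for a word, press escape if it appears) recurs across several harnesses; a reusable sketch, assuming the keras_service object and the pydirectinput alias `user` already imported in this script, might look like:

def dismiss_popup_if_present(keras_service, word, key="escape", timeout=15):
    """Press `key` if an optional popup containing `word` shows up; return True if dismissed."""
    result = keras_service.wait_for_word(word, timeout=timeout, interval=1)
    if result:
        user.press(key)
        time.sleep(0.5)
        return True
    return False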

View File

@@ -29,4 +29,13 @@ def read_resolution():
height = height_match.group(1) height = height_match.group(1)
if width_match is not None: if width_match is not None:
width = width_match.group(1) width = width_match.group(1)
return (height, width) return (height, width)
def find_latest_benchmarkcsv():
"""find latest log from the benchmark"""
appdata_path = os.getenv('LOCALAPPDATA')
benchmarkcsv_dir = Path(appdata_path) / "Marvel" / "Saved" / "Benchmark"
files = [os.path.join(benchmarkcsv_dir, file) for file in os.listdir(
benchmarkcsv_dir) if os.path.isfile(os.path.join(benchmarkcsv_dir, file))]
latest_file = max(files, key=os.path.getmtime)
return latest_file
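Note that max() raises ValueError when the Benchmark folder is empty; a defensive variant of the same lookup (a sketch only, reusing the os and pathlib imports of this module) could be:

def find_latest_benchmarkcsv_safe():
    """Return the newest file in the Marvel benchmark folder, or None if it is empty."""
    benchmark_dir = Path(os.getenv("LOCALAPPDATA")) / "Marvel" / "Saved" / "Benchmark"
    files = [p for p in benchmark_dir.iterdir() if p.is_file()]
    return str(max(files, key=lambda p: p.stat().st_mtime)) if files else None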

Binary files not shown (8 deleted screenshot PNGs, 965 B–10 KiB).

View File

@@ -1,4 +1,5 @@
"""UL Procyon Computer Vision test script""" """UL Procyon Computer Vision test script"""
# pylint: disable=no-name-in-module
from argparse import ArgumentParser from argparse import ArgumentParser
import logging import logging
from pathlib import Path from pathlib import Path
@@ -32,7 +33,7 @@ from harness_utils.procyoncmd import (
get_cuda_devices, get_cuda_devices,
) )
##### #####
### Globals # Globals
##### #####
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run" LOG_DIR = SCRIPT_DIR / "run"
@@ -48,74 +49,87 @@ CONFIG_DIR = SCRIPT_DIR / "config"
BENCHMARK_CONFIG = { BENCHMARK_CONFIG = {
"AMD_CPU": { "AMD_CPU": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_winml_cpu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_winml_cpu.def\"",
"process_name": "WinML.exe", "process_name": "WinML.exe",
"device_name": "CPU", "device_name": "CPU",
"device_id": "CPU", # TODO: Find a good way to report the CPU name here. # TODO: Find a good way to report the CPU name here.
"test_name": "WinML CPU (FLOAT32)" "device_id": "CPU",
"test_name": "cpu_float32",
"api": "winml"
}, },
"AMD_GPU0": { "AMD_GPU0": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_winml_gpu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_winml_gpu.def\"",
"process_name": "WinML.exe", "process_name": "WinML.exe",
"device_name": list(WINML_DEVICES.keys())[0], "device_name": list(WINML_DEVICES.keys())[0],
"device_id": list(WINML_DEVICES.values())[0], "device_id": list(WINML_DEVICES.values())[0],
"test_name": "WinML GPU (FLOAT32)" "test_name": "gpu_float32",
"api": "winml"
}, },
"AMD_GPU1": { "AMD_GPU1": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_winml_gpu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_winml_gpu.def\"",
"process_name": "WinML.exe", "process_name": "WinML.exe",
"device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0], "device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0],
"device_id": list(WINML_DEVICES.values())[1] if len(list(WINML_DEVICES.values())) > 1 else list(WINML_DEVICES.values())[0], "device_id": list(WINML_DEVICES.values())[1] if len(list(WINML_DEVICES.values())) > 1 else list(WINML_DEVICES.values())[0],
"test_name": "WinML GPU (FLOAT32)" "test_name": "gpu_float32",
"api": "winml"
}, },
"Intel_CPU": { "Intel_CPU": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_cpu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_cpu.def\"",
"process_name": "OpenVino.exe", "process_name": "OpenVino.exe",
"device_id": "CPU", "device_id": "CPU",
"device_name": OPENVINO_DEVICES["CPU"], "device_name": OPENVINO_DEVICES["CPU"],
"test_name": "Intel OpenVINO CPU (FLOAT32)" "test_name": "cpu_float32",
"api": "openvino"
}, },
"Intel_GPU0": { "Intel_GPU0": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_gpu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_gpu.def\"",
"process_name": "OpenVino.exe", "process_name": "OpenVino.exe",
"device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.0"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
"test_name": "Intel OpenVINO GPU 0 (FLOAT32)" "test_name": "gpu_float32",
"api": "openvino"
}, },
"Intel_GPU1": { "Intel_GPU1": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_gpu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_gpu.def\"",
"process_name": "OpenVino.exe", "process_name": "OpenVino.exe",
"device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.0"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
"test_name": "Intel OpenVINO GPU 1 (FLOAT32)" "test_name": "gpu_float32",
"api": "openvino"
}, },
"Intel_NPU": { "Intel_NPU": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_npu.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_openvino_npu.def\"",
"process_name": "OpenVino.exe", "process_name": "OpenVino.exe",
"device_id": "NPU", "device_id": "NPU",
"device_name": OPENVINO_DEVICES.get("NPU", "None"), "device_name": OPENVINO_DEVICES.get("NPU", "None"),
"test_name": "Intel OpenVINO NPU (FLOAT32)" "test_name": "npu_float32",
"api": "openvino"
}, },
"NVIDIA_GPU": { "NVIDIA_GPU": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_tensorrt.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_tensorrt.def\"",
"device_id": "cuda:0", "device_id": "cuda:0",
"device_name": CUDA_DEVICES.get("cuda:0"), "device_name": CUDA_DEVICES.get("cuda:0"),
"process_name": "TensorRT.exe", "process_name": "TensorRT.exe",
"test_name": "NVIDIA TensorRT (FLOAT32)" "test_name": "gpu_float32",
"api": "tensorrt"
}, },
"Qualcomm_HTP": { "Qualcomm_HTP": {
"config": f"\"{CONFIG_DIR}\\ai_computer_vision_snpe.def\"", "config": f"\"{CONFIG_DIR}\\ai_computer_vision_snpe.def\"",
"device_id": "CPU", "device_id": "CPU",
"device_name": "CPU", "device_name": "CPU",
"process_name": "SNPE.exe", "process_name": "SNPE.exe",
"test_name": "Qualcomm SNPE (INTEGER)" "test_name": "htp_integer",
"api": "snpe"
}, },
} }
RESULTS_FILENAME = "result.xml" RESULTS_FILENAME = "result.xml"
REPORT_PATH = LOG_DIR / RESULTS_FILENAME REPORT_PATH = LOG_DIR / RESULTS_FILENAME
def setup_logging(): def setup_logging():
"""setup logging""" """setup logging"""
setup_log_directory(LOG_DIR) setup_log_directory(str(LOG_DIR))
logging.basicConfig(filename=LOG_DIR / "harness.log", logging.basicConfig(filename=LOG_DIR / "harness.log",
format=DEFAULT_LOGGING_FORMAT, format=DEFAULT_LOGGING_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
@@ -130,7 +144,8 @@ def get_arguments():
"""get arguments""" """get arguments"""
parser = ArgumentParser() parser = ArgumentParser()
parser.add_argument( parser.add_argument(
"--engine", dest="engine", help="Engine test type", required=True, choices=BENCHMARK_CONFIG.keys()) "--engine", dest="engine", help="Engine test type", required=True,
choices=BENCHMARK_CONFIG.keys())
argies = parser.parse_args() argies = parser.parse_args()
return argies return argies
@@ -160,16 +175,17 @@ def run_benchmark(process_name, command_to_run):
while True: while True:
now = time.time() now = time.time()
elapsed = now - start_time elapsed = now - start_time
if elapsed >= 60: #seconds if elapsed >= 60: # seconds
raise ValueError("BenchMark subprocess did not start in time") raise ValueError("BenchMark subprocess did not start in time")
process = is_process_running(process_name) process = is_process_running(process_name)
if process is not None: if process is not None:
process.nice(psutil.HIGH_PRIORITY_CLASS) process.nice(psutil.HIGH_PRIORITY_CLASS)
break break
time.sleep(0.2) time.sleep(0.2)
_, _ = proc.communicate() # blocks until the Procyon workload exits _, _ = proc.communicate() # blocks until the Procyon workload exits
return proc return proc
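The same start-up poll appears in all three Procyon harnesses; a reusable sketch of the pattern (assuming the is_process_running helper from procyoncmd.py and the time/psutil imports already present) would be:

def wait_for_process(process_name, timeout=60.0, poll_interval=0.2):
    """Poll until `process_name` is running; raise ValueError on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        process = is_process_running(process_name)
        if process is not None:
            process.nice(psutil.HIGH_PRIORITY_CLASS)  # bump priority once found
            return process
        time.sleep(poll_interval)
    raise ValueError(f"{process_name} did not start within {timeout} seconds")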
try: try:
setup_logging() setup_logging()
logging.info("Detected Windows ML Devices: %s", str(WINML_DEVICES)) logging.info("Detected Windows ML Devices: %s", str(WINML_DEVICES))
@@ -203,15 +219,17 @@ try:
report = { report = {
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time), "end_time": seconds_to_milliseconds(end_time),
"test": BENCHMARK_CONFIG[args.engine]["test_name"], "test": "Procyon AI CV",
"test_parameter": BENCHMARK_CONFIG[args.engine]["test_name"],
"api": BENCHMARK_CONFIG[args.engine]["api"],
"test_version": find_test_version(), "test_version": find_test_version(),
"device_name": BENCHMARK_CONFIG[args.engine]["device_name"], "device_name": BENCHMARK_CONFIG[args.engine]["device_name"],
"procyon_version": find_procyon_version(), "procyon_version": find_procyon_version(),
"unit": "score", "unit": "score",
"score": score "score": score
} }
write_report_json(LOG_DIR, "report.json", report) write_report_json(str(LOG_DIR), "report.json", report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)

View File

@@ -1,4 +1,5 @@
"""UL Procyon AI Image Generation test script""" """UL Procyon AI Image Generation test script"""
# pylint: disable=no-name-in-module
from argparse import ArgumentParser from argparse import ArgumentParser
import logging import logging
from pathlib import Path from pathlib import Path
@@ -26,7 +27,7 @@ from harness_utils.output import (
) )
##### #####
### Globals # Globals
##### #####
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run" LOG_DIR = SCRIPT_DIR / "run"
@@ -41,102 +42,117 @@ CONFIG_DIR = SCRIPT_DIR / "config"
BENCHMARK_CONFIG = { BENCHMARK_CONFIG = {
"AMD_GPU0_FP16": { "AMD_GPU0_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_onnxruntime.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_onnxruntime.def\"",
"process_name": "ort-directml.exe", "process_name": "ort-directml.exe",
"device_name": list(WINML_DEVICES.keys())[0], "device_name": list(WINML_DEVICES.keys())[0],
"device_id": "0", "device_id": "0",
"test_name": "ONNX Stable Diffusion FP16" "test_name": "stable_diffusion_fp16",
"api": "onnx"
}, },
"AMD_GPU1_FP16": { "AMD_GPU1_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_onnxruntime.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_onnxruntime.def\"",
"process_name": "ort-directml.exe", "process_name": "ort-directml.exe",
"device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0], "device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0],
"device_id": "1" if len(list(WINML_DEVICES.values())) > 1 else "0", "device_id": "1" if len(list(WINML_DEVICES.values())) > 1 else "0",
"test_name": "ONNX Stable Diffusion FP16" "test_name": "stable_diffusion_fp16",
"api": "onnx"
}, },
"AMD_GPU0_XL_FP16": { "AMD_GPU0_XL_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_onnxruntime.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_onnxruntime.def\"",
"process_name": "ort-directml.exe", "process_name": "ort-directml.exe",
"device_name": list(WINML_DEVICES.keys())[0], "device_name": list(WINML_DEVICES.keys())[0],
"device_id": "0", "device_id": "0",
"test_name": "ONNX Stable Diffusion FP16 XL" "test_name": "stable_diffusion_fp16_xl",
"api": "onnx"
}, },
"AMD_GPU1_XL_FP16": { "AMD_GPU1_XL_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_onnxruntime.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_onnxruntime.def\"",
"process_name": "ort-directml.exe", "process_name": "ort-directml.exe",
"device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0], "device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0],
"device_id": list(WINML_DEVICES.values())[1] if len(list(WINML_DEVICES.values())) > 1 else list(WINML_DEVICES.values())[0], "device_id": list(WINML_DEVICES.values())[1] if len(list(WINML_DEVICES.values())) > 1 else list(WINML_DEVICES.values())[0],
"test_name": "ONNX Stable Diffusion FP16 XL" "test_name": "stable_diffusion_fp16_xl",
"api": "onnx"
}, },
"Intel_GPU0_INT8": { "Intel_GPU0_INT8": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_openvino.def\"",
"process_name": "openvino.exe", "process_name": "openvino.exe",
"device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.0"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
"test_name": "Intel OpenVINO Stable Diffusion INT8" "test_name": "stable_diffusion_int8",
"api": "openvino"
}, },
"Intel_GPU0_FP16": { "Intel_GPU0_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_openvino.def\"",
"process_name": "openvino.exe", "process_name": "openvino.exe",
"device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.0"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
"test_name": "Intel OpenVINO Stable Diffusion FP16" "test_name": "stable_diffusion_fp16",
"api": "openvino"
}, },
"Intel_GPU0_XL_FP16": { "Intel_GPU0_XL_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_openvino.def\"",
"process_name": "openvino.exe", "process_name": "openvino.exe",
"device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.0"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
"test_name": "Intel OpenVINO Stable Diffusion FP16 XL" "test_name": "stable_diffusion_fp16_xl",
"api": "openvino"
}, },
"Intel_GPU1_INT8": { "Intel_GPU1_INT8": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_openvino.def\"",
"process_name": "openvino.exe", "process_name": "openvino.exe",
"device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.1"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.1"),
"test_name": "Intel OpenVINO Stable Diffusion INT8" "test_name": "stable_diffusion_int8",
"api": "openvino"
}, },
"Intel_GPU1_FP16": { "Intel_GPU1_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_openvino.def\"",
"process_name": "openvino.exe", "process_name": "openvino.exe",
"device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.1"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.1"),
"test_name": "Intel OpenVINO Stable Diffusion FP16" "test_name": "stable_diffusion_fp16",
"api": "openvino"
}, },
"Intel_GPU1_XL_FP16": { "Intel_GPU1_XL_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_openvino.def\"",
"process_name": "openvino.exe", "process_name": "openvino.exe",
"device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU", "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
"device_name": get_openvino_gpu(OPENVINO_DEVICES ,"GPU.1"), "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.1"),
"test_name": "Intel OpenVINO Stable Diffusion FP16 XL" "test_name": "stable_diffusion_fp16_xl",
"api": "openvino"
}, },
"NVIDIA_GPU_INT8": { "NVIDIA_GPU_INT8": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_tensorrt.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_tensorrt.def\"",
"process_name": "tensorrt.exe", "process_name": "tensorrt.exe",
"device_id": "cuda:0", "device_id": "cuda:0",
"device_name": CUDA_DEVICES.get("cuda:0"), "device_name": CUDA_DEVICES.get("cuda:0"),
"test_name": "NVIDIA TensorRT Stable Diffusion INT8" "test_name": "stable_diffusion_int8",
"api": "tensorrt"
}, },
"NVIDIA_GPU_FP16": { "NVIDIA_GPU_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_tensorrt.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_tensorrt.def\"",
"process_name": "tensorrt.exe", "process_name": "tensorrt.exe",
"device_id": "cuda:0", "device_id": "cuda:0",
"device_name": CUDA_DEVICES.get("cuda:0"), "device_name": CUDA_DEVICES.get("cuda:0"),
"test_name": "NVIDIA TensorRT Stable Diffusion FP16" "test_name": "stable_diffusion_fp16",
"api": "tensorrt"
}, },
"NVIDIA_GPU_XL_FP16": { "NVIDIA_GPU_XL_FP16": {
"config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_tensorrt.def\"", "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_tensorrt.def\"",
"process_name": "tensorrt.exe", "process_name": "tensorrt.exe",
"device_id": "cuda:0", "device_id": "cuda:0",
"device_name": CUDA_DEVICES.get("cuda:0"), "device_name": CUDA_DEVICES.get("cuda:0"),
"test_name": "NVIDIA TensorRT Stable Diffusion FP16 XL" "test_name": "stable_diffusion_fp16_xl",
"api": "tensorrt"
} }
} }
RESULTS_FILENAME = "result.xml" RESULTS_FILENAME = "result.xml"
REPORT_PATH = LOG_DIR / RESULTS_FILENAME REPORT_PATH = LOG_DIR / RESULTS_FILENAME
def setup_logging(): def setup_logging():
"""setup logging""" """setup logging"""
setup_log_directory(LOG_DIR) setup_log_directory(str(LOG_DIR))
logging.basicConfig(filename=LOG_DIR / "harness.log", logging.basicConfig(filename=LOG_DIR / "harness.log",
format=DEFAULT_LOGGING_FORMAT, format=DEFAULT_LOGGING_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
@@ -151,7 +167,8 @@ def get_arguments():
"""get arguments""" """get arguments"""
parser = ArgumentParser() parser = ArgumentParser()
parser.add_argument( parser.add_argument(
"--engine", dest="engine", help="Engine test type", required=True, choices=BENCHMARK_CONFIG.keys()) "--engine", dest="engine", help="Engine test type", required=True,
choices=BENCHMARK_CONFIG.keys())
argies = parser.parse_args() argies = parser.parse_args()
return argies return argies
@@ -179,16 +196,17 @@ def run_benchmark(process_name, command_to_run):
while True: while True:
now = time.time() now = time.time()
elapsed = now - start_time elapsed = now - start_time
if elapsed >= 60: #seconds if elapsed >= 60: # seconds
raise ValueError("BenchMark subprocess did not start in time") raise ValueError("BenchMark subprocess did not start in time")
process = is_process_running(process_name) process = is_process_running(process_name)
if process is not None: if process is not None:
process.nice(psutil.HIGH_PRIORITY_CLASS) process.nice(psutil.HIGH_PRIORITY_CLASS)
break break
time.sleep(0.2) time.sleep(0.2)
_, _ = proc.communicate() # blocks until the Procyon workload exits _, _ = proc.communicate() # blocks until the Procyon workload exits
return proc return proc
try: try:
setup_logging() setup_logging()
logging.info("Detected Windows ML Devices: %s", str(WINML_DEVICES)) logging.info("Detected Windows ML Devices: %s", str(WINML_DEVICES))
@@ -197,7 +215,9 @@ try:
args = get_arguments() args = get_arguments()
option = BENCHMARK_CONFIG[args.engine]["config"] option = BENCHMARK_CONFIG[args.engine]["config"]
cmd = create_procyon_command(option, BENCHMARK_CONFIG[args.engine]["process_name"], BENCHMARK_CONFIG[args.engine]["device_id"]) cmd = create_procyon_command(
option, BENCHMARK_CONFIG[args.engine]["process_name"],
BENCHMARK_CONFIG[args.engine]["device_id"])
logging.info('Starting benchmark!') logging.info('Starting benchmark!')
logging.info(cmd) logging.info(cmd)
start_time = time.time() start_time = time.time()
@@ -221,16 +241,18 @@ try:
report = { report = {
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time), "end_time": seconds_to_milliseconds(end_time),
"test": BENCHMARK_CONFIG[args.engine]["test_name"], "test": "Procyon AI Image Generation",
"test_parameter": BENCHMARK_CONFIG[args.engine]["test_name"],
"api": BENCHMARK_CONFIG[args.engine]["api"],
"test_version": find_test_version(), "test_version": find_test_version(),
"device_name": BENCHMARK_CONFIG[args.engine]["device_name"], "device_name": BENCHMARK_CONFIG[args.engine]["device_name"],
"procyon_version": find_procyon_version(), "procyon_version": find_procyon_version(),
"unit": "score", "unit": "score",
"score": score "score": score
} }
write_report_json(LOG_DIR, "report.json", report) write_report_json(str(LOG_DIR), "report.json", report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)

View File

@@ -1,4 +1,5 @@
"""UL Procyon AI Text Generation test script""" """UL Procyon AI Text Generation test script"""
# pylint: disable=no-name-in-module
from argparse import ArgumentParser from argparse import ArgumentParser
import logging import logging
from pathlib import Path from pathlib import Path
@@ -20,7 +21,7 @@ from harness_utils.output import (
) )
##### #####
### Globals # Globals
##### #####
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run" LOG_DIR = SCRIPT_DIR / "run"
@@ -31,71 +32,83 @@ CONFIG_DIR = SCRIPT_DIR / "config"
BENCHMARK_CONFIG = { BENCHMARK_CONFIG = {
"All_Models_ONNX": { "All_Models_ONNX": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_all.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_all.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AIImageGenerationOverallScore>(\d+)", "result_regex": r"<AIImageGenerationOverallScore>(\d+)",
"test_name": "All LLM Model Text Generation" "test_name": "all_models",
"api": "onnx"
}, },
"Llama_2_13B_ONNX": { "Llama_2_13B_ONNX": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama2.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama2.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationLlama2OverallScore>(\d+)", "result_regex": r"<AiTextGenerationLlama2OverallScore>(\d+)",
"test_name": "LLama 2 Text Generation" "test_name": "llama_2_13b",
"api": "onnx"
}, },
"Llama_3_1_8B_ONNX": { "Llama_3_1_8B_ONNX": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama3.1.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama3.1.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationLlama3OverallScore>(\d+)", "result_regex": r"<AiTextGenerationLlama3OverallScore>(\d+)",
"test_name": "Llama 3.1 Text Generation" "test_name": "llama_3_1_8b",
"api": "onnx"
}, },
"Mistral_7B_ONNX": { "Mistral_7B_ONNX": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_mistral.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_mistral.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationMistralOverallScore>(\d+)", "result_regex": r"<AiTextGenerationMistralOverallScore>(\d+)",
"test_name": "Mistral Text Generation" "test_name": "mistral_7b",
"api": "onnx"
}, },
"Phi_3_5_ONNX": { "Phi_3_5_ONNX": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_phi.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_phi.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationPhiOverallScore>(\d+)", "result_regex": r"<AiTextGenerationPhiOverallScore>(\d+)",
"test_name": "Phi Text Generation" "test_name": "phi_3_5",
"api": "onnx"
}, },
"All_Models_OPENVINO": { "All_Models_OPENVINO": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_all_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_all_openvino.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AIImageGenerationOverallScore>(\d+)", "result_regex": r"<AIImageGenerationOverallScore>(\d+)",
"test_name": "All LLM Model Text Generation" "test_name": "all_models",
"api": "openvino"
}, },
"Llama_2_13B_OPENVINO": { "Llama_2_13B_OPENVINO": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama2_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama2_openvino.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationLlama2OverallScore>(\d+)", "result_regex": r"<AiTextGenerationLlama2OverallScore>(\d+)",
"test_name": "LLama 2 Text Generation" "test_name": "llama_2_13b",
"api": "openvino"
}, },
"Llama_3_1_8B_OPENVINO": { "Llama_3_1_8B_OPENVINO": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama3.1_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama3.1_openvino.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationLlama3OverallScore>(\d+)", "result_regex": r"<AiTextGenerationLlama3OverallScore>(\d+)",
"test_name": "Llama 3.1 Text Generation" "test_name": "llama_3_1_8b",
"api": "openvino"
}, },
"Mistral_7B_OPENVINO": { "Mistral_7B_OPENVINO": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_mistral_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_mistral_openvino.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationMistralOverallScore>(\d+)", "result_regex": r"<AiTextGenerationMistralOverallScore>(\d+)",
"test_name": "Mistral Text Generation" "test_name": "mistral_7b",
"api": "openvino"
}, },
"Phi_3_5_OPENVINO": { "Phi_3_5_OPENVINO": {
"config": f"\"{CONFIG_DIR}\\ai_textgeneration_phi_openvino.def\"", "config": f"\"{CONFIG_DIR}\\ai_textgeneration_phi_openvino.def\"",
"process_name": "Handler.exe", "process_name": "Handler.exe",
"result_regex": r"<AiTextGenerationPhiOverallScore>(\d+)", "result_regex": r"<AiTextGenerationPhiOverallScore>(\d+)",
"test_name": "Phi Text Generation" "test_name": "phi_3_5",
"api": "openvino"
} }
} }
RESULTS_FILENAME = "result.xml" RESULTS_FILENAME = "result.xml"
REPORT_PATH = LOG_DIR / RESULTS_FILENAME REPORT_PATH = LOG_DIR / RESULTS_FILENAME
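Each entry's result_regex is later matched against the result XML; a minimal sketch of that extraction (using the Phi_3_5 pattern above and the REPORT_PATH just defined) is:

import re

xml_text = REPORT_PATH.read_text(encoding="utf-8")
match = re.search(r"<AiTextGenerationPhiOverallScore>(\d+)", xml_text)
score = match.group(1) if match else None  # None when the run produced no score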
def setup_logging(): def setup_logging():
"""setup logging""" """setup logging"""
setup_log_directory(LOG_DIR) setup_log_directory(str(LOG_DIR))
logging.basicConfig(filename=LOG_DIR / "harness.log", logging.basicConfig(filename=LOG_DIR / "harness.log",
format=DEFAULT_LOGGING_FORMAT, format=DEFAULT_LOGGING_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
@@ -110,7 +123,8 @@ def get_arguments():
"""get arguments""" """get arguments"""
parser = ArgumentParser() parser = ArgumentParser()
parser.add_argument( parser.add_argument(
"--engine", dest="engine", help="Engine test type", required=True, choices=BENCHMARK_CONFIG.keys()) "--engine", dest="engine", help="Engine test type", required=True,
choices=BENCHMARK_CONFIG.keys())
argies = parser.parse_args() argies = parser.parse_args()
return argies return argies
@@ -129,16 +143,17 @@ def run_benchmark(process_name, command_to_run):
while True: while True:
now = time.time() now = time.time()
elapsed = now - start_time elapsed = now - start_time
if elapsed >= 60: #seconds if elapsed >= 60: # seconds
raise ValueError("BenchMark subprocess did not start in time") raise ValueError("BenchMark subprocess did not start in time")
process = is_process_running(process_name) process = is_process_running(process_name)
if process is not None: if process is not None:
process.nice(psutil.HIGH_PRIORITY_CLASS) process.nice(psutil.HIGH_PRIORITY_CLASS)
break break
time.sleep(0.2) time.sleep(0.2)
_, _ = proc.communicate() # blocks until the Procyon workload exits _, _ = proc.communicate() # blocks until the Procyon workload exits
return proc return proc
try: try:
setup_logging() setup_logging()
args = get_arguments() args = get_arguments()
@@ -165,7 +180,8 @@ try:
sys.exit(1) sys.exit(1)
report = { report = {
"test": BENCHMARK_CONFIG[args.engine]["test_name"], "test": "Procyon AI Text Generation",
"test_parameter": BENCHMARK_CONFIG[args.engine]["test_name"],
"unit": "score", "unit": "score",
"score": score, "score": score,
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
@@ -175,7 +191,7 @@ try:
logging.info("Benchmark took %.2f seconds", elapsed_test_time) logging.info("Benchmark took %.2f seconds", elapsed_test_time)
logging.info("Score was %s", score) logging.info("Score was %s", score)
write_report_json(LOG_DIR, "report.json", report) write_report_json(str(LOG_DIR), "report.json", report)
else: else:
session_report = [] session_report = []
@@ -198,19 +214,19 @@ try:
report = { report = {
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time), "end_time": seconds_to_milliseconds(end_time),
"test": test_type[0], "test": "Procyon AI Text Generation",
"test_parameter": test_type[1]["test_name"],
"api": test_type[1]["api"],
"test_version": find_test_version(), "test_version": find_test_version(),
"procyon_version": find_procyon_version(), "procyon_version": find_procyon_version(),
"unit": "score", "unit": "score",
"score": score "score": score
}
}
session_report.append(report) session_report.append(report)
write_report_json(LOG_DIR, "report.json", session_report) write_report_json(str(LOG_DIR), "report.json", session_report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")

View File

@@ -10,6 +10,7 @@ import logging
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run" LOG_DIR = SCRIPT_DIR / "run"
def is_process_running(process_name): def is_process_running(process_name):
"""check if given process is running""" """check if given process is running"""
for process in psutil.process_iter(['pid', 'name']): for process in psutil.process_iter(['pid', 'name']):
@@ -17,6 +18,7 @@ def is_process_running(process_name):
return process return process
return None return None
def regex_find_score_in_xml(result_regex): def regex_find_score_in_xml(result_regex):
"""Reads score from local game log""" """Reads score from local game log"""
score_pattern = re.compile(result_regex) score_pattern = re.compile(result_regex)
@@ -30,26 +32,29 @@ def regex_find_score_in_xml(result_regex):
score_value = score_match.group(1) score_value = score_match.group(1)
return score_value return score_value
def get_install_path() -> str: def get_install_path() -> str:
"""Gets the path to the Steam installation directory from the SteamPath registry key""" """Gets the path to the Steam installation directory from the SteamPath registry key"""
reg_path = r"Software\UL\Procyon" reg_path = r"Software\UL\Procyon"
reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path, 0, winreg.KEY_READ) reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
value, _ = winreg.QueryValueEx(reg_key, "InstallDir") reg_path, 0, winreg.KEY_READ)
value, _ = winreg.QueryValueEx(reg_key, "InstallDir")
return value return value
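Worth noting: winreg key handles are context managers, so a leak-free equivalent of the lookup above (a sketch, same key and value names) is:

import winreg

with winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\UL\Procyon") as key:
    install_dir, _ = winreg.QueryValueEx(key, "InstallDir")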
def find_procyon_version() -> str: def find_procyon_version() -> str:
"""Gets the version of an executable located in the install path.""" """Gets the version of an executable located in the install path."""
install_path = get_install_path() install_path = get_install_path()
if not install_path: if not install_path:
logging.info("Installation path not found.") logging.info("Installation path not found.")
return None return ""
exe_path = os.path.join(install_path, "ProcyonCmd.exe") exe_path = os.path.join(install_path, "ProcyonCmd.exe")
if not os.path.exists(exe_path): if not os.path.exists(exe_path):
logging.info(f"Executable not found at {exe_path}") logging.info("Executable not found at %s", exe_path)
return None return ""
try: try:
# Get all file version info # Get all file version info
@@ -61,7 +66,7 @@ def find_procyon_version() -> str:
if ms is None or ls is None: if ms is None or ls is None:
logging.info("No FileVersionMS or FileVersionLS found.") logging.info("No FileVersionMS or FileVersionLS found.")
return None return ""
# Convert to human-readable version: major.minor.build.revision # Convert to human-readable version: major.minor.build.revision
major = ms >> 16 major = ms >> 16
@@ -73,29 +78,31 @@ def find_procyon_version() -> str:
return version return version
except Exception as e: except Exception as e:
logging.info(f"Error retrieving version info from {exe_path}: {e}") logging.info("Error retrieving version info from %s: %s", exe_path, e)
return None # Return None if version info retrieval fails return "" # Return empty string if version info retrieval fails
def find_test_version() -> str: def find_test_version() -> str:
"""Gets the version of an executable located in the chops path.""" """Gets the version of an executable located in the chops path."""
chops_path = "C:\\ProgramData\\UL\\Procyon\\chops\\dlc\\ai-textgeneration-benchmark\\x64" chops_path = "C:\\ProgramData\\UL\\Procyon\\chops\\dlc\\ai-textgeneration-benchmark\\x64"
logging.info(f"The install path for the test is {chops_path}") logging.info("The install path for the test is %s", chops_path)
if not chops_path: if not chops_path:
logging.info("Installation path not found.") logging.info("Installation path not found.")
return None return ""
exe_path = os.path.join(chops_path, "Handler.exe") exe_path = os.path.join(chops_path, "Handler.exe")
if not os.path.exists(exe_path): if not os.path.exists(exe_path):
logging.info(f"Executable 'Handler.exe' not found at {exe_path}") logging.info("Executable 'Handler.exe' not found at %s", exe_path)
return None return ""
try: try:
lang, codepage = win32api.GetFileVersionInfo(exe_path, "\\VarFileInfo\\Translation")[0] lang, codepage = win32api.GetFileVersionInfo(
exe_path, "\\VarFileInfo\\Translation")[0]
str_info_path = f"\\StringFileInfo\\{lang:04X}{codepage:04X}\\ProductVersion" str_info_path = f"\\StringFileInfo\\{lang:04X}{codepage:04X}\\ProductVersion"
return win32api.GetFileVersionInfo(exe_path, str_info_path) return str(win32api.GetFileVersionInfo(exe_path, str_info_path))
except Exception as e: except Exception as e:
logging.info(f"Error retrieving version info from {exe_path}: {e}") logging.info("Error retrieving version info from %s: %s", exe_path, e)
return None # Return None if version info retrieval fails return "" # Return empty string if version info retrieval fails
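For reference, FileVersionMS and FileVersionLS each pack two 16-bit fields; a compact sketch of the decode used above (assuming pywin32's win32api) is:

import win32api

def exe_version(exe_path):
    """Decode FileVersionMS/LS into a major.minor.build.revision string."""
    info = win32api.GetFileVersionInfo(exe_path, "\\")
    ms, ls = info["FileVersionMS"], info["FileVersionLS"]
    return f"{ms >> 16}.{ms & 0xFFFF}.{ls >> 16}.{ls & 0xFFFF}"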

View File

@@ -15,6 +15,6 @@ options:
- aftereffects - aftereffects
- resolve - resolve
tooltip: Select which test to run tooltip: Select which test to run
- name: benchmark - name: benchmark_version
type: input type: input
tooltip: Version of benchmark to run tooltip: Version of benchmark to run

View File

@@ -69,9 +69,9 @@ def run_benchmark(application: str, app_version: str, benchmark_version: str):
"Standard", "--app_version", f"{app_version}"] "Standard", "--app_version", f"{app_version}"]
command = None command = None
if application == "premierepro": if application == "premierepro":
command = [executable_path] + command_args + ["--app", "photoshop"]
elif application == "photoshop":
command = [executable_path] + command_args + ["--app", "premierepro"] command = [executable_path] + command_args + ["--app", "premierepro"]
elif application == "photoshop":
command = [executable_path] + command_args + ["--app", "photoshop"]
elif application == "aftereffects": elif application == "aftereffects":
command = [executable_path] + command_args + ["--app", "aftereffects"] command = [executable_path] + command_args + ["--app", "aftereffects"]
elif application == "resolve": elif application == "resolve":
@@ -136,19 +136,19 @@ def main():
score = 0 score = 0
test = "" test = ""
if args.app == "premierepro": if args.app == "premierepro":
test = "PugetBench Adobe Premiere Pro" test = "Adobe Premiere Pro"
if version is None: if version is None:
version = get_premierepro_version() version = get_premierepro_version()
elif args.app == "photoshop": elif args.app == "photoshop":
test = "PugetBench Adobe Photoshop" test = "Adobe Photoshop"
if version is None: if version is None:
version = get_photoshop_version() version = get_photoshop_version()
elif args.app == "aftereffects": elif args.app == "aftereffects":
test = "PugetBench Adobe After Effects" test = "Adobe After Effects"
if version is None: if version is None:
version = get_aftereffects_version() version = get_aftereffects_version()
elif args.app == "resolve": elif args.app == "resolve":
test = "PugetBench Davinci Resolve Studio" test = "Davinci Resolve Studio"
if version is None: if version is None:
version = get_davinci_version() + "-studio" version = get_davinci_version() + "-studio"
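The if/elif ladder above pairs each app id with a display name and a version probe; a table-driven sketch (same names, hypothetical structure, not the committed code) keeps the two in lockstep:

APP_INFO = {
    "premierepro": ("Adobe Premiere Pro", get_premierepro_version),
    "photoshop": ("Adobe Photoshop", get_photoshop_version),
    "aftereffects": ("Adobe After Effects", get_aftereffects_version),
    "resolve": ("Davinci Resolve Studio", get_davinci_version),
}
test, probe = APP_INFO[args.app]
if version is None:
    version = probe()
    if args.app == "resolve":
        version += "-studio"  # mirrors the "-studio" suffix in the original branch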
@@ -163,7 +163,8 @@ def main():
report = { report = {
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time), "end_time": seconds_to_milliseconds(end_time),
"test": test, "test": "PugetBench",
"test_parameter": test,
"app_version": version, "app_version": version,
"benchmark_version": args.benchmark_version, "benchmark_version": args.benchmark_version,
"pugetbench_version": get_pugetbench_version(), "pugetbench_version": get_pugetbench_version(),

View File

@@ -32,6 +32,7 @@ SCRIPT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
LOG_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, "run") LOG_DIRECTORY = os.path.join(SCRIPT_DIRECTORY, "run")
CONFIG_FULL_PATH = Path("C:/Users/", getpass.getuser(), "Documents", "Rockstar Games", "Red Dead Redemption 2", "Settings", "system.xml") CONFIG_FULL_PATH = Path("C:/Users/", getpass.getuser(), "Documents", "Rockstar Games", "Red Dead Redemption 2", "Settings", "system.xml")
user.FAILSAFE = False
def run_benchmark(): def run_benchmark():
"""Starts the benchmark""" """Starts the benchmark"""
@@ -39,8 +40,15 @@ def run_benchmark():
setup_start_time = int(time.time()) setup_start_time = int(time.time())
exec_steam_run_command(STEAM_GAME_ID) exec_steam_run_command(STEAM_GAME_ID)
am = ArtifactManager(LOG_DIRECTORY) am = ArtifactManager(LOG_DIRECTORY)
time.sleep(80) time.sleep(80)
# patch to look for seasonal popup
result = kerasService.look_for_word_vulkan("strange", attempts=30, interval=1)
if result:
user.press("enter")
time.sleep(3)
# Press Z to enter settings # Press Z to enter settings
result = kerasService.look_for_word_vulkan("settings", attempts=30, interval=1) result = kerasService.look_for_word_vulkan("settings", attempts=30, interval=1)
if not result: if not result:

View File

@@ -30,7 +30,7 @@ CONFIG_PATH = Path(f"C:\\Users\\{USERNAME}\\Documents\\My Games\\Rocket League\\
PROCESS_NAME = "rocketleague.exe" PROCESS_NAME = "rocketleague.exe"
EXECUTABLE_PATH = find_epic_executable() EXECUTABLE_PATH = find_epic_executable()
GAME_ID = "9773aa1aa54f4f7b80e44bef04986cea%3A530145df28a24424923f5828cc9031a1%3ASugar?action=launch&silent=true" GAME_ID = "9773aa1aa54f4f7b80e44bef04986cea%3A530145df28a24424923f5828cc9031a1%3ASugar?action=launch&silent=true"
gamefoldername = "rocketleague" GAMEFOLDERNAME = "rocketleague"
am = ArtifactManager(LOG_DIRECTORY) am = ArtifactManager(LOG_DIRECTORY)
gamepad = LTTGamePadDS4() gamepad = LTTGamePadDS4()
@@ -63,7 +63,7 @@ def camera_cycle(max_attempts=10):
:param max_attempts: How many camera-cycle attempts to make before giving up. :param max_attempts: How many camera-cycle attempts to make before giving up.
""" """
for attempt in range(max_attempts): for _ in range(max_attempts):
# Try finding the word within check_duration seconds # Try finding the word within check_duration seconds
found = kerasService.look_for_word(word="player", attempts=2, interval=0.2) found = kerasService.look_for_word(word="player", attempts=2, interval=0.2)
@@ -136,6 +136,11 @@ def run_benchmark():
gamepad.single_dpad_press(direction=vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH) gamepad.single_dpad_press(direction=vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH)
time.sleep(0.5) time.sleep(0.5)
if kerasService.look_for_word(word="club", attempts=5, interval=0.2):
logging.info('Saw Create a Club. Navigating accordingly.')
gamepad.single_dpad_press(direction=vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH)
time.sleep(0.5)
gamepad.dpad_press_n_times(direction=vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH, n=2, pause=0.8) gamepad.dpad_press_n_times(direction=vg.DS4_DPAD_DIRECTIONS.DS4_BUTTON_DPAD_SOUTH, n=2, pause=0.8)
time.sleep(0.5) time.sleep(0.5)
gamepad.single_button_press(button=vg.DS4_BUTTONS.DS4_BUTTON_CROSS) gamepad.single_button_press(button=vg.DS4_BUTTONS.DS4_BUTTON_CROSS)
@@ -239,7 +244,7 @@ try:
"resolution": format_resolution(width, height), "resolution": format_resolution(width, height),
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time), "end_time": seconds_to_milliseconds(end_time),
"game_version": find_eg_game_version(gamefoldername) "game_version": find_eg_game_version(GAMEFOLDERNAME)
} }
write_report_json(LOG_DIRECTORY, "report.json", report) write_report_json(LOG_DIRECTORY, "report.json", report)

View File

@@ -27,6 +27,7 @@ PROCESS_NAME = "SOTTR.exe"
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR.joinpath("run") LOG_DIR = SCRIPT_DIR.joinpath("run")
user.FAILSAFE = False
def setup_logging(): def setup_logging():
"""default logging config""" """default logging config"""

View File

@@ -20,7 +20,7 @@
version=0 version=0
} }
"refreshRate"={ "refreshRate"={
value="50" value="60"
version=0 version=0
} }
} }

View File

@@ -0,0 +1,73 @@
force_pow2_textures=no
language="l_english"
graphics=
{
size=
{
x=1920
y=1080
}
gui_scale=1.000000
gui_safe_ratio=1.000000
refreshRate=60
fullScreen=yes
borderless=no
display_index=0
renderer=1
shadowSize=2048
multi_sampling=4
multi_sampling_quality=0
maxanisotropy=16
gamma=50.000000
vsync=yes
}
sound_fx_volume=50.000000
music_volume=50.000000
scroll_speed=50.000000
camera_rotation_speed=50.000000
zoom_speed=50.000000
mouse_speed=50.000000
soundgroup="l_english"
master_volume=100.000000
ambient_volume=50.000000
dev_master_volume=75.000000
input_type=0
precise_mouse_wheel=no
mouse_wheel_acceleration=0.000000
mouse_wheel_base_speed=1.000000
input_type=0
crisis_conversation_speech=yes
voice_volume=50.000000
tts_volume=75.000000
camera_look_sensitivity=1.000000
camera_speed=50.000000
autosave=0
tutorial=0
completed_tutorial_missions=0
gfx_quality=2
bloom=
{
quality=2
lens_flare=yes
}
cvaa_settings=
{
mouse_side_button_mode=0
cvaa_tts_enabled=no
cvaa_stt_enabled=no
general_tts_enabled=no
tooltip_tts_enabled=no
cvaa_chat_visual_alerts=no
cvaa_stt_hotkey_enabled=no
cvaa_chat_large_fonts=no
cvaa_stt_hotkey=""
}
mp_max_ticks_ahead=30
mapmode_sectors=no
show_startup_game_info="Phoenix v4.0"
hyperlane_opacity=0.300000
hotkey_activation_delay=0.100000
hotkey_actualization_delay=0.100000
hide_unowned_content=no
transfer_speed="fast"
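Since the harness swaps these Paradox settings files in wholesale, a small sanity check (a hypothetical sketch matching the key=value layout above) can confirm the refresh rate before a run:

import re
from pathlib import Path

def read_refresh_rate(settings_path):
    """Extract refreshRate from a Paradox settings.txt."""
    match = re.search(r"refreshRate=(\d+)", Path(settings_path).read_text(encoding="utf-8"))
    if match is None:
        raise ValueError("refreshRate not found")
    return int(match.group(1))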

View File

@@ -1,45 +0,0 @@
force_pow2_textures=no
language="l_english"
graphics={
size={
x=1920
y=1080
}
gui_scale=1.000000
gui_safe_ratio=1.000000
refreshRate=50
fullScreen=yes
borderless=no
display_index=0
renderer=0
shadowSize=2048
multi_sampling=0
multi_sampling_quality=0
maxanisotropy=16
gamma=50.000000
vsync=yes
}
sound_fx_volume=20.000000
music_volume=20.000000
scroll_speed=50.000000
camera_rotation_speed=50.000000
zoom_speed=50.000000
mouse_speed=50.000000
soundgroup="l_english"
master_volume=52.000000
ambient_volume=21.000000
dev_master_volume=75.000000
input_type=0
voice_volume=22.000000
tts_volume=75.000000
camera_look_sensitivity=1.000000
camera_speed=50.000000
autosave=0
tutorial=0
completed_tutorial_missions=0
gfx_quality=0
bloom={
quality=0
lens_flare=no
}

View File

@@ -23,6 +23,7 @@ from harness_utils.output import (
) )
from harness_utils.steam import get_app_install_location from harness_utils.steam import get_app_install_location
from harness_utils.keras_service import KerasService from harness_utils.keras_service import KerasService
from harness_utils.artifacts import ArtifactManager, ArtifactType
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
@@ -67,12 +68,38 @@ def run_benchmark(keras_host, keras_port):
start_game() start_game()
setup_start_time = int(time.time()) setup_start_time = int(time.time())
time.sleep(5) time.sleep(5)
am = ArtifactManager(LOG_DIR)
patchnotes = keras_service.wait_for_word("close", interval=0.5, timeout=100)
if patchnotes:
gui.moveTo(patchnotes["x"], patchnotes["y"])
time.sleep(0.2)
gui.mouseDown()
time.sleep(0.2)
gui.mouseUp()
time.sleep(0.2)
result = keras_service.wait_for_word("credits", interval=0.5, timeout=100) result = keras_service.wait_for_word("credits", interval=0.5, timeout=100)
if not result: if not result:
logging.info("Could not find the paused notification. Unable to mark start time!") logging.info("Could not find the paused notification. Unable to mark start time!")
sys.exit(1) sys.exit(1)
result = keras_service.look_for_word("settings", attempts=10, interval=1)
if not result:
logging.info("Did not find the settings button. Is there something wrong on the screen?")
sys.exit(1)
gui.moveTo(result["x"], result["y"])
time.sleep(0.2)
gui.mouseDown()
time.sleep(0.2)
gui.mouseUp()
time.sleep(0.5)
am.take_screenshot("settings.png", ArtifactType.CONFIG_IMAGE, "settings")
time.sleep(0.2)
user.press("esc")
result = keras_service.look_for_word("load", attempts=10, interval=1) result = keras_service.look_for_word("load", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the load save menu. Is there something wrong on the screen?") logging.info("Did not find the load save menu. Is there something wrong on the screen?")
@@ -102,9 +129,9 @@ def run_benchmark(keras_host, keras_port):
logging.info("Could not find the paused notification. Unable to mark start time!") logging.info("Could not find the paused notification. Unable to mark start time!")
sys.exit(1) sys.exit(1)
result = keras_service.look_for_word("government", attempts=10, interval=1) result = keras_service.look_for_word("overview", attempts=10, interval=1)
if not result: if not result:
logging.info("Did not find the load latest save button. Did keras click correctly?") logging.info("Did not find the overview in the corner. Did the game load?")
sys.exit(1) sys.exit(1)
gui.moveTo(result["x"], result["y"]) gui.moveTo(result["x"], result["y"])
@@ -141,6 +168,8 @@ def run_benchmark(keras_host, keras_port):
score = find_score_in_log() score = find_score_in_log()
logging.info("The one year passed in %s seconds", score) logging.info("The one year passed in %s seconds", score)
terminate_processes(PROCESS_NAME) terminate_processes(PROCESS_NAME)
am.create_manifest()
return test_start_time, test_end_time, score return test_start_time, test_end_time, score

View File

@@ -16,15 +16,16 @@ PROCESS_NAME = "stellaris.exe"
STEAM_GAME_ID = 281990 STEAM_GAME_ID = 281990
CONFIG_LOCATION = Path(f"C:\\Users\\{USERNAME}\\Documents\\Paradox Interactive\\Stellaris") CONFIG_LOCATION = Path(f"C:\\Users\\{USERNAME}\\Documents\\Paradox Interactive\\Stellaris")
LOG_LOCATION = Path(f"C:\\Users\\{USERNAME}\\Documents\\Paradox Interactive\\Stellaris\\logs") LOG_LOCATION = Path(f"C:\\Users\\{USERNAME}\\Documents\\Paradox Interactive\\Stellaris\\logs")
BENCHMARK_LOCATION = Path(f"C:\\Users\\{USERNAME}\\Documents\\Paradox Interactive\\Stellaris\\save games\\BENCHMARK") BENCHMARK_LOCATION = Path(
CONFIG_FILENAME = "standard_settings.txt" f"C:\\Users\\{USERNAME}\\Documents\\Paradox Interactive\\Stellaris\\save games\\BENCHMARK")
CONFIG_FILENAME = "settings.txt"
LOG_FILE = "game.log" LOG_FILE = "game.log"
benchmark_files = [ benchmark_files = [
"benchmark.ini", "benchmark.ini",
"pdx_settings.txt", "pdx_settings.txt",
"standard_settings.txt" "settings.txt"
] ]
@@ -76,7 +77,7 @@ def copy_benchmarkfiles() -> None:
def copy_save_from_network_drive(file_name, destination): def copy_save_from_network_drive(file_name, destination):
"""copy save file from network drive""" """copy save file from network drive"""
network_dir = Path("\\\\Labs\\labs\\03_ProcessingFiles\\Stellaris") network_dir = Path("\\\\labs.lmg.gg\\labs\\03_ProcessingFiles\\Stellaris")
source_path = network_dir.joinpath(file_name) source_path = network_dir.joinpath(file_name)
logging.info("Copying %s from %s", file_name, source_path) logging.info("Copying %s from %s", file_name, source_path)
shutil.copyfile(source_path, destination) shutil.copyfile(source_path, destination)

View File

@@ -43,21 +43,22 @@ formatter = logging.Formatter(LOGGING_FORMAT)
console.setFormatter(formatter) console.setFormatter(formatter)
logging.getLogger('').addHandler(console) logging.getLogger('').addHandler(console)
cmd = f'{INSTALL_DIR}\\{EXECUTABLE}' CMD = f'{INSTALL_DIR}\\{EXECUTABLE}'
argstr = f"-fullscreen 1 -mode default -api {args.api} -quality {args.preset} -iterations 1" ARGSTR = f"-fullscreen 1 -mode default -api {args.api} -quality {args.preset} -iterations 1"
argstr += f" -log_txt {log_dir}\\log.txt" ARGSTR += f" -log_txt {log_dir}\\log.txt"
logging.info(cmd) logging.info(CMD)
logging.info(argstr) logging.info(ARGSTR)
argies = argstr.split(" ") argies = ARGSTR.split(" ")
cmd = cmd.rstrip() CMD = CMD.rstrip()
with Popen([cmd, *argies]) as process: with Popen([CMD, *argies]) as process:
EXIT_CODE = process.wait() EXIT_CODE = process.wait()
if EXIT_CODE > 0: if EXIT_CODE > 0:
logging.error("Test failed!") logging.error("Test failed!")
sys.exit(EXIT_CODE) sys.exit(EXIT_CODE)
SCORE = ""
pattern = re.compile(r"Score: (\d+)") pattern = re.compile(r"Score: (\d+)")
log_path = os.path.join(log_dir, "log.txt") log_path = os.path.join(log_dir, "log.txt")
with open(log_path, encoding="utf-8") as log: with open(log_path, encoding="utf-8") as log:
@@ -65,11 +66,13 @@ with open(log_path, encoding="utf-8") as log:
for line in lines: for line in lines:
match = pattern.search(line) match = pattern.search(line)
if match: if match:
score = match.group(1) SCORE = match.group(1)
report = { report = {
"test": f"Unigine Superposition 2017 {args.preset} ${args.api}", "test": "Unigine Superposition",
"score": score, "test_parameter": f"{args.api}",
"test_preset": args.preset,
"score": SCORE,
"unit": "score" "unit": "score"
} }
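For reference, the scan that fills the renamed SCORE variable keeps the last "Score: N" line in the log, so later matches overwrite earlier ones. A self-contained sketch of the same logic; find_score is a hypothetical helper name:

import re

SCORE_PATTERN = re.compile(r"Score: (\d+)")

def find_score(log_path):
    """Return the last 'Score: N' match in the log, or '' if none is found."""
    score = ""
    with open(log_path, encoding="utf-8") as log:
        for line in log:
            match = SCORE_PATTERN.search(line)
            if match:
                score = match.group(1)
    return score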

View File

@@ -20,8 +20,8 @@ from harness_utils.output import (
) )
from harness_utils.process import terminate_processes from harness_utils.process import terminate_processes
from harness_utils.steam import ( from harness_utils.steam import (
get_registry_active_user, get_registry_active_user,
exec_steam_run_command, exec_steam_run_command,
) )
from harness_utils.misc import press_n_times from harness_utils.misc import press_n_times
@@ -34,19 +34,20 @@ PROCESS_NAME = "tlou"
user.FAILSAFE = False user.FAILSAFE = False
def take_screenshots(am: ArtifactManager) -> None: def take_screenshots(am: ArtifactManager) -> None:
"""Take screenshots of the benchmark settings""" """Take screenshots of the benchmark settings"""
logging.info("Taking screenshots of benchmark settings") logging.info("Taking screenshots of benchmark settings")
press_n_times("s",2,0.2 ) press_n_times("s", 2, 0.2)
user.press("enter") user.press("enter")
press_n_times("s",4,0.2 ) press_n_times("s", 4, 0.2)
user.press("enter") user.press("enter")
am.take_screenshot("video1.png", ArtifactType.CONFIG_IMAGE, "screenshot of video settings1") am.take_screenshot("video1.png", ArtifactType.CONFIG_IMAGE, "screenshot of video settings1")
press_n_times("s",15,0.2) press_n_times("s", 15, 0.2)
am.take_screenshot("video2.png", ArtifactType.CONFIG_IMAGE, "screenshot of video settings2") am.take_screenshot("video2.png", ArtifactType.CONFIG_IMAGE, "screenshot of video settings2")
press_n_times("s",6, 0.2) press_n_times("s", 6, 0.2)
am.take_screenshot("video3.png", ArtifactType.CONFIG_IMAGE, "screenshot of video settings3") am.take_screenshot("video3.png", ArtifactType.CONFIG_IMAGE, "screenshot of video settings3")
user.press("backspace") user.press("backspace")
@@ -69,6 +70,7 @@ def take_screenshots(am: ArtifactManager) -> None:
user.press("backspace") user.press("backspace")
press_n_times("w", 2, 0.2) press_n_times("w", 2, 0.2)
def navigate_main_menu(am: ArtifactManager) -> None: def navigate_main_menu(am: ArtifactManager) -> None:
"""Input to navigate main menu""" """Input to navigate main menu"""
logging.info("Navigating main menu") logging.info("Navigating main menu")
@@ -174,7 +176,7 @@ try:
start_time, end_time = run_benchmark() start_time, end_time = run_benchmark()
steam_id = get_registry_active_user() steam_id = get_registry_active_user()
config_path = os.path.join( config_path = os.path.join(
os.environ["HOMEPATH"], "Saved Games" ,"The Last of Us Part I", os.environ["HOMEPATH"], "Saved Games", "The Last of Us Part I",
"users", str(steam_id), "screeninfo.cfg" "users", str(steam_id), "screeninfo.cfg"
) )
height, width = get_resolution(config_path) height, width = get_resolution(config_path)
@@ -183,8 +185,8 @@ try:
"start_time": seconds_to_milliseconds(start_time), "start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time) "end_time": seconds_to_milliseconds(end_time)
} }
write_report_json(LOG_DIRECTORY, "report.json", report) write_report_json(LOG_DIRECTORY, "report.json", report)
except Exception as e: except Exception as e:
logging.error("Something went wrong running the benchmark!") logging.error("Something went wrong running the benchmark!")
logging.exception(e) logging.exception(e)
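The reformatted press_n_times(key, n, delay) calls come from harness_utils.misc. A minimal re-implementation consistent with how it is called here, assuming pydirectinput; this is a sketch, not the library's actual code:

import time
import pydirectinput as user

def press_n_times(key, n, delay=0.2):
    """Press `key` n times, sleeping `delay` seconds between presses."""
    for _ in range(n):
        user.press(key)
        time.sleep(delay)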

View File

@@ -0,0 +1,9 @@
friendly_name: "The Last of Us Part II"
executable: "tlou2.py"
process_name: "tlou-ii.exe"
output_dir: "run"
options:
- name: kerasHost
type: input
- name: kerasPort
type: input
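The manifest is plain YAML, so the runner can load it with any YAML parser. A sketch assuming PyYAML; the manifest.yaml filename is hypothetical:

import yaml  # PyYAML; the runner's actual loader may differ

with open("manifest.yaml", encoding="utf-8") as f:
    manifest = yaml.safe_load(f)
print(manifest["friendly_name"], manifest["process_name"])
# manifest["options"] is a list of {"name": ..., "type": ...} entries,
# here kerasHost and kerasPort.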

View File

@@ -0,0 +1,286 @@
"""The Last of Us Part I test script"""
import logging
import time
import sys
import getpass
import shutil
import winreg  # for accessing settings, including resolution, in the registry
from pathlib import Path
import pydirectinput as user
sys.path.insert(1, str(Path(sys.path[0]).parent))
from harness_utils.keras_service import KerasService
from harness_utils.output import (
format_resolution,
seconds_to_milliseconds,
write_report_json,
setup_logging,
)
from harness_utils.process import terminate_processes
from harness_utils.steam import (
exec_steam_run_command,
)
from harness_utils.artifacts import ArtifactManager, ArtifactType
from harness_utils.misc import (
int_time,
find_word,
press_n_times,
keras_args)
USERNAME = getpass.getuser()
STEAM_GAME_ID = 2531310
SCRIPT_DIRECTORY = Path(__file__).resolve().parent
LOG_DIRECTORY = SCRIPT_DIRECTORY / "run"
PROCESS_NAME = "tlou-ii.exe"
SAVEDATA_PATH = Path(
f"C:\\Users\\{USERNAME}\\Documents\\The Last of Us Part II\\76561199405246658\\savedata")  # shared by reset_savedata and delete_autosave
user.FAILSAFE = False
def reset_savedata():
"""
Deletes the savegame folder from the local directory and replaces it with a new one from the network drive.
"""
local_savegame_path = SAVEDATA_PATH
network_savegame_path = Path(
r"\\labs.lmg.gg\Labs\03_ProcessingFiles\The Last of Us Part II\savedata")
# Delete the local savedata folder if it exists
if local_savegame_path.exists() and local_savegame_path.is_dir():
shutil.rmtree(local_savegame_path)
logging.info("Deleted local savedata folder: %s", local_savegame_path)
# Copy the savedata folder from the network drive
try:
shutil.copytree(network_savegame_path, local_savegame_path)
logging.info("Copied savedata folder from %s to %s",
network_savegame_path, local_savegame_path)
except Exception as e:
logging.error("Failed to copy savedata folder: %s", e)
def delete_autosave():
"""
Deletes the autosave folder (SAVEFILE0A) from the local savedata directory if it exists.
"""
savefile_path = SAVEDATA_PATH / "SAVEFILE0A"  # the game autosaves into this folder
if savefile_path.exists() and savefile_path.is_dir():
shutil.rmtree(savefile_path)
logging.info("Deleted folder: %s", savefile_path)
def get_current_resolution():
"""
Reads the fullscreen resolution settings from the registry.
Returns:
tuple: (width, height)
"""
key_path = r"Software\Naughty Dog\The Last of Us Part II\Graphics"
fullscreen_width = read_registry_value(key_path, "FullscreenWidth")
fullscreen_height = read_registry_value(key_path, "FullscreenHeight")
return (fullscreen_width, fullscreen_height)
def read_registry_value(key_path, value_name):
"""
Reads value from registry
A helper function for get_current_resolution
"""
try:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_path) as key:
value, _ = winreg.QueryValueEx(key, value_name)
return value
except FileNotFoundError:
logging.error("Registry key not found: %s", value_name)
return None
except OSError as e:
logging.error("Error reading registry value: %s", e)
return None
def run_benchmark(keras_service: KerasService) -> tuple:
"""Starts Game, Sets Settings, and Runs Benchmark"""
exec_steam_run_command(STEAM_GAME_ID)
setup_start_time = int_time()
am = ArtifactManager(LOG_DIRECTORY)
if keras_service.wait_for_word(word="sony", timeout=60, interval=0.2) is None:
logging.error("Couldn't find 'sony'")
else:
user.press("escape")
find_word(keras_service, "story", "Couldn't find main menu : 'story'")
press_n_times("down", 2)
# navigate settings
navigate_settings(am, keras_service)
find_word(keras_service, "story", "Couldn't find main menu the second time : 'story'")
press_n_times("up", 2)
user.press("space")
time.sleep(0.3)
user.press("space")
if keras_service.wait_for_word(word="continue", timeout=5, interval=0.2) is None:
user.press("down")
else:
press_n_times("down", 2)
delete_autosave()
time.sleep(0.3)
user.press("space")
time.sleep(0.3)
if keras_service.wait_for_word(word="autosave", timeout=5, interval=0.2) is None:
user.press("space")
else:
user.press("up")
time.sleep(0.3)
user.press("space")
time.sleep(0.3)
user.press("left")
time.sleep(0.3)
user.press("space")
setup_end_time = test_start_time = test_end_time = int_time()
elapsed_setup_time = setup_end_time - setup_start_time
logging.info("Setup took %f seconds", elapsed_setup_time)
# time of benchmark usually is 4:23 = 263 seconds
if keras_service.wait_for_word(word="man", timeout=100, interval=0.2) is not None:
test_start_time = int_time() - 14
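# Back-date the start by 14 s, presumably the lead-in before "man" is first
# visible on screen.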
time.sleep(240)
else:
logging.error("couldn't find 'man'")
time.sleep(150)
if keras_service.wait_for_word(word="rush", timeout=100, interval=0.2) is not None:
time.sleep(3)
test_end_time = int_time()
else:
logging.error("couldn't find 'rush', marks end of benchmark")
test_end_time = int_time()
elapsed_test_time = test_end_time - test_start_time
logging.info("Test took %f seconds", elapsed_test_time)
terminate_processes(PROCESS_NAME)
am.create_manifest()
return test_start_time, test_end_time
def navigate_settings(am: ArtifactManager, keras: KerasService) -> None:
"""Navigate through settings and take screenshots.
Exits to main menu after taking screenshots.
"""
user.press("space")
find_word(keras, "display", "Couldn't find display")
time.sleep(5) # slow cards may miss the first down
press_n_times("down", 4)
user.press("space")
time.sleep(0.5)
find_word(keras, "resolution", "Couldn't find resolution")
am.take_screenshot("display1.png", ArtifactType.CONFIG_IMAGE, "display settings 1")
user.press("up")
find_word(keras, "brightness", "Couldn't find brightness")
am.take_screenshot("display2.png", ArtifactType.CONFIG_IMAGE, "display settings 2")
user.press("q") # swaps to graphics settings
time.sleep(0.5)
find_word(keras, "preset", "Couldn't find preset")
am.take_screenshot("graphics1.png", ArtifactType.CONFIG_IMAGE, "graphics settings 1")
user.press("up")
find_word(keras, "dirt", "Couldn't find dirt")
am.take_screenshot("graphics3.png", ArtifactType.CONFIG_IMAGE,
"graphics settings 3") # is at the bottom of the menu
press_n_times("up", 13)
find_word(keras, "scattering", "Couldn't find scattering")
am.take_screenshot("graphics2.png", ArtifactType.CONFIG_IMAGE, "graphics settings 2")
press_n_times("escape", 2)
def main():
"""Main function to run the benchmark"""
try:
logging.info("Starting The Last of Us Part II benchmark")
args = keras_args()
keras_service = KerasService(args.keras_host, args.keras_port)
reset_savedata()
start_time, end_time = run_benchmark(keras_service)
resolution_tuple = get_current_resolution()
report = {
"resolution": format_resolution(resolution_tuple[0], resolution_tuple[1]),
"start_time": seconds_to_milliseconds(start_time), # secconds to miliseconds
"end_time": seconds_to_milliseconds(end_time),
}
write_report_json(LOG_DIRECTORY, "report.json", report)
except Exception as e:
logging.error("An error occurred: %s", e)
logging.exception(e)
terminate_processes(PROCESS_NAME)
sys.exit(1)
if __name__ == "__main__":
setup_logging(LOG_DIRECTORY)
main()
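The registry helpers above can be exercised on their own. A quick standalone check that prints the stored fullscreen resolution, assuming the game has already written its Graphics key:

import winreg

KEY_PATH = r"Software\Naughty Dog\The Last of Us Part II\Graphics"

with winreg.OpenKey(winreg.HKEY_CURRENT_USER, KEY_PATH) as key:
    width, _ = winreg.QueryValueEx(key, "FullscreenWidth")
    height, _ = winreg.QueryValueEx(key, "FullscreenHeight")
print(f"{width}x{height}")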

View File

@@ -22,6 +22,8 @@ LOG_DIRECTORY = SCRIPT_DIRECTORY.joinpath("run")
STEAM_GAME_ID = 1286680 STEAM_GAME_ID = 1286680
EXECUTABLE = "Wonderlands.exe" EXECUTABLE = "Wonderlands.exe"
user.FAILSAFE = False
def setup_logging(): def setup_logging():
"""default logging config""" """default logging config"""
setup_log_directory(LOG_DIRECTORY) setup_log_directory(LOG_DIRECTORY)

View File

@@ -16,12 +16,12 @@ def xz_executable_exists() -> bool:
def copy_from_network_drive(): def copy_from_network_drive():
"""Download xz from network drive""" """Download xz from network drive"""
source = r"\\Labs\labs\01_Installers_Utilities\xz\xz_5.6.2_x86_64.exe" source = r"\\labs.lmg.gg\labs\01_Installers_Utilities\xz\xz_5.6.2_x86_64.exe"
root_dir = os.path.dirname(os.path.realpath(__file__)) root_dir = os.path.dirname(os.path.realpath(__file__))
destination = os.path.join(root_dir, XZ_EXECUTABLE) destination = os.path.join(root_dir, XZ_EXECUTABLE)
shutil.copyfile(source, destination) shutil.copyfile(source, destination)
source = r"\\Labs\labs\03_ProcessingFiles\Compression\tq_dlss_explained_1080p.mp4" source = r"\\labs.lmg.gg\labs\03_ProcessingFiles\Compression\tq_dlss_explained_1080p.mp4"
root_dir = os.path.dirname(os.path.realpath(__file__)) root_dir = os.path.dirname(os.path.realpath(__file__))
destination = os.path.join(root_dir, "tq_dlss_explained_1080p.mp4") destination = os.path.join(root_dir, "tq_dlss_explained_1080p.mp4")
shutil.copyfile(source, destination) shutil.copyfile(source, destination)
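Both copies repeat the same destination logic, so they could be folded into one loop over the share paths used above; the consolidation itself is only a suggestion:

import os
import shutil

SOURCES = [
    r"\\labs.lmg.gg\labs\01_Installers_Utilities\xz\xz_5.6.2_x86_64.exe",
    r"\\labs.lmg.gg\labs\03_ProcessingFiles\Compression\tq_dlss_explained_1080p.mp4",
]
root_dir = os.path.dirname(os.path.realpath(__file__))
for source in SOURCES:
    # os.path.basename handles the UNC backslashes on Windows
    shutil.copyfile(source, os.path.join(root_dir, os.path.basename(source)))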

View File

@@ -12,8 +12,8 @@ sys.path.insert(1, os.path.join(sys.path[0], ".."))
from harness_utils.output import write_report_json, DEFAULT_LOGGING_FORMAT, DEFAULT_DATE_FORMAT from harness_utils.output import write_report_json, DEFAULT_LOGGING_FORMAT, DEFAULT_DATE_FORMAT
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR.joinpath("run") LOG_DIR = SCRIPT_DIR / "run"
EXECUTABLE_PATH = SCRIPT_DIR.joinpath(YCRUNCHER_FOLDER_NAME, "y-cruncher.exe") EXECUTABLE_PATH = SCRIPT_DIR / YCRUNCHER_FOLDER_NAME / "y-cruncher.exe"
def setup_logging(): def setup_logging():
@@ -79,7 +79,7 @@ def main():
report = { report = {
"start_time": start_time, "start_time": start_time,
"version": "v0.8.5.9543", "version": "v0.8.5.9545b",
"end_time": end_time, "end_time": end_time,
"score": avg_score, "score": avg_score,
"unit": "seconds", "unit": "seconds",

View File

@@ -4,11 +4,11 @@ from zipfile import ZipFile
from pathlib import Path from pathlib import Path
import requests import requests
YCRUNCHER_FOLDER_NAME = "y-cruncher v0.8.5.9543" YCRUNCHER_FOLDER_NAME = "y-cruncher v0.8.6.9545"
YCRUNCHER_ZIP_NAME = "y-cruncher.v0.8.5.9543.zip" YCRUNCHER_ZIP_NAME = "y-cruncher.v0.8.6.9545b.zip"
SCRIPT_DIR = Path(__file__).resolve().parent SCRIPT_DIR = Path(__file__).resolve().parent
def ycruncher_folder_exists() -> bool: def ycruncher_folder_exists() -> bool:
"""Check if ycruncher has been downloaded or not""" """Check if ycruncher has been downloaded or not"""
return SCRIPT_DIR.joinpath(YCRUNCHER_FOLDER_NAME).is_dir() return SCRIPT_DIR.joinpath(YCRUNCHER_FOLDER_NAME).is_dir()
@@ -16,7 +16,7 @@ def ycruncher_folder_exists() -> bool:
def download_ycruncher(): def download_ycruncher():
"""Download and extract Y-Cruncher""" """Download and extract Y-Cruncher"""
download_url = "https://github.com/Mysticial/y-cruncher/releases/download/v0.8.5.9543/y-cruncher.v0.8.5.9543.zip" download_url = "https://github.com/Mysticial/y-cruncher/releases/download/v0.8.6.9545/y-cruncher.v0.8.6.9545b.zip"
destination = SCRIPT_DIR / YCRUNCHER_ZIP_NAME destination = SCRIPT_DIR / YCRUNCHER_ZIP_NAME
response = requests.get(download_url, allow_redirects=True, timeout=180) response = requests.get(download_url, allow_redirects=True, timeout=180)
with open(destination, 'wb') as file: with open(destination, 'wb') as file:
@@ -24,7 +24,7 @@ def download_ycruncher():
with ZipFile(destination, 'r') as zip_object: with ZipFile(destination, 'r') as zip_object:
zip_object.extractall(path=SCRIPT_DIR) zip_object.extractall(path=SCRIPT_DIR)
def current_time_ms(): def current_time_ms():
"""Get current timestamp in milliseconds since epoch""" """Get current timestamp in milliseconds since epoch"""
return int(time.time() * 1000) return int(time.time() * 1000)
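One defensive tweak worth considering here: requests does not raise on HTTP errors by itself, so a failed download would currently be written to disk and only blow up during extraction. A sketch with the check added; download_and_extract is a hypothetical helper:

import requests
from zipfile import ZipFile

def download_and_extract(url, destination, extract_to):
    """Download a zip and extract it, failing fast on HTTP errors."""
    response = requests.get(url, allow_redirects=True, timeout=180)
    response.raise_for_status()  # surface a 404 instead of saving an error page
    with open(destination, "wb") as file:
        file.write(response.content)
    with ZipFile(destination, "r") as zip_object:
        zip_object.extractall(path=extract_to)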

View File

(binary image changed; 165 KiB before and after)

View File

@@ -23,8 +23,8 @@ logging.getLogger('').addHandler(console)
executable = os.path.join(INSTALL_DIR, EXECUTABLE) executable = os.path.join(INSTALL_DIR, EXECUTABLE)
report_dest = os.path.join(log_dir, "report.xml") report_dest = os.path.join(log_dir, "report.xml")
argstr = f"/GGBENCH {report_dest}" ARGSTR = f"/GGBENCH {report_dest}"
result = subprocess.run([executable, "/GGBENCH", report_dest], check=False) result = subprocess.run([executable, *ARGSTR.split(maxsplit=1)], check=False)
if result.returncode > 0: if result.returncode > 0:
logging.error("Aida failed with exit code {result.returncode}") logging.error("Aida failed with exit code {result.returncode}")

View File

@@ -0,0 +1 @@
# This is a non-game harness template