diff --git a/msikombuster/README.md b/msikombuster/README.md index d5a675f..973d65b 100644 --- a/msikombuster/README.md +++ b/msikombuster/README.md @@ -1,42 +1,20 @@ -# FurMark Test Harness +# MSI Kombustor harness +This is a wrapper for the [MSI Kombustor](https://geeks3d.com/furmark/kombustor/) application. + +>MSI Kombustor is MSI's exclusive burn-in benchmarking tool based on the well-known FurMark software. This program is specifically designed to push your graphics card to the limits to test stability and thermal performance. Kombustor supports cutting edge 3D APIs such as OpenGL or Vulkan. + +![MSI Kombustor main window](msi_kombustor.png) + +This harness takes in the **Stress Test** and **Resolution** as command line arguments and then executes. If started in benchmark mode the score will be recorded. If not in Benchmark mode MSI Kombustor will run until manually exited. ## Prerequisites - Python 3.10+ -- MSI Kombuster installed in default path. - -This harness expects that MSI Kombuster has been installed on the system using installer defaults. -> Note: Hopefully it will install itself in the future if not present. - -## Setup - - 1. Follow the setup instructions for the framework. If you have done so, all required python dependencies *should* be installed. - 2. Install MSI Kombuster from `\\10.20.0.27\Users\Linus\Team_Documents\Nikolas\Benchmark_Dependencies\MSI_Kombustor4_Setup_v4.1.16.0_x64.exe` - 1. Follow the installer's defaults. - -## Configuration - -Below is an example use of this harness as a test in a benchmark configuration. - -```yaml -... -... -tests: - - name: msikombuster - executable: "msikombuster.py" - process_name: "MSI-Kombuster-x64.exe" - output_dir: - - 'harness/msikombuster/run' -``` - -__name__ : _(required)_ name of the test. This much match the name of a directory in the harness folder so the framework -can find the executable and any supplementary files. - -__executable__ : _(required)_ the entry point to the test harness. 
In this case a python script. - -__process_name__ : _(required)_ The process name that should be the target for FPS recording (ex: PresentMon). - -__output_dir__: _(optional)_ Directory containing files to aggregate copies of after a successful test run. If a directory path is -given, the contents are copied. +- MSI Kombustor installed in default location. +## Output +report.json +- `resolution`: resolution used for the run. +- `test`: the test used for the run. +- `score`: score given from a benchmark run. \ No newline at end of file diff --git a/msikombuster/msi_kombustor.png b/msikombuster/msi_kombustor.png new file mode 100644 index 0000000..7211a40 Binary files /dev/null and b/msikombuster/msi_kombustor.png differ diff --git a/msikombuster/msikombuster.py b/msikombuster/msikombuster.py index a2e133a..0967968 100644 --- a/msikombuster/msikombuster.py +++ b/msikombuster/msikombuster.py @@ -1,3 +1,4 @@ +"""Kombustor test script""" from argparse import ArgumentParser from subprocess import Popen import json @@ -22,9 +23,9 @@ flags = [ # The score file is not updated at the end of a benchmark. "-update_score_file_disabled" - # By default the log file is saved in the user’s + # By default the log file is saved in the user’s # temp folder (C:\Users\USER_NAME\AppData\ - # Local\Temp). This option allows to save the log + # Local\Temp). 
This option allows to save the log # file in Kombustor folder "-logfile_in_app_folder" ] @@ -58,20 +59,6 @@ avail_tests = [ INSTALL_DIR = "C:\Program Files\Geeks3D\MSI Kombustor 4 x64" EXECUTABLE = "MSI-Kombustor-x64.exe" -script_dir = os.path.dirname(os.path.realpath(__file__)) -log_dir = os.path.join(script_dir, "run") -if not os.path.isdir(log_dir): - os.mkdir(log_dir) -logging_format = '%(asctime)s %(levelname)-s %(message)s' -logging.basicConfig(filename=f'{log_dir}/harness.log', - format=logging_format, - datefmt='%m-%d %H:%M', - level=logging.DEBUG) -console = logging.StreamHandler() -formatter = logging.Formatter(logging_format) -console.setFormatter(formatter) -logging.getLogger('').addHandler(console) - parser = ArgumentParser() parser.add_argument("-t", "--test", dest="test", help="kombuster test", metavar="test", required=True) @@ -84,7 +71,21 @@ args = parser.parse_args() if args.test not in avail_tests: raise ValueError(f"Error, unknown test: {args.test}") -match = re.search("^\d+,\d+$", args.resolution) +script_dir = os.path.dirname(os.path.realpath(__file__)) +log_dir = os.path.join(script_dir, "run") +if not os.path.isdir(log_dir): + os.mkdir(log_dir) +LOGGING_FORMAT = '%(asctime)s %(levelname)-s %(message)s' +logging.basicConfig(filename=f'{log_dir}/harness.log', + format=LOGGING_FORMAT, + datefmt='%m-%d %H:%M', + level=logging.DEBUG) +console = logging.StreamHandler() +formatter = logging.Formatter(LOGGING_FORMAT) +console.setFormatter(formatter) +logging.getLogger('').addHandler(console) + +match = re.search(r"^\d+,\d+$", args.resolution) if match is None: raise ValueError("Resolution value must be in format height,width") r = args.resolution.split(",") @@ -93,34 +94,31 @@ w = r[1] cmd = f'{INSTALL_DIR}/{EXECUTABLE}' argstr = f"-width={w} -height={h} -{args.test} -logfile_in_app_folder " -if args.benchmark == "true" : +if args.benchmark == "true": argstr += "-benchmark" print(cmd) print(argstr) process = Popen([cmd, argstr]) -exit_code = 
process.wait() +EXIT_CODE = process.wait() -score = "N/A" +score = "N/A" # need to find "score => 1212 points" pattern = re.compile(r"score => (\d+)") log_path = os.path.join(INSTALL_DIR, "_kombustor_log.txt") -log = open(log_path) -lines = log.readlines() -for line in reversed(lines): - match = pattern.search(line) - if match: - score = match.group(1) -log.close() +with open(log_path, encoding="utf-8") as log: + lines = log.readlines() + for line in reversed(lines): + match = pattern.search(line) + if match: + score = match.group(1) -result = { +report = { "resolution": f"{w}x{h}", "graphics_preset": "N/A", "test": args.test, "score": score } -f = open(os.path.join(log_dir, "report.json"), "w") -f.write(json.dumps(result)) -f.close() - +with open(os.path.join(log_dir, "report.json"), "w", encoding="utf-8") as f: + f.write(json.dumps(report))