diff --git a/evolve/README.md b/evolve/README.md
index c627a96..d0b076a 100644
--- a/evolve/README.md
+++ b/evolve/README.md
@@ -12,6 +12,7 @@ Runs the EVOLVE benchmark scene and reads all the produced scores.
 
 - `--renderer` Specifies the type of renderer to run with
 - `--type` Specifies the method for hardware-accelerated ray-tracing or general rendering to use
+- `--preset` Specifies the graphics preset for the benchmark
 
 ## Output
 
@@ -57,4 +58,4 @@ report-energy-score.json
 - `test`: The name of the selected rendering options and score specification
 - `score`: EVOLVE's Energy score
 - `start_time`: number representing a timestamp of the test's start time in milliseconds
-- `end_time`: number representing a timestamp of the test's end time in milliseconds
\ No newline at end of file
+- `end_time`: number representing a timestamp of the test's end time in milliseconds
diff --git a/evolve/evolve.py b/evolve/evolve.py
index e967f9f..6d0e1cf 100644
--- a/evolve/evolve.py
+++ b/evolve/evolve.py
@@ -45,9 +45,9 @@ def setup_logging():
     logging.getLogger("").addHandler(console)
 
 
-TRACE_MODES = ["pipeline", "inline", "workgraph"]
+TRACE_MODES = ["pipeline", "inline", "work-graph"]
 RENDERERS = ["ray-tracing", "path-tracing"]
-
+PRESETS = ["ultra", "high", "medium"]
 
 def get_scores(results_path):
     """obtain and parse the scores from the evolve run"""
@@ -61,9 +61,9 @@
     return results
 
 
-def launch_evolve(renderer, trace_mode):
+def launch_evolve(renderer, trace_mode, preset):
-    """launch evolve with the given render and trace parameters"""
+    """launch evolve with the given render, trace, and preset parameters"""
-    launch_command = f'"{EXECUTABLE_PATH}" --offline run-custom --renderer {renderer} --mode {trace_mode} --export-scores {RESULTS_FILE}'
+    launch_command = f'"{EXECUTABLE_PATH}" --offline run-custom --renderer {renderer} --mode {trace_mode} --preset {preset} --fullscreen --export-scores {RESULTS_FILE}'
     with subprocess.Popen(
         launch_command,
         stdout=subprocess.PIPE,
@@ -91,34 +91,46 @@
 def main():
     setup_logging()
     parser = ArgumentParser()
+
     parser.add_argument(
-        "-r",
-        "--renderer",
+        "-r", "--renderer",
         help="Whether to run with the hybrid renderer or path tracer",
         required=True,
         choices=RENDERERS,
     )
+
     parser.add_argument(
         "-t",
         "--trace-mode",
         help="Which type of hardware accelerated ray-tracing mode should be used",
-        required=True, choices=TRACE_MODES,)
+        required=True,
+        choices=TRACE_MODES,
+    )
+
+    parser.add_argument(
+        "--preset",
+        help="The graphics settings preset to use",
+        required=True,
+        choices=PRESETS,
+    )
+
     args = parser.parse_args()
     logging.info(
-        "Starting Evolve with %s renderer and trace mode %s",
+        "Starting Evolve with %s renderer, %s trace mode, and %s preset",
         args.renderer,
         args.trace_mode,
+        args.preset,
     )
 
     start_time = time.time()
-    launch_evolve(args.renderer, args.trace_mode)
+    launch_evolve(args.renderer, args.trace_mode, args.preset)
     end_time = time.time()
     scores = get_scores(RESULTS_FILE)
     logging.info("Benchmark took %.2f seconds", end_time - start_time)
 
     report = {
         "test": "Evolve Benchmark",
-        "test_parameter": f"{args.renderer} {args.trace_mode}",
+        "test_parameter": f"{args.renderer} {args.trace_mode} {args.preset}",
         "start_time": seconds_to_milliseconds(start_time),
         "end_time": seconds_to_milliseconds(end_time),
         "unit": "Score",
diff --git a/evolve/manifest.yaml b/evolve/manifest.yaml
index 79a599b..938a348 100644
--- a/evolve/manifest.yaml
+++ b/evolve/manifest.yaml
@@ -15,4 +15,10 @@ options:
     values:
       - "pipeline"
      - "inline"
-      - "workgraph"
\ No newline at end of file
+      - "work-graph"
+  - name: preset
+    type: select
+    values:
+      - "ultra"
+      - "high"
+      - "medium"
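
Note: with this change applied, all three options are required (`required=True` on each argument). A minimal sketch of an invocation of the updated wrapper, using only flag names and choices defined in the diff above — running the script directly via `python` is an assumption; `EXECUTABLE_PATH` and `RESULTS_FILE` are configured inside the script itself:

```sh
python evolve/evolve.py --renderer path-tracing --trace-mode inline --preset high
```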