Evolve Benchmark update (#148)

Adds graphical preset option, launches in fullscreen, and fixes work-graph argument typo
This commit is contained in:
snguyen-lmg
2025-07-02 10:00:29 -07:00
committed by GitHub
3 changed files with 32 additions and 13 deletions

View File

@@ -12,6 +12,7 @@ Runs the EVOLVE benchmark scene and reads all the produced scores.
- `--renderer` Specifies the type of renderer to run with
- `--type` Specifies the method for hardware-accelerated ray-tracing or general rendering to use
- `--preset` Specifies the graphics preset for the benchmark
## Output
@@ -57,4 +58,4 @@ report-energy-score.json
- `test`: The name of the selected rendering options and score specification
- `score`: EVOLVE's Energy score
- `start_time`: number representing a timestamp of the test's start time in milliseconds
- `end_time`: number representing a timestamp of the test's end time in milliseconds
- `end_time`: number representing a timestamp of the test's end time in milliseconds

View File

@@ -45,9 +45,9 @@ def setup_logging():
logging.getLogger("").addHandler(console)
TRACE_MODES = ["pipeline", "inline", "workgraph"]
TRACE_MODES = ["pipeline", "inline", "work-graph"]
RENDERERS = ["ray-tracing", "path-tracing"]
PRESETS = ["ultra", "high", "medium"]
def get_scores(results_path):
"""obtain and parse the scores from the evolve run"""
@@ -61,9 +61,9 @@ def get_scores(results_path):
return results
def launch_evolve(renderer, trace_mode):
def launch_evolve(renderer, trace_mode, preset):
"""launch evolve with the given render and trace parameters"""
launch_command = f'"{EXECUTABLE_PATH}" --offline run-custom --renderer {renderer} --mode {trace_mode} --export-scores {RESULTS_FILE}'
launch_command = f'"{EXECUTABLE_PATH}" --offline run-custom --renderer {renderer} --mode {trace_mode} --preset {preset} --fullscreen --export-scores {RESULTS_FILE}'
with subprocess.Popen(
launch_command,
stdout=subprocess.PIPE,
@@ -91,34 +91,46 @@ def launch_evolve(renderer, trace_mode):
def main():
setup_logging()
parser = ArgumentParser()
parser.add_argument(
"-r",
"--renderer",
"-r", "--renderer",
help="Whether to run with the hybrid renderer or path tracer",
required=True,
choices=RENDERERS,
)
parser.add_argument(
"-t", "--trace-mode",
help="Which type of hardware accelerated ray-tracing mode should be used",
required=True, choices=TRACE_MODES,)
required=True,
choices=TRACE_MODES,
)
parser.add_argument(
"--preset",
help="The graphics settings preset to use",
required=True,
choices=PRESETS,
)
args = parser.parse_args()
logging.info(
"Starting Evolve with %s renderer and trace mode %s",
"Starting Evolve with %s renderer and trace mode %s on %s",
args.renderer,
args.trace_mode,
args.preset,
)
start_time = time.time()
launch_evolve(args.renderer, args.trace_mode)
launch_evolve(args.renderer, args.trace_mode, args.preset)
end_time = time.time()
scores = get_scores(RESULTS_FILE)
logging.info("Benchmark took %.2f seconds", end_time - start_time)
report = {
"test": "Evolve Benchmark",
"test_parameter": f"{args.renderer} {args.trace_mode}",
"test": f"Evolve Benchmark",
"test_parameter": f"{args.renderer} {args.trace_mode} {args.preset}",
"start_time": seconds_to_milliseconds(start_time),
"end_time": seconds_to_milliseconds(end_time),
"unit": "Score",

View File

@@ -15,4 +15,10 @@ options:
values:
- "pipeline"
- "inline"
- "workgraph"
- "work-graph"
- name: preset
type: select
values:
- "ultra"
- "high"
- "medium"