Mirror of https://github.com/LTTLabsOSS/markbench-tests.git
Text generation benchmarks
21
procyon_ai_text_generation/README.md
Normal file
@@ -0,0 +1,21 @@
# Procyon AI Text Generation

Runs one of the UL Procyon AI Text Generation benchmark workloads and reads the overall score from the exported result.

## Prerequisites

- Python 3.10+
- UL Procyon installed in the default location and activated.
- Desired benchmark models are downloaded.

## Options

- `--engine` Specifies the benchmark configuration to run.

## Output

report.json
- `test`: The name of the selected benchmark
- `score`: Procyon overall score
- `start_time`: number representing a timestamp of the test's start time in milliseconds
- `end_time`: number representing a timestamp of the test's end time in milliseconds
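For illustration, a typical run and its output might look like the following. This is a sketch: the engine value is one of the options listed in `manifest.yaml`, and the score and timestamps are made-up numbers.

```
python ulprocai_text_gen.py --engine Phi_3_5
```

which writes a `report.json` along the lines of:

```json
{
  "test": "Phi Text Generation",
  "unit": "score",
  "score": "1234",
  "start_time": 1736476800000,
  "end_time": 1736477100000
}
```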
26
procyon_ai_text_generation/config/ai_textgeneration_all.def
Normal file
@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="utf-8"?>
<benchmark>
    <test_info>
        <benchmark_tests>
            <benchmark_test name="AITextGenerationBenchmark" test_run_type="EXPLICIT" version="1.0"/>
        </benchmark_tests>
    </test_info>
    <application_info>
        <selected_workloads>
            <selected_workload name="AIPhiDefault"/>
            <selected_workload name="AIMistral7bDefault"/>
            <selected_workload name="AILlama3Default"/>
            <selected_workload name="AILlamaDefault"/>
        </selected_workloads>
    </application_info>
    <settings>
        <setting>
            <name>ai_engine</name>
            <value>ort-directml</value> <!-- use ort-directml, openvino -->
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value></value> <!-- GPU Device selection is not available on ONNX runtime and is only supported with OpenVINO engine. Check our CLI for available OpenVINO devices using the list-openvino-devices parameter. Run "ProcyonCmd.exe -h" for details. -->
        </setting>
    </settings>
</benchmark>
23
procyon_ai_text_generation/config/ai_textgeneration_llama2.def
Normal file
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="utf-8"?>
<benchmark>
    <test_info>
        <benchmark_tests>
            <benchmark_test name="AITextGenerationBenchmark" test_run_type="EXPLICIT" version="1.0"/>
        </benchmark_tests>
    </test_info>
    <application_info>
        <selected_workloads>
            <selected_workload name="AILlamaDefault"/>
        </selected_workloads>
    </application_info>
    <settings>
        <setting>
            <name>ai_engine</name>
            <value>ort-directml</value> <!-- use ort-directml, openvino -->
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value></value> <!-- GPU Device selection is not available on ONNX runtime and is only supported with OpenVINO engine. Check our CLI for available OpenVINO devices using the list-openvino-devices parameter. Run "ProcyonCmd.exe -h" for details. -->
        </setting>
    </settings>
</benchmark>
23
procyon_ai_text_generation/config/ai_textgeneration_llama3.1.def
Normal file
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="utf-8"?>
<benchmark>
    <test_info>
        <benchmark_tests>
            <benchmark_test name="AITextGenerationBenchmark" test_run_type="EXPLICIT" version="1.0"/>
        </benchmark_tests>
    </test_info>
    <application_info>
        <selected_workloads>
            <selected_workload name="AILlama3Default"/>
        </selected_workloads>
    </application_info>
    <settings>
        <setting>
            <name>ai_engine</name>
            <value>ort-directml</value> <!-- use ort-directml, openvino -->
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value></value> <!-- GPU Device selection is not available on ONNX runtime and is only supported with OpenVINO engine. Check our CLI for available OpenVINO devices using the list-openvino-devices parameter. Run "ProcyonCmd.exe -h" for details. -->
        </setting>
    </settings>
</benchmark>
23
procyon_ai_text_generation/config/ai_textgeneration_mistral.def
Normal file
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="utf-8"?>
<benchmark>
    <test_info>
        <benchmark_tests>
            <benchmark_test name="AITextGenerationBenchmark" test_run_type="EXPLICIT" version="1.0"/>
        </benchmark_tests>
    </test_info>
    <application_info>
        <selected_workloads>
            <selected_workload name="AIMistral7bDefault"/>
        </selected_workloads>
    </application_info>
    <settings>
        <setting>
            <name>ai_engine</name>
            <value>ort-directml</value> <!-- use ort-directml, openvino -->
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value></value> <!-- GPU Device selection is not available on ONNX runtime and is only supported with OpenVINO engine. Check our CLI for available OpenVINO devices using the list-openvino-devices parameter. Run "ProcyonCmd.exe -h" for details. -->
        </setting>
    </settings>
</benchmark>
23
procyon_ai_text_generation/config/ai_textgeneration_phi.def
Normal file
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="utf-8"?>
<benchmark>
    <test_info>
        <benchmark_tests>
            <benchmark_test name="AITextGenerationBenchmark" test_run_type="EXPLICIT" version="1.0"/>
        </benchmark_tests>
    </test_info>
    <application_info>
        <selected_workloads>
            <selected_workload name="AIPhiDefault"/>
        </selected_workloads>
    </application_info>
    <settings>
        <setting>
            <name>ai_engine</name>
            <value>ort-directml</value> <!-- use ort-directml, openvino -->
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value></value> <!-- GPU Device selection is not available on ONNX runtime and is only supported with OpenVINO engine. Check our CLI for available OpenVINO devices using the list-openvino-devices parameter. Run "ProcyonCmd.exe -h" for details. -->
        </setting>
    </settings>
</benchmark>
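Each `.def` selects the inference engine through the `ai_engine` setting; per the inline comments, `ort-directml` and `openvino` are the accepted values, and explicit device selection only works with the OpenVINO engine. A hypothetical OpenVINO variant of the settings block might look like this (the device id `GPU.0` is an assumed example, not taken from these files; enumerate real ids with the CLI's `list-openvino-devices` parameter):

```xml
<settings>
    <setting>
        <name>ai_engine</name>
        <value>openvino</value> <!-- assumption: switched from ort-directml -->
    </setting>
    <setting>
        <name>ai_device_id</name>
        <value>GPU.0</value> <!-- hypothetical id; check list-openvino-devices -->
    </setting>
</settings>
```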
15
procyon_ai_text_generation/manifest.yaml
Normal file
@@ -0,0 +1,15 @@
friendly_name: "Procyon AI Text Generation"
executable: "ulprocai_text_gen.py"
process_name: "ProcyonCmd.exe"
disable_presentmon: true
output_dir: "run"
options:
  - name: engine
    type: select
    values:
      - "All_Models"
      - "Llama_2_13B"
      - "Llama_3_1_8B"
      - "Mistral_7B"
      - "Phi_3_5"
    tooltip: Select which model configuration to run for the Procyon AI Text Generation benchmark
149
procyon_ai_text_generation/ulprocai_text_gen.py
Normal file
@@ -0,0 +1,149 @@
"""Procyon AI Text Generation test script"""
from argparse import ArgumentParser
import logging
from pathlib import Path
import subprocess
import sys
import time
import psutil
from utils import find_score_in_xml, is_process_running, get_install_path

PARENT_DIR = str(Path(sys.path[0], ".."))
sys.path.append(PARENT_DIR)

from harness_utils.output import (
    DEFAULT_DATE_FORMAT,
    DEFAULT_LOGGING_FORMAT,
    seconds_to_milliseconds,
    setup_log_directory,
    write_report_json
)

#####
### Globals
#####
SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run"
DIR_PROCYON = Path(get_install_path())
EXECUTABLE = "ProcyonCmd.exe"
ABS_EXECUTABLE_PATH = DIR_PROCYON / EXECUTABLE
CONFIG_DIR = SCRIPT_DIR / "config"
BENCHMARK_CONFIG = {
    "All_Models": {
        "config": f"\"{CONFIG_DIR}\\ai_textgeneration_all.def\"",
        "process_name": "Handler.exe",
        "result_regex": r"<AIImageGenerationOverallScore>(\d+)",
        "test_name": "All LLM Model Text Generation"
    },
    "Llama_2_13B": {
        "config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama2.def\"",
        "process_name": "Handler.exe",
        "result_regex": r"<AiTextGenerationLlama2OverallScore>(\d+)",
        "test_name": "Llama 2 Text Generation"
    },
    "Llama_3_1_8B": {
        "config": f"\"{CONFIG_DIR}\\ai_textgeneration_llama3.1.def\"",
        "process_name": "Handler.exe",
        "result_regex": r"<AiTextGenerationLlama3OverallScore>(\d+)",
        "test_name": "Llama 3.1 Text Generation"
    },
    "Mistral_7B": {
        "config": f"\"{CONFIG_DIR}\\ai_textgeneration_mistral.def\"",
        "process_name": "Handler.exe",
        "result_regex": r"<AiTextGenerationMistralOverallScore>(\d+)",
        "test_name": "Mistral Text Generation"
    },
    "Phi_3_5": {
        "config": f"\"{CONFIG_DIR}\\ai_textgeneration_phi.def\"",
        "process_name": "Handler.exe",
        "result_regex": r"<AiTextGenerationPhiOverallScore>(\d+)",
        "test_name": "Phi Text Generation"
    }
}
RESULTS_FILENAME = "result.xml"
REPORT_PATH = LOG_DIR / RESULTS_FILENAME


def setup_logging():
    """setup logging"""
    setup_log_directory(LOG_DIR)
    logging.basicConfig(filename=LOG_DIR / "harness.log",
                        format=DEFAULT_LOGGING_FORMAT,
                        datefmt=DEFAULT_DATE_FORMAT,
                        level=logging.DEBUG)
    console = logging.StreamHandler()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


def get_arguments():
    """get arguments"""
    parser = ArgumentParser()
    parser.add_argument(
        "--engine", dest="engine", help="Engine test type",
        required=True, choices=BENCHMARK_CONFIG.keys())
    return parser.parse_args()


def create_procyon_command(test_option):
    """create command string"""
    command = f'\"{ABS_EXECUTABLE_PATH}\" --definition={test_option} --export=\"{REPORT_PATH}\"'
    command = command.rstrip()
    return command


def run_benchmark(process_name, command_to_run):
    """run the benchmark"""
    with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) as proc:
        logging.info("Procyon AI Text Generation benchmark has started.")
        start_time = time.time()
        # wait for the benchmark worker process to appear, then raise its priority
        while True:
            now = time.time()
            elapsed = now - start_time
            if elapsed >= 60:  # seconds
                raise ValueError("Benchmark subprocess did not start in time")
            process = is_process_running(process_name)
            if process is not None:
                process.nice(psutil.HIGH_PRIORITY_CLASS)
                break
            time.sleep(0.2)
        _, _ = proc.communicate()  # blocks until Procyon exits
    return proc


try:
    setup_logging()
    args = get_arguments()
    option = BENCHMARK_CONFIG[args.engine]["config"]
    cmd = create_procyon_command(option)
    logging.info('Starting benchmark!')
    logging.info(cmd)
    start_time = time.time()
    pr = run_benchmark(BENCHMARK_CONFIG[args.engine]["process_name"], cmd)

    if pr.returncode > 0:
        logging.error("Procyon exited with return code %d", pr.returncode)
        sys.exit(pr.returncode)

    score = find_score_in_xml(BENCHMARK_CONFIG[args.engine]["result_regex"])
    if score is None:
        logging.error("Could not find overall score!")
        sys.exit(1)

    end_time = time.time()
    elapsed_test_time = round(end_time - start_time, 2)
    logging.info("Benchmark took %.2f seconds", elapsed_test_time)
    logging.info("Score was %s", score)

    report = {
        "test": BENCHMARK_CONFIG[args.engine]["test_name"],
        "unit": "score",
        "score": score,
        "start_time": seconds_to_milliseconds(start_time),
        "end_time": seconds_to_milliseconds(end_time)
    }

    write_report_json(LOG_DIR, "report.json", report)
except Exception as e:
    logging.error("Something went wrong running the benchmark!")
    logging.exception(e)
    sys.exit(1)
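For reference, `create_procyon_command` assembles a quoted command line like the one below. This is a sketch with illustrative paths: the Procyon directory actually comes from the `InstallDir` registry value, and the checkout location varies per machine.

```
"C:\Program Files\UL\Procyon\ProcyonCmd.exe" --definition="C:\markbench-tests\procyon_ai_text_generation\config\ai_textgeneration_phi.def" --export="C:\markbench-tests\procyon_ai_text_generation\run\result.xml"
```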
36
procyon_ai_text_generation/utils.py
Normal file
@@ -0,0 +1,36 @@
"""Procyon AI Text Generation test utils"""
from pathlib import Path
import re
import winreg

import psutil

SCRIPT_DIR = Path(__file__).resolve().parent
LOG_DIR = SCRIPT_DIR / "run"


def is_process_running(process_name):
    """check if given process is running"""
    for process in psutil.process_iter(['pid', 'name']):
        if process.info['name'] == process_name:
            return process
    return None


def find_score_in_xml(result_regex):
    """Reads the overall score from the exported result.xml"""
    score_pattern = re.compile(result_regex)
    cfg = f"{LOG_DIR}\\result.xml"
    score_value = None  # None (not 0) so the caller's missing-score check works
    with open(cfg, encoding="utf-8") as file:
        lines = file.readlines()
        for line in lines:
            score_match = score_pattern.search(line)
            if score_match is not None:
                score_value = score_match.group(1)
    return score_value


def get_install_path() -> str:
    """Gets the Procyon installation directory from the InstallDir registry key"""
    reg_path = r"Software\UL\Procyon"
    reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path, 0, winreg.KEY_READ)
    value, _ = winreg.QueryValueEx(reg_key, "InstallDir")
    return value
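As a minimal sketch of how `find_score_in_xml` pulls the score out of the exported XML, assuming a `result.xml` line shaped like the tags in `BENCHMARK_CONFIG`:

```python
import re

# one line from an exported result.xml (illustrative value)
line = "<AiTextGenerationPhiOverallScore>1234</AiTextGenerationPhiOverallScore>"
pattern = re.compile(r"<AiTextGenerationPhiOverallScore>(\d+)")
match = pattern.search(line)
print(match.group(1))  # "1234" -- note the score is captured as a string
```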