Mirror of https://github.com/LTTLabsOSS/markbench-tests.git, synced 2026-01-09 22:18:00 -05:00
image generation benchmark implementation, requires testing for openvino systems
harness_utils/procyoncmd.py (Normal file, 65 lines)
@@ -0,0 +1,65 @@
import subprocess
import re


def get_winml_devices(procyon_path):
    """
    Uses ProcyonCmd.exe to list all available WinML devices on the system.
    Returns a dictionary of device names and IDs.
    """
    # Run the Procyon command line utility to list the available WinML devices.
    winml_devices = subprocess.run([f'{procyon_path}', '--list-winml-devices'], shell=True, capture_output=True, text=True, check=True).stdout

    winml_devices_split = winml_devices.split('\n')
    winml_devices_parsed = [device[9::] for device in winml_devices_split if re.search(r"(amd|nvidia|intel)", device.lower())]
    unique_winml_devices = list(dict.fromkeys(winml_devices_parsed))
    winml_dict = {device_split.split(', ')[0]: device_split.split(', ')[1] for device_split in unique_winml_devices}

    return winml_dict


def get_openvino_devices(procyon_path):
    """
    Uses ProcyonCmd.exe to list all available OpenVINO devices on the system.
    Returns a dictionary of device types and names.
    """
    openvino_devices = subprocess.run([f'{procyon_path}', '--list-openvino-devices'], shell=True, capture_output=True, text=True, check=True).stdout

    openvino_devices_split = openvino_devices.split('\n')
    openvino_devices_parsed = [device[9::] for device in openvino_devices_split if re.search(r"(amd|nvidia|intel)", device.lower())]
    unique_openvino_devices = list(dict.fromkeys(openvino_devices_parsed))
    openvino_dict = {device_split.split(', ')[1]: device_split.split(', ')[0] for device_split in unique_openvino_devices}

    return openvino_dict


def get_openvino_gpu(openvino_devices, gpu_id):
    """
    Checks the openvino_devices dictionary for GPU entries.
    If only one GPU is detected there is no GPU.0 or GPU.1 entry, only a plain
    "GPU" entry, so the first detected GPU is returned regardless of the requested ID.
    """
    gpu = openvino_devices.get("GPU", None)

    if gpu is not None:
        return gpu
    else:
        gpu = openvino_devices.get(gpu_id, "No Openvino GPU Detected")

    return gpu


def get_cuda_devices(procyon_path):
    """
    Uses ProcyonCmd.exe to list all available CUDA devices on the system.
    Returns a dictionary of device IDs and names.
    """
    cuda_devices = subprocess.run([f'{procyon_path}', '--list-cuda-devices'], shell=True, capture_output=True, text=True, check=True).stdout

    cuda_devices_split = cuda_devices.split('\n')
    cuda_devices_parsed = [device[9::] for device in cuda_devices_split if re.search(r"(nvidia)", device.lower())]
    unique_cuda_devices = list(dict.fromkeys(cuda_devices_parsed))

    if len(unique_cuda_devices) > 0:
        cuda_dict = {device_split.split(', ')[1]: device_split.split(', ')[0] for device_split in unique_cuda_devices}
    else:
        cuda_dict = {}

    return cuda_dict
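A minimal usage sketch of the helpers above, assuming ProcyonCmd.exe sits in the default UL Procyon install directory; the path and the example return values in the comments are illustrative placeholders, not output from this commit:

# Hypothetical sketch (not part of the commit): enumerate devices with the helpers above.
from pathlib import Path
from harness_utils.procyoncmd import (
    get_winml_devices,
    get_openvino_devices,
    get_openvino_gpu,
    get_cuda_devices,
)

# Assumed install location; the harness resolves this from the registry instead.
procyon_cmd = Path(r"C:\Program Files\UL\Procyon") / "ProcyonCmd.exe"

winml = get_winml_devices(procyon_cmd)        # e.g. {"NVIDIA GeForce RTX 4080": "0"}
openvino = get_openvino_devices(procyon_cmd)  # e.g. {"GPU": "Intel Arc A770"}
cuda = get_cuda_devices(procyon_cmd)          # e.g. {"cuda:0": "NVIDIA GeForce RTX 4080"}

# Resolve an OpenVINO GPU name, falling back to the lone "GPU" entry when only one GPU exists.
print(get_openvino_gpu(openvino, "GPU.0"))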
@@ -10,10 +10,6 @@ from utils import (
    find_score_in_xml,
    is_process_running,
    get_install_path,
    get_winml_devices,
    get_openvino_devices,
    get_openvino_gpu,
    get_cuda_devices
)

PARENT_DIR = str(Path(sys.path[0], ".."))
@@ -24,9 +20,14 @@ from harness_utils.output import (
    DEFAULT_LOGGING_FORMAT,
    seconds_to_milliseconds,
    setup_log_directory,
    write_report_json
    write_report_json,
)
from harness_utils.procyoncmd import (
    get_winml_devices,
    get_openvino_devices,
    get_openvino_gpu,
    get_cuda_devices,
)

#####
### Globals
#####

@@ -35,66 +35,3 @@ def get_install_path() -> str:
    reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path, 0, winreg.KEY_READ)
    value, _ = winreg.QueryValueEx(reg_key, "InstallDir")
    return value


def get_winml_devices(procyon_path):
    """
    Function which uses the ProcyonCmd.exe to list all available winml devices on the system. Returns a dictionary of device names and IDs
    """

    # run the command line utility for procyon in order to list available winml devices
    winml_devices = subprocess.run([f'{procyon_path}', '--list-winml-devices'], shell=True, capture_output=True, text=True, check=True).stdout

    winml_devices_split = winml_devices.split('\n')
    winml_devices_parsed = [device[9::] for device in winml_devices_split if re.search(r"(amd|nvidia|intel)", device.lower())]
    unique_winml_devices = list(dict.fromkeys(winml_devices_parsed))
    winml_dict = {device_split.split(', ')[0]:device_split.split(', ')[1] for device_split in unique_winml_devices}

    return winml_dict


def get_openvino_devices(procyon_path):
    """
    Function which uses the ProcyonCmd.exe to list all available openvino devices on the system. Returns a dictionary of device type and name
    """

    openvino_devices = subprocess.run([f'{procyon_path}', '--list-openvino-devices'], shell=True, capture_output=True, text=True, check=True).stdout

    openvino_devices_split = openvino_devices.split('\n')
    openvino_devices_parsed = [device[9::] for device in openvino_devices_split if re.search(r"(amd|nvidia|intel)", device.lower())]
    unique_openvino_devices = list(dict.fromkeys(openvino_devices_parsed))
    openvino_dict = {device_split.split(', ')[1]:device_split.split(', ')[0] for device_split in unique_openvino_devices}

    return openvino_dict


def get_openvino_gpu(openvino_devices, gpu_id):
    """
    function which checks the openvino_devices dictionary for GPU entries.
    If only one gpu is detected, there should not be a GPU.0 and GPU.1 entry
    so we just return the first detected gpu regardless of requested ID
    """

    gpu = openvino_devices.get("GPU", None)

    if gpu is not None:
        return gpu
    else:
        gpu = openvino_devices.get(gpu_id, "No Openvino GPU Detected")

    return gpu


def get_cuda_devices(procyon_path):
    """
    Function which uses the ProcyonCmd.exe to list all available openvino devices on the system. Returns a dictionary of device type and name
    """

    cuda_devices = subprocess.run([f'{procyon_path}', '--list-cuda-devices'], shell=True, capture_output=True, text=True, check=True).stdout

    cuda_devices_split = cuda_devices.split('\n')
    cuda_devices_parsed = [device[9::] for device in cuda_devices_split if re.search(r"(nvidia)", device.lower())]
    unique_cuda_devices = list(dict.fromkeys(cuda_devices_parsed))

    if len(unique_cuda_devices) > 0:
        cuda_dict = {device_split.split(', ')[1]:device_split.split(', ')[0] for device_split in unique_cuda_devices}
    else:
        cuda_dict = {}

    return cuda_dict
@@ -17,7 +17,7 @@
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value>0</value> <!-- Use "0", "1", etc. Use our CLI e.g. "ProcyonCmd.exe list-directml-devices" to full options. Check "ProcyonCmd.exe -h" for the parameter syntax. -->
            <value></value> <!-- Use "0", "1", etc. Use our CLI e.g. "ProcyonCmd.exe list-directml-devices" to full options. Check "ProcyonCmd.exe -h" for the parameter syntax. -->
        </setting>
    </settings>
</benchmark>
@@ -17,7 +17,7 @@
        </setting>
        <setting>
            <name>ai_device_id</name>
            <value>0</value> <!-- Use "0", "1", etc. Use our CLI e.g. "ProcyonCmd.exe list-directml-devices" to full options. Check "ProcyonCmd.exe -h" for the parameter syntax. -->
            <value></value> <!-- Use "0", "1", etc. Use our CLI e.g. "ProcyonCmd.exe list-directml-devices" to full options. Check "ProcyonCmd.exe -h" for the parameter syntax. -->
        </setting>
    </settings>
</benchmark>
@@ -11,6 +11,12 @@ from utils import find_score_in_xml, is_process_running, get_install_path
PARENT_DIR = str(Path(sys.path[0], ".."))
sys.path.append(PARENT_DIR)

from harness_utils.procyoncmd import (
    get_winml_devices,
    get_openvino_devices,
    get_openvino_gpu,
    get_cuda_devices
)
from harness_utils.output import (
    DEFAULT_DATE_FORMAT,
    DEFAULT_LOGGING_FORMAT,
@@ -27,46 +33,101 @@ LOG_DIR = SCRIPT_DIR / "run"
DIR_PROCYON = Path(get_install_path())
EXECUTABLE = "ProcyonCmd.exe"
ABS_EXECUTABLE_PATH = DIR_PROCYON / EXECUTABLE
WINML_DEVICES = get_winml_devices(ABS_EXECUTABLE_PATH)
OPENVINO_DEVICES = get_openvino_devices(ABS_EXECUTABLE_PATH)
CUDA_DEVICES = get_cuda_devices(ABS_EXECUTABLE_PATH)

CONFIG_DIR = SCRIPT_DIR / "config"
BENCHMARK_CONFIG = {
    "AMD_GPU_FP16": {
    "AMD_GPU0_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_onnxruntime.def\"",
        "process_name": "ort-directml.exe",
        "device_name": list(WINML_DEVICES.keys())[0],
        "device_id": "0",
        "test_name": "ONNX Stable Diffusion FP16"
    },
    "AMD_GPU_XL_FP16": {
    "AMD_GPU1_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_onnxruntime.def\"",
        "process_name": "ort-directml.exe",
        "device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0],
        "device_id": "1" if len(list(WINML_DEVICES.values())) > 1 else "0",
        "test_name": "ONNX Stable Diffusion FP16"
    },
    "AMD_GPU0_XL_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_onnxruntime.def\"",
        "process_name": "ort-directml.exe",
        "device_name": list(WINML_DEVICES.keys())[0],
        "device_id": "0",
        "test_name": "ONNX Stable Diffusion FP16 XL"
    },
    "Intel_GPU_INT8": {
    "AMD_GPU1_XL_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_onnxruntime.def\"",
        "process_name": "ort-directml.exe",
        "device_name": list(WINML_DEVICES.keys())[1] if len(list(WINML_DEVICES.keys())) > 1 else list(WINML_DEVICES.keys())[0],
        "device_id": list(WINML_DEVICES.values())[1] if len(list(WINML_DEVICES.values())) > 1 else list(WINML_DEVICES.values())[0],
        "test_name": "ONNX Stable Diffusion FP16 XL"
    },
    "Intel_GPU0_INT8": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_openvino.def\"",
        "process_name": "openvino.exe",
        "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
        "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
        "test_name": "Intel OpenVINO Stable Diffusion INT8"
    },
    "Intel_GPU_FP16": {
    "Intel_GPU0_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_openvino.def\"",
        "process_name": "openvino.exe",
        "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
        "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
        "test_name": "Intel OpenVINO Stable Diffusion FP16"
    },
    "Intel_GPU_XL_FP16": {
    "Intel_GPU0_XL_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_openvino.def\"",
        "process_name": "openvino.exe",
        "device_id": "GPU.0" if "GPU.0" in list(OPENVINO_DEVICES.keys()) else "GPU",
        "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.0"),
        "test_name": "Intel OpenVINO Stable Diffusion FP16 XL"
    },
    "Intel_GPU1_INT8": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_openvino.def\"",
        "process_name": "openvino.exe",
        "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
        "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.1"),
        "test_name": "Intel OpenVINO Stable Diffusion INT8"
    },
    "Intel_GPU1_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_openvino.def\"",
        "process_name": "openvino.exe",
        "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
        "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.1"),
        "test_name": "Intel OpenVINO Stable Diffusion FP16"
    },
    "Intel_GPU1_XL_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_openvino.def\"",
        "process_name": "openvino.exe",
        "device_id": "GPU.1" if "GPU.1" in list(OPENVINO_DEVICES.keys()) else "GPU",
        "device_name": get_openvino_gpu(OPENVINO_DEVICES, "GPU.1"),
        "test_name": "Intel OpenVINO Stable Diffusion FP16 XL"
    },
    "NVIDIA_GPU_INT8": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15int8_tensorrt.def\"",
        "process_name": "tensorrt.exe",
        "device_id": "cuda:0",
        "device_name": CUDA_DEVICES.get("cuda:0"),
        "test_name": "NVIDIA TensorRT Stable Diffusion INT8"
    },
    "NVIDIA_GPU_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sd15fp16_tensorrt.def\"",
        "process_name": "tensorrt.exe",
        "device_id": "cuda:0",
        "device_name": CUDA_DEVICES.get("cuda:0"),
        "test_name": "NVIDIA TensorRT Stable Diffusion FP16"
    },
    "NVIDIA_GPU_XL_FP16": {
        "config": f"\"{CONFIG_DIR}\\ai_imagegeneration_sdxlfp16_tensorrt.def\"",
        "process_name": "tensorrt.exe",
        "device_id": "cuda:0",
        "device_name": CUDA_DEVICES.get("cuda:0"),
        "test_name": "NVIDIA TensorRT Stable Diffusion FP16 XL"
    }
}
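A minimal lookup sketch for one of the entries above, assuming the harness's --engine argument supplies the key; the chosen key and the print statement are illustrative, not code from this commit:

# Hypothetical sketch (illustrative, not part of the commit): resolve an engine key
# from BENCHMARK_CONFIG into the pieces the harness needs.
selected_engine = "Intel_GPU0_FP16"   # e.g. the value passed via --engine
entry = BENCHMARK_CONFIG[selected_engine]

definition = entry["config"]       # quoted path to the Procyon .def file
process = entry["process_name"]    # worker process the harness watches, e.g. openvino.exe
device_id = entry["device_id"]     # device selector forwarded to ProcyonCmd.exe
print(f"Running {entry['test_name']} on {entry['device_name']} ({device_id})")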
@@ -95,10 +156,19 @@ def get_arguments():
    return argies


def create_procyon_command(test_option):
def create_procyon_command(test_option, process_name, device_id):
    """create command string"""
    command = f'\"{ABS_EXECUTABLE_PATH}\" --definition={test_option} --export=\"{REPORT_PATH}\"'

    match process_name:
        case 'ort-directml.exe':
            command = f'\"{ABS_EXECUTABLE_PATH}\" --definition={test_option} --export=\"{REPORT_PATH}\" --select-winml-device {device_id}'
        case 'openvino.exe':
            command = f'\"{ABS_EXECUTABLE_PATH}\" --definition={test_option} --export=\"{REPORT_PATH}\" --select-openvino-device {device_id}'
        case 'tensorrt.exe':
            command = f'\"{ABS_EXECUTABLE_PATH}\" --definition={test_option} --export=\"{REPORT_PATH}\" --select-cuda-device {device_id}'
    command = command.rstrip()

    return command

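For reference, a command assembled by the new create_procyon_command might look roughly like the sketch below for an OpenVINO entry; the bracketed paths in the comment are illustrative placeholders, not real output:

# Hypothetical call sketch (illustrative): build the command line for an OpenVINO entry.
cfg = BENCHMARK_CONFIG["Intel_GPU0_FP16"]
cmd = create_procyon_command(cfg["config"], cfg["process_name"], cfg["device_id"])
# cmd then looks roughly like:
# "<DIR_PROCYON>\ProcyonCmd.exe" --definition="<CONFIG_DIR>\ai_imagegeneration_sd15fp16_openvino.def"
#   --export="<REPORT_PATH>" --select-openvino-device GPU.0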
@@ -122,9 +192,13 @@ def run_benchmark(process_name, command_to_run):

try:
    setup_logging()
    logging.info(f"Detected Windows ML Devices: {WINML_DEVICES}")
    logging.info(f"Detected OpenVino Devices: {OPENVINO_DEVICES}")
    logging.info(f"Detected CUDA Devices: {CUDA_DEVICES}")

    args = get_arguments()
    option = BENCHMARK_CONFIG[args.engine]["config"]
    cmd = create_procyon_command(option)
    cmd = create_procyon_command(option, BENCHMARK_CONFIG[args.engine]["process_name"], BENCHMARK_CONFIG[args.engine]["device_id"])
    logging.info('Starting benchmark!')
    logging.info(cmd)
    start_time = time.time()
@@ -146,6 +220,7 @@ try:

    report = {
        "test": BENCHMARK_CONFIG[args.engine]["test_name"],
        "device_name": BENCHMARK_CONFIG[args.engine]["device_name"],
        "unit": "score",
        "score": score,
        "start_time": seconds_to_milliseconds(start_time),