Mirror of https://github.com/zama-ai/concrete.git (synced 2026-02-08 19:44:57 -05:00)
feat: let us choose file to benchmark
If needed, e.g. during development of new benchmarks, we can choose the files to benchmark with calls like:
- poetry run python script/progress_tracker_utils/measure.py benchmarks -f benchmarks/x_matmul_y.py benchmarks/x_plus_y.py
- poetry run python script/progress_tracker_utils/measure.py benchmarks -f benchmarks/x_matmul_y.py
The classical
- poetry run python script/progress_tracker_utils/measure.py benchmarks
is still usable.
committed by Benoit Chevallier
parent be453394fb
commit 806d6584e8
script/progress_tracker_utils/measure.py

@@ -284,17 +284,17 @@ def perform_measurements(path, script, target_id, metrics, samples, result):
     del result["targets"][target_id]["measurements"]
 
 
-def main(args):
-    """Measurement script for the progress tracker"""
+def get_scripts_to_benchmark(args):
+    """Get the list of files to benchmark"""
 
     base = pathlib.Path(args.base)
-    samples = args.samples
 
-    with open(".benchmarks/machine.json", "r", encoding="utf-8") as f:
-        machine = json.load(f)
-
-    result = {"machine": machine, "metrics": {}, "targets": {}}
-    scripts = list(base.glob("*.py"))
+    if args.files_to_benchmark is None:
+        scripts = list(base.glob("*.py"))
+    else:
+        scripts = [pathlib.Path(f) for f in args.files_to_benchmark]
+    print("Will benchmark following files:\n")
+    print(" - " + "\n - ".join(str(s) for s in scripts))
 
     # Clear the previous temporary scripts directory
     shutil.rmtree(".benchmarks/scripts", ignore_errors=True)
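For illustration only, here is a minimal, self-contained sketch of the selection rule the new helper implements; this is not the repository's code, and the select_scripts name plus the Namespace objects are made up for the example:

# Sketch of the file-selection rule shown in the diff above (hypothetical helper name).
import pathlib
from argparse import Namespace

def select_scripts(args):
    base = pathlib.Path(args.base)
    if args.files_to_benchmark is None:
        # Default behaviour: benchmark every Python file under the base directory
        return list(base.glob("*.py"))
    # Explicit selection: paths are given with the base directory as a prefix
    return [pathlib.Path(f) for f in args.files_to_benchmark]

# Equivalent of `measure.py benchmarks -f benchmarks/x_plus_y.py`
print(select_scripts(Namespace(base="benchmarks", files_to_benchmark=["benchmarks/x_plus_y.py"])))
# Equivalent of the classical `measure.py benchmarks`
print(select_scripts(Namespace(base="benchmarks", files_to_benchmark=None)))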
@@ -307,6 +307,21 @@ def main(args):
     # (e.g., we copy `benchmarks/common.py` to `.benchmarks/scripts/common.py` which allows
     # the modified `.benchmarks/scripts/x_plus_42.py` to access `common` module`)
 
+    return scripts
+
+
+def main(args):
+    """Measurement script for the progress tracker"""
+
+    samples = args.samples
+
+    with open(".benchmarks/machine.json", "r", encoding="utf-8") as f:
+        machine = json.load(f)
+
+    result = {"machine": machine, "metrics": {}, "targets": {}}
+
+    scripts = get_scripts_to_benchmark(args)
+
     # Process each script under the base directory
     for path in scripts:
         # Read the script line by line
@@ -396,5 +411,13 @@ if __name__ == "__main__":
     parser.add_argument("base", type=str, help="directory which contains the benchmarks")
     parser.add_argument("--samples", type=int, default=30, help="number of samples to take")
     parser.add_argument("--keep", action="store_true", help="flag to keep measurement scripts")
+    parser.add_argument(
+        "--files_to_benchmark",
+        "-f",
+        nargs="+",
+        type=str,
+        default=None,
+        help="files to benchmark in base directory (with base directory as a prefix)",
+    )
 
     main(parser.parse_args())
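A rough, standalone illustration of how the new flag parses (not taken from measure.py): with nargs="+", the values after -f are collected into a list of strings, while omitting the flag leaves the attribute at its default of None, which is what triggers the "benchmark every *.py file under base" behaviour above.

# Standalone sketch of the -f/--files_to_benchmark parsing (assumed example, minimal parser).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("base", type=str)
parser.add_argument("--files_to_benchmark", "-f", nargs="+", type=str, default=None)

args = parser.parse_args(["benchmarks", "-f", "benchmarks/x_matmul_y.py", "benchmarks/x_plus_y.py"])
print(args.files_to_benchmark)  # ['benchmarks/x_matmul_y.py', 'benchmarks/x_plus_y.py']

args = parser.parse_args(["benchmarks"])
print(args.files_to_benchmark)  # None -> all *.py files under the base directory are benchmarked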