feat: perform and publish benchmarks with a single make target

Umut
2021-09-10 10:16:04 +03:00
parent bd8dca11d5
commit c253219277
10 changed files with 273 additions and 39 deletions

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Run benchmarks while logging the intermediate results
# Publish the findings to the progress tracker
initial_log=logs/$(date -u --iso-8601=seconds).log
mkdir -p logs
make -s benchmark > "$initial_log"
final_log=logs/$(date -u --iso-8601=seconds).log
# Tidy the log: squeeze repeated blank lines and drop the first and last lines
cat -s "$initial_log" | sed '1d; $d' > "$final_log"
rm "$initial_log"
cp "$final_log" logs/latest.log
if [ -f .env ]
then
    # Set the last two environment variables in `.env` for the curl command below
    # (https://gist.github.com/mihow/9c7f559807069a03e302605691f85572)
    export $(cat .env | tail -n 2 | sed 's/#.*//g' | xargs -d '\n')
fi
curl \
    -H "Authorization: Bearer $PROGRESS_TRACKER_TOKEN" \
    -H 'Content-Type: application/json' \
    -d @.benchmarks/findings.json \
    -X POST "$PROGRESS_TRACKER_URL"/measurement
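
The `export` line above expects the last two lines of `.env` to define the credentials used by the curl call. For illustration only, a hypothetical `.env` could end with (both values are placeholders):

PROGRESS_TRACKER_URL=https://progress-tracker.example.com
PROGRESS_TRACKER_TOKEN=replace-with-a-real-token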

View File

@@ -0,0 +1,50 @@
import cpuinfo
import dotenv
import json
import os
import platform
import psutil
import urllib.parse


def main():
    dotenv.load_dotenv()
    properties = []
    cpu_value = cpuinfo.get_cpu_info()["brand_raw"].replace("(R)", "®").replace("(TM)", "")
    properties.append(["CPU", cpu_value])
    vcpu_value = os.getenv("VCPU")
    if vcpu_value is not None:
        properties.append(["vCPU", vcpu_value])
    ram_value = f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB"
    properties.append(["RAM", ram_value])
    os_value = os.getenv("OS_NAME")
    if os_value is None:
        os_value = f"{platform.system()} {platform.release()}"
    properties.append(["OS", os_value])
    # Fall back to the hostname when `MACHINE_NAME` is not set
    name = os.getenv("MACHINE_NAME")
    if name is None:
        name = platform.node()
    name = name.strip()
    # Derive a URL-friendly id from the machine name
    id_ = name.lower()
    id_ = id_.replace(" ", "-")
    id_ = id_.replace("_", "-")
    id_ = id_.replace(".", "-")
    id_ = id_.replace("(", "")
    id_ = id_.replace(")", "")
    id_ = id_.replace("$/h", "-dollars-per-hour")
    id_ = id_.strip()
    id_ = urllib.parse.quote_plus(id_)
    machine = {"id": id_, "name": name, "properties": properties}
    with open(".benchmarks/machine.json", "w") as f:
        json.dump(machine, f, indent=2, ensure_ascii=False)


if __name__ == "__main__":
    main()
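
For a concrete picture, a hypothetical `.benchmarks/machine.json` written by this script (all values invented) might look like:

{
  "id": "example-machine",
  "name": "Example Machine",
  "properties": [
    ["CPU", "Example® CPU @ 3.00GHz"],
    ["vCPU", "8"],
    ["RAM", "15.63 GB"],
    ["OS", "Linux 5.4.0"]
  ]
}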

View File

@@ -10,9 +10,9 @@ import tqdm
 def name_to_id(name):
     """Convert a human readable name to a url friendly id (e.g., `x + y` to `x-plus-y`)"""
+    name = name.replace("-", "minus")
     name = name.replace("**", "-to-the-power-of-")
     name = name.replace("+", "plus")
-    name = name.replace("-", "minus")
     name = name.replace("*", "times")
     name = name.replace("/", "over")
     name = name.replace("%", "percent")
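
Not part of the diff, but worth spelling out: the reordering above matters because the `**` substitution introduces literal hyphens, which the `-` to `minus` substitution would then mangle if it ran afterwards. A minimal illustration:

expr = "x ** 2"
# Old order: the "-" substitution ran after "**" had introduced hyphens
broken = expr.replace("**", "-to-the-power-of-").replace("-", "minus")
# broken == "x minustominustheminuspowerminusofminus 2"
# New order: the "-" substitution runs first, so the inserted hyphens survive
fixed = expr.replace("-", "minus").replace("**", "-to-the-power-of-")
# fixed == "x -to-the-power-of- 2"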
@@ -191,7 +191,12 @@ def perform_measurements(script, script_without_extension, target_id, metrics, s
             working = False
             pbar.write(f"  Failed (exited with {process.returncode})")
             pbar.write(f"")
+            pbar.write(f"  --------------------{'-' * len(str(process.returncode))}-")
+            stderr = process.stderr.decode("utf-8")
+            for line in stderr.split("\n"):
+                if line.strip() != "":
+                    pbar.write(f"  {line}")
             pbar.update(samples)
             break
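
The `process.stderr.decode("utf-8")` call assumes the benchmark process was started with its output captured. A minimal sketch of such an invocation (assumed for illustration; not the repository's actual call):

import subprocess

# Hypothetical: run a measurement script and capture stdout/stderr as bytes
process = subprocess.run(["python3", "benchmark_script.py"], capture_output=True)
if process.returncode != 0:
    print(process.stderr.decode("utf-8"))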
@@ -235,17 +240,18 @@ def main():
     parser = argparse.ArgumentParser(description="Measurement script for the progress tracker")
     parser.add_argument("base", type=str, help="directory which contains the benchmarks")
     parser.add_argument("--output", type=str, help="file which the results will be saved to")
     parser.add_argument("--samples", type=int, default=30, help="number of samples to take")
     parser.add_argument("--keep", action='store_true', help="flag to keep measurement scripts")
     args = parser.parse_args()
     base = pathlib.Path(args.base)
     output = pathlib.Path(args.output)
     samples = args.samples
-    result = {"metrics": {}, "targets": {}}
+    with open(".benchmarks/machine.json", "r") as f:
+        machine = json.load(f)
+    result = {"machine": machine, "metrics": {}, "targets": {}}
     scripts = list(base.glob("*.py"))
     # Process each script under the base directory
@@ -303,8 +309,8 @@ def main():
         perform_measurements(script, script_without_extension, target_id, metrics, samples, result)
         # Dump the latest results to the output file
-        with open(output, "w") as f:
-            json.dump(result, f, indent=2)
+        with open(".benchmarks/findings.json", "w") as f:
+            json.dump(result, f, indent=2, ensure_ascii=False)
         # Delete the modified script if the user doesn't care
         if not args.keep:
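
Taken together, the pieces form one pipeline: `make benchmark` presumably runs this measurement script, which now embeds the machine description and writes `.benchmarks/findings.json`, and the new shell script POSTs that file to `$PROGRESS_TRACKER_URL`/measurement. A hypothetical top level of the findings payload (keys grounded in the code above, values invented):

findings = {
    "machine": {"id": "example-machine", "name": "Example Machine", "properties": [["RAM", "15.63 GB"]]},
    "metrics": {},  # filled in by perform_measurements
    "targets": {},  # filled in by perform_measurements
}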