mirror of
https://github.com/ROCm/ROCm.git
synced 2026-04-05 03:01:17 -04:00
Reformat Python code with yapf. (#2589)
Reformat Python code with yapf. (#2589) I've added an option to yapf to do what we want for long lines, see https://github.com/google/yapf/pull/1177. We can now have a real Python formatter, yay! To make this PR, I ran my modified yapf over the repository, then looked over the full diff. Where yapf was mangling the param list of long function decls/calls (mostly kernels), I manually added `#` to put linebreaks where we want. I fixed up other formatting too -- mostly adding or removing a trailing comma from lists. Overall, trailing `#` was sufficient to get formatting similar to our current code. I didn't have to disable yapf anywhere. --------- Co-authored-by: Phil Tillet <phil@openai.com>
This commit is contained in:
@@ -3,8 +3,7 @@ import csv
|
||||
from collections import namedtuple
|
||||
|
||||
# Create a named tuple for the output of the benchmark
|
||||
BenchmarkOutput = namedtuple(
|
||||
'BenchmarkOutput', ['dev', 'name', 'batch_size', 'speedup', 'latency'])
|
||||
BenchmarkOutput = namedtuple('BenchmarkOutput', ['dev', 'name', 'batch_size', 'speedup', 'latency'])
|
||||
|
||||
|
||||
def parse_output(file_path: str) -> dict:
|
||||
@@ -19,13 +18,11 @@ def parse_output(file_path: str) -> dict:
|
||||
batch_size = row[2]
|
||||
speedup = float(row[3])
|
||||
latency = float(row[4])
|
||||
entries[name] = BenchmarkOutput(
|
||||
dev, name, batch_size, speedup, latency)
|
||||
entries[name] = BenchmarkOutput(dev, name, batch_size, speedup, latency)
|
||||
return entries
|
||||
|
||||
|
||||
def compare(baseline: dict, new: dict, threshold: float,
|
||||
geomean_threshold: float) -> bool:
|
||||
def compare(baseline: dict, new: dict, threshold: float, geomean_threshold: float) -> bool:
|
||||
baseline_geomean = 1.0
|
||||
new_geomean = 1.0
|
||||
for key in new:
|
||||
@@ -41,19 +38,16 @@ def compare(baseline: dict, new: dict, threshold: float,
|
||||
continue
|
||||
|
||||
if new_latency < baseline_latency * (1 - threshold):
|
||||
print(
|
||||
f"New benchmark {key} is faster than baseline: {new_latency} vs {baseline_latency}")
|
||||
print(f"New benchmark {key} is faster than baseline: {new_latency} vs {baseline_latency}")
|
||||
elif new_latency > baseline_latency * (1 + threshold):
|
||||
print(
|
||||
f"New benchmark {key} is slower than baseline: {new_latency} vs {baseline_latency}")
|
||||
print(f"New benchmark {key} is slower than baseline: {new_latency} vs {baseline_latency}")
|
||||
else:
|
||||
print(
|
||||
f"New benchmark {key} is within threshold: {new_latency} vs {baseline_latency}")
|
||||
print(f"New benchmark {key} is within threshold: {new_latency} vs {baseline_latency}")
|
||||
baseline_geomean *= baseline[key].speedup
|
||||
new_geomean *= new[key].speedup
|
||||
|
||||
baseline_geomean = baseline_geomean ** (1 / len(baseline))
|
||||
new_geomean = new_geomean ** (1 / len(new))
|
||||
baseline_geomean = baseline_geomean**(1 / len(baseline))
|
||||
new_geomean = new_geomean**(1 / len(new))
|
||||
print(f"Baseline geomean: {baseline_geomean}")
|
||||
print(f"New geomean: {new_geomean}")
|
||||
assert new_geomean >= baseline_geomean * (1 - geomean_threshold), \
|
||||
|
||||
Reference in New Issue
Block a user