Lint, and add _s suffix to metrics
golechwierowicz committed Nov 27, 2023
1 parent 80ca0fc commit ac84ed1
Showing 2 changed files with 25 additions and 18 deletions.
benchmarks/experiment_runner.py: 15 changes (9 additions, 6 deletions)
@@ -254,7 +254,9 @@ def dump_profile_info(self, prof, model_name):
   def collect_profile_to_metrics(self, prof, metrics):
     assert prof is not None, 'Expecting profiler to be defined!'
     if not self._args.profile_cuda_cpu_collect:
-      logger.warning('Profiling enabled, but collection of CPU/CUDA profiling info disabled.')
+      logger.warning(
+          'Profiling enabled, but collection of CPU/CUDA profiling info disabled.'
+      )
       return
 
     kernel_dump = prof.profiler.total_average()
@@ -276,10 +278,12 @@ def collect_profile_to_metrics(self, prof, metrics):
 
     total_cpu_time /= 1000000
     total_cuda_time /= 1000000
-    metrics["total_cpu_time"] = total_cpu_time
-    metrics["total_cuda_time"] = total_cuda_time
-    metrics["per_iter_cpu_time"] = total_cpu_time / self._args.iterations_per_run
-    metrics["per_iter_cuda_time"] = total_cuda_time / self._args.iterations_per_run
+    metrics["total_cpu_time_s"] = total_cpu_time
+    metrics["total_cuda_time_s"] = total_cuda_time
+    metrics[
+        "per_iter_cpu_time_s"] = total_cpu_time / self._args.iterations_per_run
+    metrics[
+        "per_iter_cuda_time_s"] = total_cuda_time / self._args.iterations_per_run
 
   def timed_run(self, benchmark_experiment, benchmark_model):
     reset_rng_state(benchmark_experiment)
@@ -323,7 +327,6 @@ def loop(prof=None):
     else:
       output = loop()
 
-
     t_end = time.perf_counter()
     if enable_prof:
       self.dump_profile_info(prof, benchmark_model.model_name)
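For context, a minimal, self-contained sketch (not part of the commit) of the unit handling that the new "_s" suffix documents: the profiler totals are divided by 1,000,000 before being stored, i.e. converted from microseconds to seconds, and then split per iteration. The literal values and the iterations_per_run setting below are hypothetical.

# Sketch only: stand-ins for the torch profiler totals (assumed to be reported
# in microseconds) and the runner's iterations_per_run argument.
iterations_per_run = 10
total_cpu_time = 3_250_000.0   # hypothetical total CPU time, in microseconds
total_cuda_time = 5_100_000.0  # hypothetical total CUDA time, in microseconds

metrics = {}
total_cpu_time /= 1000000   # microseconds -> seconds
total_cuda_time /= 1000000  # microseconds -> seconds
metrics["total_cpu_time_s"] = total_cpu_time
metrics["total_cuda_time_s"] = total_cuda_time
metrics["per_iter_cpu_time_s"] = total_cpu_time / iterations_per_run
metrics["per_iter_cuda_time_s"] = total_cuda_time / iterations_per_run
print(metrics)  # all four values are now expressed in seconds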
benchmarks/result_analyzer.py: 28 changes (16 additions, 12 deletions)
@@ -88,18 +88,22 @@ def get_calculated_metrics(self, d, dataline):
       d["xla_median_trace_per_iter_time"] = -1
       d["xla_compile_time"] = -1
 
-    if "total_cpu_time" in dataline["metrics"]:
-      total_cpu_time = np.asarray(dataline["metrics"]["total_cpu_time"], dtype="float")
-      d["median_total_cpu_time"] = np.median(total_cpu_time)
-    if "per_iter_cpu_time" in dataline["metrics"]:
-      per_iter_cpu_time = np.asarray(dataline["metrics"]["per_iter_cpu_time"], dtype="float")
-      d["median_per_iter_cpu_time"] = np.median(per_iter_cpu_time)
-    if "total_cuda_time" in dataline["metrics"]:
-      total_cuda_time = np.asarray(dataline["metrics"]["total_cuda_time"], dtype="float")
-      d["median_total_cuda_time"] = np.median(total_cuda_time)
-    if "per_iter_cuda_time" in dataline["metrics"]:
-      per_iter_cuda_time = np.asarray(dataline["metrics"]["per_iter_cuda_time"], dtype="float")
-      d["median_per_iter_cuda_time"] = np.median(per_iter_cuda_time)
+    if "total_cpu_time_s" in dataline["metrics"]:
+      total_cpu_time = np.asarray(
+          dataline["metrics"]["total_cpu_time_s"], dtype="float")
+      d["median_total_cpu_time_s"] = np.median(total_cpu_time)
+    if "per_iter_cpu_time_s" in dataline["metrics"]:
+      per_iter_cpu_time = np.asarray(
+          dataline["metrics"]["per_iter_cpu_time_s"], dtype="float")
+      d["median_per_iter_cpu_time_s"] = np.median(per_iter_cpu_time)
+    if "total_cuda_time_s" in dataline["metrics"]:
+      total_cuda_time = np.asarray(
+          dataline["metrics"]["total_cuda_time_s"], dtype="float")
+      d["median_total_cuda_time_s"] = np.median(total_cuda_time)
+    if "per_iter_cuda_time_s" in dataline["metrics"]:
+      per_iter_cuda_time = np.asarray(
+          dataline["metrics"]["per_iter_cuda_time_s"], dtype="float")
+      d["median_per_iter_cuda_time_s"] = np.median(per_iter_cuda_time)
 
     if dataline["experiment"]["dynamo"]:
       d["dynamo_compile_time"] = np.max(total_time) - np.median(total_time)
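On the analysis side, a small standalone sketch (again not from the commit) of the aggregation applied to the renamed keys: each per-run list is converted to a float array and reduced to its median. The dataline contents below are made up for illustration.

import numpy as np

# Hypothetical dataline: the "_s" metrics hold one value per run, in seconds.
dataline = {"metrics": {"total_cpu_time_s": [3.2, 3.4, 3.1]}}

d = {}
if "total_cpu_time_s" in dataline["metrics"]:
  total_cpu_time = np.asarray(dataline["metrics"]["total_cpu_time_s"], dtype="float")
  d["median_total_cpu_time_s"] = np.median(total_cpu_time)
print(d["median_total_cpu_time_s"])  # 3.2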
