[benchmarks] Collect XLA compilation and execution metrics. (#6153)
ysiraichi authored Dec 15, 2023
1 parent 2ac4cbc commit 4103eb6
Showing 1 changed file with 15 additions and 0 deletions.
benchmarks/experiment_runner.py  (+15 −0)
@@ -323,6 +323,9 @@ def timed_run(self, benchmark_experiment, benchmark_model):
    self._mark_step(benchmark_experiment)
    self._synchronize(benchmark_experiment)

    # Clear XLA metrics before executing the model.
    met.clear_metrics()

    enable_prof = self._args.profile_cuda
    metrics = OrderedDict()
    t_start = time.perf_counter()
@@ -363,9 +366,21 @@ def loop(prof=None):
    metrics["total_time"] = t_end - t_start
    metrics[
        "per_iter_time"] = metrics["total_time"] / self._args.iterations_per_run

    if benchmark_experiment.xla:
      metrics["trace_per_iter_time"] = t_trace / self._args.iterations_per_run

      def ns_to_s(ns):
        return ns * 1e-9

      for m in ("CompileTime", "ExecuteTime"):
        data = met.metric_data(m)
        data = data if data is not None else (0, 0, [])
        number, total_time, _ = data
        # Time is measured in nano-seconds
        metrics[f"xla_{m}_time_s"] = ns_to_s(total_time)
        metrics[f"xla_{m}_number"] = number

    if enable_prof:
      self.collect_individual_ops(benchmark_experiment, metrics, prof)

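For context, here is a minimal standalone sketch (not part of this commit) showing how the same torch_xla.debug.metrics calls behave outside the benchmark runner. The toy 128x128 matmul workload and the printed field names are illustrative assumptions; the metrics calls themselves are the ones used in the diff above.

# Illustrative sketch, not part of the commit: read the CompileTime /
# ExecuteTime metrics that the runner now records, on a toy workload.
import torch
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met

device = xm.xla_device()
met.clear_metrics()  # start from a clean slate, as timed_run() now does

x = torch.randn(128, 128, device=device)  # toy workload (assumption)
y = (x @ x).sum()
xm.mark_step()  # materialize the lazy graph: triggers compilation and execution

for name in ("CompileTime", "ExecuteTime"):
  data = met.metric_data(name)
  # metric_data() returns (count, total_time_ns, samples), or None if the
  # metric was never recorded; mirror the diff's fallback to (0, 0, []).
  number, total_ns, _ = data if data is not None else (0, 0, [])
  print(f"xla_{name}_number={number}  xla_{name}_time_s={total_ns * 1e-9:.4f}")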
