From f6904df0f5eb5cded7e6b47c5c81aa462f1d7973 Mon Sep 17 00:00:00 2001 From: Yukio Siraichi Date: Wed, 29 Nov 2023 11:08:17 -0300 Subject: [PATCH 1/3] Allow openxla for eval. --- benchmarks/benchmark_experiment.py | 6 +----- benchmarks/experiment_runner.py | 6 +++--- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/benchmarks/benchmark_experiment.py b/benchmarks/benchmark_experiment.py index 21679594d2f..6195a44e5f2 100644 --- a/benchmarks/benchmark_experiment.py +++ b/benchmarks/benchmark_experiment.py @@ -85,11 +85,7 @@ def is_available(self, experiment_config): experiment_config["accelerator"] == "cuda" and not experiment_config["xla"]): return False - if experiment_config["dynamo"] == "openxla_eval" and not ( - experiment_config["xla"] and experiment_config["test"] == "eval"): - return False - if experiment_config["dynamo"] == "openxla" and not ( - experiment_config["xla"] and experiment_config["test"] == "train"): + if experiment_config["dynamo"] == "openxla" and not experiment_config["xla"]: return False if (experiment_config["xla"] and not is_xla_device_available(experiment_config["accelerator"].upper())): diff --git a/benchmarks/experiment_runner.py b/benchmarks/experiment_runner.py index 8e5d3b5af30..2be6c622193 100644 --- a/benchmarks/experiment_runner.py +++ b/benchmarks/experiment_runner.py @@ -501,7 +501,7 @@ def parse_args(args=None): "--collect-full-output", action="store_true", help="""Whether to collect full output for training. Set this to true if we - want to verify the numerical correctness of graidents. But that may + want to verify the numerical correctness of gradients.
But that may cause time measurement not accurate""", ) @@ -557,13 +557,13 @@ def parse_args(args=None): type=str, default="./output/", help="Directory specifying where to dump profiling information (summary, and trace)", - ), + ) parser.add_argument( "--profile-cuda-cpu-collect", action="store_true", help="Whether to collect CPU/GPU profiling information in the resulting file.", - ), + ) parser.add_argument( "--xla-flags", From 23bb689b8cd92381b7a0f9897c8e14c4ba9c12ad Mon Sep 17 00:00:00 2001 From: Yukio Siraichi Date: Thu, 30 Nov 2023 18:52:09 -0300 Subject: [PATCH 2/3] Update readme. --- benchmarks/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/benchmarks/README.md b/benchmarks/README.md index 51ab754400b..9e168ecefeb 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -32,9 +32,10 @@ python xla/benchmarks/experiment_runner.py \ You can change the flags to add the configurations you are interested in. The `experiment_runner.py` will expand the options to all supported configurations. For example, in the case above, it will consider all the possible combinations -among the flags `--dynamo`, `--xla`, and `--test`, 4 of which are supported: +among the flags `--dynamo`, `--xla`, and `--test`, 5 of which are supported: - `dynamo=openxla_eval`, `xla=PJRT`, `test=eval` + - `dynamo=openxla`, `xla=PJRT`, `test=eval` - `dynamo=openxla`, `xla=PJRT`, `test=train` - `dynamo=inductor`, `xla=None`, `test=eval` - `dynamo=inductor`, `xla=None`, `test=train` From 5a9a8b2d7c534c463dfe538e0eee0e0054137296 Mon Sep 17 00:00:00 2001 From: Yukio Siraichi Date: Fri, 1 Dec 2023 12:46:20 -0300 Subject: [PATCH 3/3] Revert `openxla_eval` rule.
--- benchmarks/benchmark_experiment.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/benchmarks/benchmark_experiment.py b/benchmarks/benchmark_experiment.py index 6195a44e5f2..0de425bdc3e 100644 --- a/benchmarks/benchmark_experiment.py +++ b/benchmarks/benchmark_experiment.py @@ -85,6 +85,9 @@ def is_available(self, experiment_config): experiment_config["accelerator"] == "cuda" and not experiment_config["xla"]): return False + if experiment_config["dynamo"] == "openxla_eval" and not ( + experiment_config["xla"] and experiment_config["test"] == "eval"): + return False if experiment_config["dynamo"] == "openxla" and not experiment_config["xla"]: return False if (experiment_config["xla"] and