Skip to content

Commit

Permalink
[benchmarks] Increase compilation cache. (pytorch#6509)
Browse files Browse the repository at this point in the history
  • Loading branch information
ysiraichi authored and amithrm committed Mar 1, 2024
1 parent 2472ad5 commit de420de
Showing 1 changed file with 12 additions and 0 deletions.
12 changes: 12 additions & 0 deletions benchmarks/torchbench_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@
{
"test": "eval",
"xla": "PJRT",
"dynamo": None,
}, # TIMEOUT
],
"hf_T5_generate": [
Expand All @@ -85,6 +86,7 @@
{
"test": "eval",
"xla": "PJRT",
"dynamo": None,
}, # TIMEOUT
],
"doctr_det_predictor": [{
Expand Down Expand Up @@ -135,6 +137,13 @@
],
}

# Generative models whose runs compile more distinct graphs than the default
# XLA compilation cache can hold; these get a larger cache via
# XLA_COMPILATION_CACHE_SIZE when their subprocess environment is built.
NEED_LARGER_CACHE = {
    "hf_T5_generate",
    "cm3leon_generate",
}


class TorchBenchModelLoader(ModelLoader):

Expand Down Expand Up @@ -401,6 +410,9 @@ def update_process_env(self, process_env):
if precision_flag is not None:
process_env[precision_flag] = '1'

if self.model_name in NEED_LARGER_CACHE:
process_env["XLA_COMPILATION_CACHE_SIZE"] = "2048"

def pick_grad(self):
# special case
if self.model_name in ("maml",):
Expand Down

0 comments on commit de420de

Please sign in to comment.