Revert "Re-land: Fix model initialization. (#6182)" (#6220)
This reverts commit 38e3644, i.e. #6182, which is causing regressions for Inductor on inference -- see #6182 (comment)
cota authored and LukeBoyer committed Dec 25, 2023
1 parent 3a5b6b4 commit 10bd7d8
Showing 1 changed file with 7 additions and 18 deletions.
benchmarks/torchbench_model.py: 25 changes (7 additions, 18 deletions)
@@ -6,7 +6,6 @@
 import sys
 import torch
 import torch.nn as nn
-import torch.utils._pytree as pytree
 from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
 from torch._dynamo.utils import clone_inputs
 import types
@@ -158,6 +157,7 @@ def is_compatible(self, dummy_benchmark_model, benchmark_experiment):
         break
       if matched:
         return False
+
     return True


@@ -207,23 +207,12 @@ def load_benchmark(self):
     # workaround "RuntimeError: not allowed to set torch.backends.cudnn flags"
     # torch.backends.__allow_nonbracketed_mutation_flag = True

-    benchmark = benchmark_cls(
-        test=self.benchmark_experiment.test,
-        device=self.benchmark_experiment.accelerator,
-        batch_size=self.benchmark_experiment.batch_size,
-    )
-
-    self.module, self.example_inputs = benchmark.get_module()
-
-    # Move the initialized model to XLA device.
-    device = self.benchmark_experiment.get_device()
-    if self.benchmark_experiment.xla:
-      self.module = self.module.to(device)
-      self.example_inputs = pytree.tree_map_only(torch.Tensor,
-                                                 lambda t: t.to(device),
-                                                 self.example_inputs)
-
-    self.benchmark_experiment.batch_size = benchmark.batch_size
+    if self.benchmark_experiment.accelerator == "cpu":
+      device = "cpu"
+    elif self.benchmark_experiment.accelerator == "cuda" and not self.benchmark_experiment.xla:
+      device = "cuda"
+    else:
+      device = str(self.benchmark_experiment.get_device())

     return benchmark_cls(
         test=self.benchmark_experiment.test,
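For readers comparing the two approaches, below is a minimal sketch of the load path this revert removes (initialize the benchmark on the raw accelerator, then move the module and example inputs to the XLA device with pytree.tree_map_only) versus the path it restores (resolve a device string up front and hand it to the benchmark constructor). The helper names and the experiment argument are stand-ins for illustration, and the constructor arguments beyond test= on the restored path are an assumption since the diff is truncated there; this is not the repository's exact implementation.

import torch
import torch.utils._pytree as pytree


def load_with_post_init_move(benchmark_cls, experiment):
  # Reverted path (#6182): build the benchmark on the raw accelerator, then
  # move the initialized module and its example inputs to the XLA device.
  benchmark = benchmark_cls(
      test=experiment.test,
      device=experiment.accelerator,
      batch_size=experiment.batch_size,
  )
  module, example_inputs = benchmark.get_module()
  device = experiment.get_device()
  if experiment.xla:
    module = module.to(device)
    # tree_map_only applies the lambda to every torch.Tensor leaf in the
    # (possibly nested) example_inputs structure.
    example_inputs = pytree.tree_map_only(torch.Tensor,
                                          lambda t: t.to(device),
                                          example_inputs)
  return module, example_inputs


def load_with_device_string(benchmark_cls, experiment):
  # Restored path: pick a device string first and let the benchmark class
  # initialize directly on it.
  if experiment.accelerator == "cpu":
    device = "cpu"
  elif experiment.accelerator == "cuda" and not experiment.xla:
    device = "cuda"
  else:
    device = str(experiment.get_device())
  # Assumed constructor arguments; the actual call is truncated in the diff.
  return benchmark_cls(
      test=experiment.test,
      device=device,
      batch_size=experiment.batch_size,
  )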
