You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
File H:\anaconda3\envs\tensorflow\lib\site-packages\torch\_dynamo\eval_frame.py:413, in optimize(backend, nopython, guard_export_fn, guard_fail_fn, disable, dynamic)
380 def optimize(
381 backend="inductor",
382 *,
(...)
387 dynamic=False,
388 ):
389 """
390 The main entrypoint of TorchDynamo. Do graph capture and call
391 backend() to optimize extracted graphs.
(...)
411 ...
412 """
--> 413 check_if_dynamo_supported()
414 # Note: The hooks object could be global instead of passed around, however that would make
415 # for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
416 # There is some prior art around this, w/r/t nesting backend calls are enforced to be the same
417 # compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an
418 # easier to understand UX at the cost of a little more plumbing on our end.
419 hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
File H:\anaconda3\envs\tensorflow\lib\site-packages\torch\_dynamo\eval_frame.py:375, in check_if_dynamo_supported()
373 def check_if_dynamo_supported():
374 if sys.platform == "win32":
--> 375 raise RuntimeError("Windows not yet supported for torch.compile")
376 if sys.version_info >= (3, 11):
377 raise RuntimeError("Python 3.11+ not yet supported for torch.compile")
RuntimeError: Windows not yet supported for torch.compile
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
My code is as follows:
import torch
import torchvision.models as models

# Reproduction script: compile a ResNet-18 with torch.compile and run one
# forward/backward/optimizer step on random data.
model = models.resnet18().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Random input batch: 16 RGB images at 224x224, moved to the GPU.
x = torch.randn(16, 3, 224, 224).cuda()

# Wrap the model with TorchDynamo/Inductor graph capture.
# NOTE(review): this is the line that raises RuntimeError on Windows —
# torch.compile was not supported on win32 at this torch version.
compiled_model = torch.compile(model)

# One training step: clear grads, forward, scalar loss, backward, update.
optimizer.zero_grad()
out = compiled_model(x)
loss = out.sum()
loss.backward()
optimizer.step()
Error message as follows:
RuntimeError Traceback (most recent call last)
Cell In[9], line 6
4 model = models.resnet18().cuda()
5 optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
----> 6 compiled_model = torch.compile(model)
8 x = torch.randn(16, 3, 224, 224).cuda()
9 optimizer.zero_grad()
File H:\anaconda3\envs\tensorflow\lib\site-packages\torch\__init__.py:1441, in compile(model, fullgraph, dynamic, backend, mode, options, disable)
1439 if backend == "inductor":
1440 backend = _TorchCompileInductorWrapper(mode, options, dynamic)
-> 1441 return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
File H:\anaconda3\envs\tensorflow\lib\site-packages\torch\_dynamo\eval_frame.py:413, in optimize(backend, nopython, guard_export_fn, guard_fail_fn, disable, dynamic)
380 def optimize(
381 backend="inductor",
382 *,
(...)
387 dynamic=False,
388 ):
389 """
390 The main entrypoint of TorchDynamo. Do graph capture and call
391 backend() to optimize extracted graphs.
(...)
411 ...
412 """
--> 413 check_if_dynamo_supported()
414 # Note: The hooks object could be global instead of passed around, however that would make
415 # for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
416 # There is some prior art around this, w/r/t nesting backend calls are enforced to be the same
417 # compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an
418 # easier to understand UX at the cost of a little more plumbing on our end.
419 hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
File H:\anaconda3\envs\tensorflow\lib\site-packages\torch\_dynamo\eval_frame.py:375, in check_if_dynamo_supported()
373 def check_if_dynamo_supported():
374 if sys.platform == "win32":
--> 375 raise RuntimeError("Windows not yet supported for torch.compile")
376 if sys.version_info >= (3, 11):
377 raise RuntimeError("Python 3.11+ not yet supported for torch.compile")
RuntimeError: Windows not yet supported for torch.compile
Beta Was this translation helpful? Give feedback.
All reactions