Cloud or Self Hosted
Self Hosted (Source)
Steps to reproduce
python -m tools.api --listen 0.0.0.0:8080 --llama-checkpoint-path "checkpoints/fish-speech-1.4" --decoder-checkpoint-path "checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth" --decoder-config-name firefly_gan_vq --compile
✔️ Expected Behavior
No response
❌ Actual Behavior
(fish-speech) E:\temp_python_ai__face_wav_singer_fish-speech.git>python -m tools.api --listen 0.0.0.0:8080 --llama-checkpoint-path "checkpoints/fish-speech-1.4" --decoder-checkpoint-path "checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth" --decoder-config-name firefly_gan_vq --compile
2024-09-13 14:37:15.164 | INFO | __main__:<module>:403 - Loading Llama model...
2024-09-13 14:37:19.134 | INFO | tools.llama.generate:load_model:350 - Restored model from checkpoint
2024-09-13 14:37:19.135 | INFO | tools.llama.generate:load_model:354 - Using DualARTransformer
2024-09-13 14:37:19.135 | INFO | tools.llama.generate:load_model:360 - Compiling function...
2024-09-13 14:37:19.167 | INFO | __main__:<module>:410 - Llama model loaded, loading VQ-GAN model...
2024-09-13 14:37:20.744 | INFO | tools.vqgan.inference:load_model:44 - Loaded model:
2024-09-13 14:37:20.744 | INFO | __main__:<module>:418 - VQ-GAN model loaded, warming up...
2024-09-13 14:37:20.774 | INFO | tools.llama.generate:generate_long:438 - Encoded text: Hello world.
2024-09-13 14:37:20.775 | INFO | tools.llama.generate:generate_long:456 - Generating sentence 1/1 of sample 1/1
E:\temp_python_ai__face_wav_singer_fish-speech.git\fish_speech\models\text2semantic\llama.py:682: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at ..\aten\src\ATen\native\transformers\cuda\sdp_utils.cpp:455.)
y = F.scaled_dot_product_attention(
0%| | 0/1023 [00:00<?, ?it/s]E:\anaconda3\envs\fish-speech\lib\site-packages\torch\backends\cuda\__init__.py:342: FutureWarning: torch.backends.cuda.sdp_kernel() is deprecated. In the future, this context manager will be removed. Please see torch.nn.attention.sdpa_kernel() for the new context manager, with updated signature.
warnings.warn(
0%| | 0/1023 [00:37<?, ?it/s]
Traceback (most recent call last):
File "E:\anaconda3\envs\fish-speech\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "E:\anaconda3\envs\fish-speech\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "E:\temp_python_ai__face_wav_singer_fish-speech.git\tools\api.py", line 421, in
list(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch\utils_contextlib.py", line 35, in generator_context
response = gen.send(None)
File "E:\temp_python_ai__face_wav_singer_fish-speech.git\tools\api.py", line 243, in inference
raise result.response
File "E:\temp_python_ai__face_wav_singer_fish-speech.git\tools\llama\generate.py", line 581, in worker
for chunk in generate_long(
File "E:\temp_python_ai__face_wav_singer_fish-speech.git\tools\llama\generate.py", line 490, in generate_long
y = generate(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\temp_python_ai__face_wav_singer_fish-speech.git\tools\llama\generate.py", line 264, in generate
x = decode_n_tokens(
File "E:\temp_python_ai__face_wav_singer_fish-speech.git\tools\llama\generate.py", line 202, in decode_n_tokens
next_token = decode_one_token(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\eval_frame.py", line 451, in _fn
return fn(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\convert_frame.py", line 921, in catch_errors
return callback(frame, cache_entry, hooks, frame_state, skip=1)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\convert_frame.py", line 400, in _convert_frame_assert
return compile(
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\convert_frame.py", line 676, in compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\convert_frame.py", line 535, in compile_inner
out_code = transform_code_object(code, transform)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\bytecode_transformation.py", line 1036, in transform_code_object
transformations(instructions, code_options)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\convert_frame.py", line 165, in fn
return fn(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\convert_frame.py", line 500, in transform
tracer.run()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\symbolic_convert.py", line 2149, in run
super().run()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\symbolic_convert.py", line 810, in run
and self.step()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\symbolic_convert.py", line 773, in step
getattr(self, inst.opname)(inst)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\symbolic_convert.py", line 2268, in RETURN_VALUE
self.output.compile_subgraph(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\output_graph.py", line 981, in compile_subgraph
self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\output_graph.py", line 1178, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\output_graph.py", line 1251, in call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\output_graph.py", line 1232, in call_user_compiler
compiled_fn = compiler_fn(gm, self.example_inputs())
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\repro\after_dynamo.py", line 117, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\repro\after_dynamo.py", line 117, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_init.py", line 1731, in call
return compile_fx(model, inputs, config_patches=self.config)
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\compile_fx.py", line 1102, in compile_fx
return compile_fx(
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\compile_fx.py", line 1330, in compile_fx
return aot_autograd(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\backends\common.py", line 58, in compiler_fn
cg = aot_module_simplified(gm, example_inputs, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_functorch\aot_autograd.py", line 903, in aot_module_simplified
compiled_fn = create_aot_dispatcher_function(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_functorch\aot_autograd.py", line 628, in create_aot_dispatcher_function
compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config, fw_metadata=fw_metadata)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_functorch_aot_autograd\runtime_wrappers.py", line 443, in aot_wrapper_dedupe
return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_functorch_aot_autograd\runtime_wrappers.py", line 648, in aot_wrapper_synthetic_base
return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_functorch_aot_autograd\jit_compile_runtime_wrappers.py", line 119, in aot_dispatch_base
compiled_fw = compiler(fw_module, updated_flat_args)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\compile_fx.py", line 1257, in fw_compiler_base
return inner_compile(
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\repro\after_aot.py", line 83, in debug_wrapper
inner_compiled_fn = compiler_fn(gm, example_inputs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\debug.py", line 304, in inner
return fn(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\contextlib.py", line 79, in inner
return func(*args, **kwds)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\compile_fx.py", line 434, in compile_fx_inner
compiled_graph = FxGraphCache.load(
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\codecache.py", line 829, in load
compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\compile_fx.py", line 714, in fx_codegen_and_compile
compiled_fn = graph.compile_to_fn()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\graph.py", line 1307, in compile_to_fn
return self.compile_to_module().call
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\graph.py", line 1250, in compile_to_module
self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\graph.py", line 1208, in codegen
self.scheduler.codegen()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_dynamo\utils.py", line 262, in time_wrapper
r = func(*args, **kwargs)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\scheduler.py", line 2339, in codegen
self.get_backend(device).codegen_nodes(node.get_nodes()) # type: ignore[possibly-undefined]
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\codegen\cuda_combined_scheduling.py", line 63, in codegen_nodes
return self._triton_scheduling.codegen_nodes(nodes)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\codegen\triton.py", line 3255, in codegen_nodes
return self.codegen_node_schedule(node_schedule, buf_accesses, numel, rnumel)
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\codegen\triton.py", line 3425, in codegen_node_schedule
src_code = kernel.codegen_kernel()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch_inductor\codegen\triton.py", line 2753, in codegen_kernel
"backend_hash": torch.utils._triton.triton_hash_with_backend(),
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch\utils_triton.py", line 101, in triton_hash_with_backend
backend_hash = triton_backend_hash()
File "E:\anaconda3\envs\fish-speech\lib\site-packages\torch\utils_triton.py", line 37, in triton_backend_hash
from triton.common.backend import get_backend, get_cuda_version_key
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
ModuleNotFoundError: No module named 'triton.common'
You can suppress this exception and fall back to eager by setting:
import torch._dynamo
torch._dynamo.config.suppress_errors = True
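
For context: the final frame above is torch\utils\_triton.py importing triton.common.backend, so whatever triton package is present in the env does not provide that module (the triton.common namespace was removed in newer Triton releases, and Triton ships no official Windows wheels to begin with). The quickest workaround is simply dropping --compile from the command. Alternatively, the eager fallback that the error message itself suggests can be applied via a small launcher; this is only a sketch, and run_api.py is a hypothetical wrapper name, not part of the repo:

```python
# run_api.py -- hypothetical launcher around tools.api.
# Applies the fallback the traceback suggests: if the Inductor/Triton
# backend fails to compile, torch._dynamo falls back to eager execution
# instead of raising BackendCompilerFailed.
import runpy

import torch._dynamo

torch._dynamo.config.suppress_errors = True  # fall back to eager on compile errors

# Run tools.api exactly as `python -m tools.api` would, forwarding the CLI flags:
#   python run_api.py --listen 0.0.0.0:8080 ... --compile
runpy.run_module("tools.api", run_name="__main__", alter_sys=True)
```

Note that whenever the fallback triggers, generation runs uncompiled, so the speedup --compile is meant to provide is lost.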