I'm using CUDA 12.1, torch 2.2.1, and xformers 0.0.25. After launching the Gradio demo, it reports:
AttributeError: module 'xformers' has no attribute 'ops'. Did you mean: 'os'?
I tried downgrading torch and xformers together, but then I'm warned that my CUDA version doesn't match those builds. Is there a way to fix this without downgrading CUDA?
(My other environments need a fairly recent CUDA.)
The full error output is below:
1
[0. 0. 1.] [0. 0. 0.]
1
[0. 0. 1.] [0. 0. 0.]
C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\functional.py:5476: UserWarning:
1Torch was not compiled with flash attention. (Triggered internally at ..\aten\src\ATen\native\transformers\cuda\sdp_utils.cpp:263.)
Traceback (most recent call last):
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\gradio\routes.py", line 439, in run_predict
output = await app.get_blocks().process_api(
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\gradio\blocks.py", line 1389, in process_api
result = await self.call_function(
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\gradio\blocks.py", line 1094, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users\admin\AppData\Roaming\Python\Python310\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "C:\Users\admin\AppData\Roaming\Python\Python310\site-packages\anyio_backends_asyncio.py", line 2144, in run_sync_in_worker_thread
return await future
File "C:\Users\admin\AppData\Roaming\Python\Python310\site-packages\anyio_backends_asyncio.py", line 851, in run
result = context.run(func, *args)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\gradio\utils.py", line 704, in wrapper
response = f(*args, **kwargs)
File "D:\AIGC\MotionCtrl-svd\app.py", line 514, in model_run
video_path = motionctrl_sample(
File "D:\AIGC\MotionCtrl-svd\gradio_utils\motionctrl_cmcm_gradio.py", line 148, in motionctrl_sample
c, uc = model.conditioner.get_unconditional_conditioning(
File "D:\AIGC\MotionCtrl-svd\sgm\modules\encoders\modules.py", line 179, in get_unconditional_conditioning
c = self(batch_c, force_cond_zero_embeddings)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AIGC\MotionCtrl-svd\sgm\modules\encoders\modules.py", line 132, in forward
emb_out = embedder(batch[embedder.input_key])
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AIGC\MotionCtrl-svd\sgm\modules\encoders\modules.py", line 1012, in forward
out = self.encoder.encode(vid[n * n_samples : (n + 1) * n_samples])
File "D:\AIGC\MotionCtrl-svd\sgm\models\autoencoder.py", line 472, in encode
z = self.encoder(x)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AIGC\MotionCtrl-svd\sgm\modules\diffusionmodules\model.py", line 596, in forward
h = self.mid.attn_1(h)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin\miniconda3\envs\motionctrl\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AIGC\MotionCtrl-svd\sgm\modules\diffusionmodules\model.py", line 265, in forward
h = self.attention(h)
File "D:\AIGC\MotionCtrl-svd\sgm\modules\diffusionmodules\model.py", line 250, in attention
out = xformers.ops.memory_efficient_attention(
AttributeError: module 'xformers' has no attribute 'ops'. Did you mean: 'os'?
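
For context, here is a minimal sketch of what could be checked and patched, under my own assumptions (the log above does not confirm the root cause, and the repo's exact tensor layout should be verified). This AttributeError usually means the xformers package imported but its xformers.ops submodule (the compiled kernels) did not, which can be confirmed with `python -m xformers.info` or `python -c "import xformers.ops"`. If the ops really are unavailable for this CUDA/torch combination, one way to keep the current stack is to route the VAE attention in sgm/modules/diffusionmodules/model.py (the attention() method around line 250 in the traceback) through PyTorch's built-in scaled_dot_product_attention instead of xformers:

import torch
import torch.nn.functional as F

def sdpa_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    # Hypothetical drop-in for the xformers.ops.memory_efficient_attention call.
    # Assumes q/k/v arrive as (B, N, C), the layout memory_efficient_attention accepts;
    # SDPA wants (B, heads, N, head_dim), so the channel dim is treated as a single head.
    q, k, v = (t.unsqueeze(1) for t in (q, k, v))
    out = F.scaled_dot_product_attention(q, k, v)  # picks a flash/mem-efficient/math kernel as available
    return out.squeeze(1)

This is only a sketch: the real attention() in the repo may reshape q/k/v differently, so the unsqueeze/squeeze handling would need to be adapted to match the surrounding code.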