Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

run gradio error #27

Open
zixian-wu opened this issue Jun 28, 2024 · 2 comments
Open

run gradio error #27

zixian-wu opened this issue Jun 28, 2024 · 2 comments

Comments

@zixian-wu
Copy link

Hi, when I run the Gradio demo, the error message is as follows. Can you help me out? Thanks a lot.
Traceback (most recent call last):
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 501, in process_events
response = await self.call_prediction(awake_events, batch)
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 465, in call_prediction
raise Exception(str(error) if show_error else None) from error
Exception: None
torch.Size([1, 24, 2, 512, 512])
Traceback (most recent call last):
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 456, in call_prediction
output = await route_utils.call_process_api(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/route_utils.py", line 232, in call_process_api
output = await app.get_blocks().process_api(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1522, in process_api
result = await self.call_function(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1144, in call_function
prediction = await anyio.to_thread.run_sync(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/utils.py", line 674, in wrapper
response = f(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 760, in run
save_root = os.path.join(os.path.dirname(audio_path), save_name)
File "/opt/miniconda/envs/mofa/lib/python3.10/posixpath.py", line 152, in dirname
p = os.fspath(p)
TypeError: expected str, bytes or os.PathLike object, not NoneType
Traceback (most recent call last):
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 456, in call_prediction
output = await route_utils.call_process_api(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/route_utils.py", line 232, in call_process_api
output = await app.get_blocks().process_api(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1522, in process_api
result = await self.call_function(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1144, in call_function
prediction = await anyio.to_thread.run_sync(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/utils.py", line 674, in wrapper
response = f(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 760, in run
save_root = os.path.join(os.path.dirname(audio_path), save_name)
File "/opt/miniconda/envs/mofa/lib/python3.10/posixpath.py", line 152, in dirname
p = os.fspath(p)
TypeError: expected str, bytes or os.PathLike object, not NoneType

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 501, in process_events
response = await self.call_prediction(awake_events, batch)
File "/opt/miniconda/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 465, in call_prediction
raise Exception(str(error) if show_error else None) from error
Exception: None

image
Selection_763

@zixian-wu
Copy link
Author

Traceback (most recent call last):
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/aniportrait/audio2ldmk.py", line 309, in <module>
main()
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/aniportrait/audio2ldmk.py", line 271, in main
projected_vertices = project_points(pred, face_result['trans_mat'], pose_seq, [height, width])
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/aniportrait/src/utils/pose_util.py", line 38, in project_points
transformed_points = points_3d_homogeneous @ (transformation_matrix @ euler_and_translation_to_matrix(pose_vectors[i][:3], pose_vectors[i][3:])).T @ P
IndexError: index 250 is out of bounds for axis 0 with size 250
Traceback (most recent call last):
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 456, in call_prediction
output = await route_utils.call_process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/route_utils.py", line 232, in call_process_api
output = await app.get_blocks().process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1522, in process_api
result = await self.call_function(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1144, in call_function
prediction = await anyio.to_thread.run_sync(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/utils.py", line 674, in wrapper
response = f(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 861, in run
outputs = self.forward_sample(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 442, in forward_sample
ldmk_controlnet_flow, ldmk_pose_imgs, landmarks, num_frames = self.get_landmarks(save_root, first_frame_path, audio_path, input_first_frame[0], self.model_length, ldmk_render=ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 708, in get_landmarks
ldmknpy_dir = self.audio2landmark(audio_path, first_frame_path, ldmk_dir, ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 698, in audio2landmark
assert return_code == 0, "Errors in generating landmarks! Please trace back up for detailed error report."
AssertionError: Errors in generating landmarks! Please trace back up for detailed error report.
Traceback (most recent call last):
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 456, in call_prediction
output = await route_utils.call_process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/route_utils.py", line 232, in call_process_api
output = await app.get_blocks().process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1522, in process_api
result = await self.call_function(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1144, in call_function
prediction = await anyio.to_thread.run_sync(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/utils.py", line 674, in wrapper
response = f(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 861, in run
outputs = self.forward_sample(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 442, in forward_sample
ldmk_controlnet_flow, ldmk_pose_imgs, landmarks, num_frames = self.get_landmarks(save_root, first_frame_path, audio_path, input_first_frame[0], self.model_length, ldmk_render=ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 708, in get_landmarks
ldmknpy_dir = self.audio2landmark(audio_path, first_frame_path, ldmk_dir, ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 698, in audio2landmark
assert return_code == 0, "Errors in generating landmarks! Please trace back up for detailed error report."
AssertionError: Errors in generating landmarks! Please trace back up for detailed error report.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 501, in process_events
response = await self.call_prediction(awake_events, batch)
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 465, in call_prediction
raise Exception(str(error) if show_error else None) from error
Exception: None
Some weights of Wav2Vec2Model were not initialized from the model checkpoint at ckpts/aniportrait/wav2vec2-base-960h and are newly initialized: ['wav2vec2.masked_spec_embed']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
Some weights of Wav2Vec2Model were not initialized from the model checkpoint at ckpts/aniportrait/wav2vec2-base-960h and are newly initialized: ['wav2vec2.masked_spec_embed']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1719554800.270370 4235 task_runner.cc:85] GPU suport is not available: INTERNAL: ; RET_CHECK failure (mediapipe/gpu/gl_context_egl.cc:77) display != EGL_NO_DISPLAYeglGetDisplay() returned error 0x300c
W0000 00:00:1719554800.270964 4235 face_landmarker_graph.cc:174] Sets FaceBlendshapesGraph acceleration to xnnpack by default.
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
W0000 00:00:1719554800.291229 4444 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
W0000 00:00:1719554800.302677 4459 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
I0000 00:00:1719554800.306953 4235 task_runner.cc:85] GPU suport is not available: INTERNAL: ; RET_CHECK failure (mediapipe/gpu/gl_context_egl.cc:77) display != EGL_NO_DISPLAYeglGetDisplay() returned error 0x300c
W0000 00:00:1719554800.309512 4499 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/google/protobuf/symbol_database.py:55: UserWarning: SymbolDatabase.GetPrototype() is deprecated. Please use message_factory.GetMessageClass() instead. SymbolDatabase.GetPrototype() will be removed soon.
warnings.warn('SymbolDatabase.GetPrototype() is deprecated. Please '
Traceback (most recent call last):
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/aniportrait/audio2ldmk.py", line 309, in <module>
main()
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/aniportrait/audio2ldmk.py", line 271, in main
projected_vertices = project_points(pred, face_result['trans_mat'], pose_seq, [height, width])
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/aniportrait/src/utils/pose_util.py", line 38, in project_points
transformed_points = points_3d_homogeneous @ (transformation_matrix @ euler_and_translation_to_matrix(pose_vectors[i][:3], pose_vectors[i][3:])).T @ P
IndexError: index 250 is out of bounds for axis 0 with size 250
Traceback (most recent call last):
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 456, in call_prediction
output = await route_utils.call_process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/route_utils.py", line 232, in call_process_api
output = await app.get_blocks().process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1522, in process_api
result = await self.call_function(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1144, in call_function
prediction = await anyio.to_thread.run_sync(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/utils.py", line 674, in wrapper
response = f(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 861, in run
outputs = self.forward_sample(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 442, in forward_sample
ldmk_controlnet_flow, ldmk_pose_imgs, landmarks, num_frames = self.get_landmarks(save_root, first_frame_path, audio_path, input_first_frame[0], self.model_length, ldmk_render=ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 708, in get_landmarks
ldmknpy_dir = self.audio2landmark(audio_path, first_frame_path, ldmk_dir, ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 698, in audio2landmark
assert return_code == 0, "Errors in generating landmarks! Please trace back up for detailed error report."
AssertionError: Errors in generating landmarks! Please trace back up for detailed error report.
Traceback (most recent call last):
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 456, in call_prediction
output = await route_utils.call_process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/route_utils.py", line 232, in call_process_api
output = await app.get_blocks().process_api(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1522, in process_api
result = await self.call_function(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/blocks.py", line 1144, in call_function
prediction = await anyio.to_thread.run_sync(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
result = context.run(func, *args)
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/utils.py", line 674, in wrapper
response = f(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 861, in run
outputs = self.forward_sample(
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 442, in forward_sample
ldmk_controlnet_flow, ldmk_pose_imgs, landmarks, num_frames = self.get_landmarks(save_root, first_frame_path, audio_path, input_first_frame[0], self.model_length, ldmk_render=ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 708, in get_landmarks
ldmknpy_dir = self.audio2landmark(audio_path, first_frame_path, ldmk_dir, ldmk_render)
File "/workspace/MOFA-Video/MOFA-Video-Hybrid/run_gradio_audio_driven.py", line 698, in audio2landmark
assert return_code == 0, "Errors in generating landmarks! Please trace back up for detailed error report."
AssertionError: Errors in generating landmarks! Please trace back up for detailed error report.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 501, in process_events
response = await self.call_prediction(awake_events, batch)
File "/workspace/miniconda3/envs/mofa/lib/python3.10/site-packages/gradio/queueing.py", line 465, in call_prediction
raise Exception(str(error) if show_error else None) from error
Exception: None

@wangaocheng
Copy link

我也得到这个错误。4070显卡12G (Translation: I'm also getting this error. RTX 4070 GPU with 12 GB of VRAM.)

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants