Skip to content

Commit

Permalink
fix: rely on comfyui internals for loading cascade series models
Browse files Browse the repository at this point in the history
This is a patently "temporary" fix to prevent extremely large models, such as cascade models, from being forced to fully load. I will track the resolution of this in an issue: #312
  • Loading branch information
tazlin committed Aug 25, 2024
1 parent 3cb5049 commit f0982cb
Showing 1 changed file with 22 additions and 0 deletions.
22 changes: 22 additions & 0 deletions hordelib/comfy_horde.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,6 +240,10 @@ def do_comfy_import(
# isort: on


def _model_patcher_has_cascade(model_patcher):
return "cascade" in str(type(model_patcher.model)).lower()


def _load_models_gpu_hijack(*args, **kwargs):
"""Intercepts the comfy load_models_gpu function to force full load.
Expand All @@ -248,7 +252,18 @@ def _load_models_gpu_hijack(*args, **kwargs):
and the worker/horde-engine takes responsibility for managing the memory or the problems this may
cause.
"""
found_cascade = False
for model_patcher in args[0]:
found_cascade = _model_patcher_has_cascade(model_patcher)
if found_cascade:
break

global _comfy_current_loaded_models
if found_cascade:
logger.debug("Not overriding cascade model load")
_comfy_load_models_gpu(*args, **kwargs)
return

if "force_full_load" in kwargs:
kwargs.pop("force_full_load")

Expand All @@ -262,6 +277,13 @@ def _model_patcher_load_hijack(*args, **kwargs):
See _load_models_gpu_hijack for more information
"""
global _comfy_model_patcher_load

model_patcher = args[0]
if _model_patcher_has_cascade(model_patcher):
logger.debug("Not overriding cascade model load")
_comfy_model_patcher_load(*args, **kwargs)
return

if "full_load" in kwargs:
kwargs.pop("full_load")

Expand Down

0 comments on commit f0982cb

Please sign in to comment.