Merge pull request #181 from Haidra-Org/pyinstaller
feat: pyinstaller support
tazlin authored Feb 6, 2024
2 parents 4cc32a9 + 5473325 commit 1b19074
Showing 146 changed files with 5,496 additions and 6,047 deletions.
10 changes: 10 additions & 0 deletions hordelib/__pyinstaller.py
@@ -0,0 +1,10 @@
+import pathlib
+
+
+def get_hook_dirs() -> list[str]:
+    return [str(pathlib.Path(__file__).parent / "pyinstaller_hooks")]
+
+
+def get_PyInstaller_tests() -> list[str]:
+    return []  # FIXME
+    return [str(pathlib.Path(__file__).parent / "pyinstaller_tests")]
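Note: this module is inert by itself. PyInstaller only calls get_hook_dirs() and get_PyInstaller_tests() when they are registered under the pyinstaller40 entry-point group in the package metadata, and the early return [] flagged FIXME makes the second return unreachable, so packaged hook tests stay disabled for now. The registration itself is not part of this hunk; a minimal setuptools-style sketch of what PyInstaller expects (hypothetical — hordelib's real packaging file is not shown in this diff):

# Hypothetical packaging excerpt; hordelib's actual metadata is not in this diff.
from setuptools import setup

setup(
    name="hordelib",
    entry_points={
        "pyinstaller40": [
            "hook-dirs = hordelib.__pyinstaller:get_hook_dirs",
            "tests = hordelib.__pyinstaller:get_PyInstaller_tests",
        ],
    },
)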
@@ -115,7 +115,7 @@ def __init__(self):
         )
         modelpath = os.path.join(builtins.annotator_ckpts_path, "network-bsds500.pth")
         if not os.path.exists(modelpath):
-            from comfy_controlnet_preprocessors.util import load_file_from_url
+            from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url

             load_file_from_url(remote_model_path, model_dir=builtins.annotator_ckpts_path)
         self.netNetwork = Network(modelpath).to(model_management.get_torch_device()).eval()
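Note: this one-line rewrite — rerooting the lazy load_file_from_url import at hordelib.nodes — recurs in nearly every file below. It matters for freezing: the old top-level name comfy_controlnet_preprocessors resolves only when the nodes directory is injected into sys.path at runtime (the bare import model_management from ComfyUI in these files suggests exactly that), which a frozen build cannot reproduce. Even as absolute imports, these function-local imports are invisible to PyInstaller's static analysis, which is what the new hook directory is for. A hypothetical sketch of such a hook (the actual hook files added by this PR are not shown in this excerpt):

# Hypothetical hook file, e.g. hordelib/pyinstaller_hooks/hook-hordelib.py.
from PyInstaller.utils.hooks import collect_submodules

# Imports buried inside function bodies are missed by static analysis,
# so declare every submodule of the vendored package as a hidden import.
hiddenimports = collect_submodules("hordelib.nodes.comfy_controlnet_preprocessors")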
@@ -45,7 +45,7 @@ def download_model_if_not_existed():
     # if os.path.exists(old_model_path):
     #     model_path = old_model_path
     if not os.path.exists(model_path):
-        from comfy_controlnet_preprocessors.util import load_file_from_url
+        from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url

         load_file_from_url(remote_model_path_leres, model_dir=builtins.annotator_ckpts_path)
         os.rename(os.path.join(builtins.annotator_ckpts_path, "res101.pth"), model_path)
@@ -66,7 +66,7 @@ def apply_leres(input_image, thr_a, thr_b):
""" if boost and pix2pixmodel is None:
pix2pixmodel_path = os.path.join(builtins.annotator_ckpts_path, "latest_net_G.pth")
if not os.path.exists(pix2pixmodel_path):
from comfy_controlnet_preprocessors.util import load_file_from_url
from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url
load_file_from_url(remote_model_path_pix2pix, model_dir=builtins.annotator_ckpts_path)
opt = TestOptions().parse()
@@ -84,7 +84,6 @@ def apply_leres(input_image, thr_a, thr_b):
     height, width, dim = input_image.shape

     with torch.no_grad():
-
         if boost:
             depth = estimateboost(input_image, model, 0, pix2pixmodel, max(width, height))
         else:
@@ -9,21 +9,22 @@ def get_func(func_name):
     function in this module or the path to a function relative to the base
     'modeling' module.
     """
-    if func_name == '':
+    if func_name == "":
         return None
     try:
-        parts = func_name.split('.')
+        parts = func_name.split(".")
         # Refers to a function in this module
         if len(parts) == 1:
             return globals()[parts[0]]
         # Otherwise, assume we're referencing a module under modeling
-        module_name = 'comfy_controlnet_preprocessors.leres.leres.' + '.'.join(parts[:-1])
+        module_name = "hordelib.nodes.comfy_controlnet_preprocessors.leres.leres." + ".".join(parts[:-1])
         module = importlib.import_module(module_name)
         return getattr(module, parts[-1])
     except Exception:
-        print('Failed to f1ind function: %s', func_name)
+        print("Failed to f1ind function: %s", func_name)
         raise

+
 def load_ckpt(args, depth_model, shift_model, focal_model):
     """
     Load checkpoint.
@@ -32,13 +33,10 @@ def load_ckpt(args, depth_model, shift_model, focal_model):
print("loading checkpoint %s" % args.load_ckpt)
checkpoint = torch.load(args.load_ckpt)
if shift_model is not None:
shift_model.load_state_dict(strip_prefix_if_present(checkpoint['shift_model'], 'module.'),
strict=True)
shift_model.load_state_dict(strip_prefix_if_present(checkpoint["shift_model"], "module."), strict=True)
if focal_model is not None:
focal_model.load_state_dict(strip_prefix_if_present(checkpoint['focal_model'], 'module.'),
strict=True)
depth_model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."),
strict=True)
focal_model.load_state_dict(strip_prefix_if_present(checkpoint["focal_model"], "module."), strict=True)
depth_model.load_state_dict(strip_prefix_if_present(checkpoint["depth_model"], "module."), strict=True)
del checkpoint
torch.cuda.empty_cache()

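Note: load_ckpt leans on strip_prefix_if_present, which is defined elsewhere in the package and not shown in this excerpt. For orientation, a sketch of the conventional shape of that helper — undoing the "module." key prefix that torch.nn.DataParallel adds when a wrapped model's state dict is saved (an assumption about this codebase's version, not taken from the diff):

from collections import OrderedDict


def strip_prefix_if_present(state_dict, prefix):
    # If no key carries the prefix, return the state dict unchanged.
    if not any(key.startswith(prefix) for key in state_dict):
        return state_dict
    # Otherwise drop the prefix wherever it appears.
    return OrderedDict(
        (key[len(prefix):] if key.startswith(prefix) else key, value)
        for key, value in state_dict.items()
    )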
2 changes: 1 addition & 1 deletion hordelib/nodes/comfy_controlnet_preprocessors/midas/api.py
@@ -92,7 +92,7 @@ def load_model(model_type):

     elif model_type == "dpt_hybrid":  # DPT-Hybrid
         if not os.path.exists(model_path):
-            from comfy_controlnet_preprocessors.util import load_file_from_url
+            from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url

             load_file_from_url(remote_model_path, model_dir=builtins.annotator_ckpts_path)

@@ -19,7 +19,7 @@ class MLSDdetector:
     def __init__(self):
         model_path = os.path.join(builtins.annotator_ckpts_path, "mlsd_large_512_fp32.pth")
         if not os.path.exists(model_path):
-            from comfy_controlnet_preprocessors.util import load_file_from_url
+            from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url

             load_file_from_url(remote_model_path, model_dir=builtins.annotator_ckpts_path)
         model = MobileV2_MLSD_Large()
@@ -20,7 +20,7 @@ def __init__(self):
         hand_modelpath = os.path.join(builtins.annotator_ckpts_path, "hand_pose_model.pth")

         if not os.path.exists(hand_modelpath):
-            from comfy_controlnet_preprocessors.util import load_file_from_url
+            from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url

             load_file_from_url(body_model_path, model_dir=builtins.annotator_ckpts_path)
             load_file_from_url(hand_model_path, model_dir=builtins.annotator_ckpts_path)
@@ -3,7 +3,7 @@
 import numpy as np
 from einops import rearrange
 from .model import pidinet
-from comfy_controlnet_preprocessors.util import load_state_dict, load_file_from_url
+from hordelib.nodes.comfy_controlnet_preprocessors.util import load_state_dict, load_file_from_url
 import builtins
 import model_management

@@ -1,7 +1,11 @@
 import os

-from comfy_controlnet_preprocessors.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
-from comfy_controlnet_preprocessors.uniformer.mmseg.core.evaluation import get_palette
+from hordelib.nodes.comfy_controlnet_preprocessors.uniformer.mmseg.apis import (
+    init_segmentor,
+    inference_segmentor,
+    show_result_pyplot,
+)
+from hordelib.nodes.comfy_controlnet_preprocessors.uniformer.mmseg.core.evaluation import get_palette
 import builtins

 import model_management
@@ -14,7 +18,7 @@ class UniformerDetector:
     def __init__(self):
         modelpath = os.path.join(builtins.annotator_ckpts_path, "upernet_global_small.pth")
         if not os.path.exists(modelpath):
-            from comfy_controlnet_preprocessors.util import load_file_from_url
+            from hordelib.nodes.comfy_controlnet_preprocessors.util import load_file_from_url

             load_file_from_url(checkpoint_file, model_dir=builtins.annotator_ckpts_path)
         config_file = os.path.join(os.path.dirname(__file__), "exp", "upernet_global_small", "config.py")
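Note: UniformerDetector also resolves its config file relative to __file__, so a frozen build has to ship such non-imported resources as data files alongside the collected modules. A hedged sketch of the corresponding hook entry (names and arguments are assumptions, not taken from this PR):

# Hypothetical addition to the hook file sketched earlier.
from PyInstaller.utils.hooks import collect_data_files

# Bundle __file__-relative resources such as
# uniformer/exp/upernet_global_small/config.py into the frozen app.
datas = collect_data_files(
    "hordelib.nodes.comfy_controlnet_preprocessors",
    include_py_files=True,
)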
@@ -3,17 +3,18 @@
 import torch.nn as nn
 import torch.nn.functional as F

-from comfy_controlnet_preprocessors.uniformer.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version
+from hordelib.nodes.comfy_controlnet_preprocessors.uniformer.mmcv.utils import (
+    TORCH_VERSION,
+    build_from_cfg,
+    digit_version,
+)
 from .registry import ACTIVATION_LAYERS

-for module in [
-        nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
-        nn.Sigmoid, nn.Tanh
-]:
+for module in [nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU, nn.Sigmoid, nn.Tanh]:
     ACTIVATION_LAYERS.register_module(module=module)


-@ACTIVATION_LAYERS.register_module(name='Clip')
+@ACTIVATION_LAYERS.register_module(name="Clip")
 @ACTIVATION_LAYERS.register_module()
 class Clamp(nn.Module):
     """Clamp activation layer.
@@ -28,7 +29,7 @@ class Clamp(nn.Module):
             Default to 1.
     """

-    def __init__(self, min=-1., max=1.):
+    def __init__(self, min=-1.0, max=1.0):
         super(Clamp, self).__init__()
         self.min = min
         self.max = max
@@ -71,8 +72,7 @@ def forward(self, input):
         return F.gelu(input)


-if (TORCH_VERSION == 'parrots'
-        or digit_version(TORCH_VERSION) < digit_version('1.4')):
+if TORCH_VERSION == "parrots" or digit_version(TORCH_VERSION) < digit_version("1.4"):
     ACTIVATION_LAYERS.register_module(module=GELU)
 else:
     ACTIVATION_LAYERS.register_module(module=nn.GELU)
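Note: the quote and layout changes above are behavior-preserving; Clamp stays registered under both "Clip" and its own class name. A quick illustration of the registry pattern, using the build_activation_layer that consumes these registrations (snippet not part of the commit):

# Both names resolve to the Clamp class registered above; remaining
# cfg keys are forwarded to the constructor.
clip = build_activation_layer(dict(type="Clip", min=0.0, max=6.0))
relu = build_activation_layer(dict(type="ReLU", inplace=True))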
@@ -3,7 +3,7 @@

 import torch.nn as nn

-from comfy_controlnet_preprocessors.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
+from hordelib.nodes.comfy_controlnet_preprocessors.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
 from ..utils import constant_init, kaiming_init
 from .activation import build_activation_layer
 from .conv import build_conv_layer
@@ -65,29 +65,31 @@ class ConvModule(nn.Module):
         Default: ('conv', 'norm', 'act').
     """

-    _abbr_ = 'conv_block'
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride=1,
-                 padding=0,
-                 dilation=1,
-                 groups=1,
-                 bias='auto',
-                 conv_cfg=None,
-                 norm_cfg=None,
-                 act_cfg=dict(type='ReLU'),
-                 inplace=True,
-                 with_spectral_norm=False,
-                 padding_mode='zeros',
-                 order=('conv', 'norm', 'act')):
+    _abbr_ = "conv_block"
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size,
+        stride=1,
+        padding=0,
+        dilation=1,
+        groups=1,
+        bias="auto",
+        conv_cfg=None,
+        norm_cfg=None,
+        act_cfg=dict(type="ReLU"),
+        inplace=True,
+        with_spectral_norm=False,
+        padding_mode="zeros",
+        order=("conv", "norm", "act"),
+    ):
         super(ConvModule, self).__init__()
         assert conv_cfg is None or isinstance(conv_cfg, dict)
         assert norm_cfg is None or isinstance(norm_cfg, dict)
         assert act_cfg is None or isinstance(act_cfg, dict)
-        official_padding_mode = ['zeros', 'circular']
+        official_padding_mode = ["zeros", "circular"]
         self.conv_cfg = conv_cfg
         self.norm_cfg = norm_cfg
         self.act_cfg = act_cfg
@@ -96,12 +98,12 @@ def __init__(self,
         self.with_explicit_padding = padding_mode not in official_padding_mode
         self.order = order
         assert isinstance(self.order, tuple) and len(self.order) == 3
-        assert set(order) == set(['conv', 'norm', 'act'])
+        assert set(order) == set(["conv", "norm", "act"])

         self.with_norm = norm_cfg is not None
         self.with_activation = act_cfg is not None
         # if the conv layer is before a norm layer, bias is unnecessary.
-        if bias == 'auto':
+        if bias == "auto":
             bias = not self.with_norm
         self.with_bias = bias

@@ -121,7 +123,8 @@ def __init__(self,
                 padding=conv_padding,
                 dilation=dilation,
                 groups=groups,
-                bias=bias)
+                bias=bias,
+            )
         # export the attributes of self.conv to a higher level for convenience
         self.in_channels = self.conv.in_channels
         self.out_channels = self.conv.out_channels
@@ -139,27 +142,24 @@ def __init__(self,
         # build normalization layers
         if self.with_norm:
             # norm layer is after conv layer
-            if order.index('norm') > order.index('conv'):
+            if order.index("norm") > order.index("conv"):
                 norm_channels = out_channels
             else:
                 norm_channels = in_channels
             self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
             self.add_module(self.norm_name, norm)
             if self.with_bias:
                 if isinstance(norm, (_BatchNorm, _InstanceNorm)):
-                    warnings.warn(
-                        'Unnecessary conv bias before batch/instance norm')
+                    warnings.warn("Unnecessary conv bias before batch/instance norm")
         else:
             self.norm_name = None

         # build activation layer
         if self.with_activation:
             act_cfg_ = act_cfg.copy()
             # nn.Tanh has no 'inplace' argument
-            if act_cfg_['type'] not in [
-                    'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
-            ]:
-                act_cfg_.setdefault('inplace', inplace)
+            if act_cfg_["type"] not in ["Tanh", "PReLU", "Sigmoid", "HSigmoid", "Swish"]:
+                act_cfg_.setdefault("inplace", inplace)
             self.activate = build_activation_layer(act_cfg_)

         # Use msra init by default
@@ -182,25 +182,25 @@ def init_weights(self):
         # this method with default ``kaiming_init``.
         # Note: For PyTorch's conv layers, they will be overwritten by our
         # initialization implementation using default ``kaiming_init``.
-        if not hasattr(self.conv, 'init_weights'):
-            if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
-                nonlinearity = 'leaky_relu'
-                a = self.act_cfg.get('negative_slope', 0.01)
+        if not hasattr(self.conv, "init_weights"):
+            if self.with_activation and self.act_cfg["type"] == "LeakyReLU":
+                nonlinearity = "leaky_relu"
+                a = self.act_cfg.get("negative_slope", 0.01)
             else:
-                nonlinearity = 'relu'
+                nonlinearity = "relu"
                 a = 0
             kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
         if self.with_norm:
             constant_init(self.norm, 1, bias=0)

     def forward(self, x, activate=True, norm=True):
         for layer in self.order:
-            if layer == 'conv':
+            if layer == "conv":
                 if self.with_explicit_padding:
                     x = self.padding_layer(x)
                 x = self.conv(x)
-            elif layer == 'norm' and norm and self.with_norm:
+            elif layer == "norm" and norm and self.with_norm:
                 x = self.norm(x)
-            elif layer == 'act' and activate and self.with_activation:
+            elif layer == "act" and activate and self.with_activation:
                 x = self.activate(x)
         return x
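Note: the ConvModule rewrite above is pure black-style reformatting; the conv/norm/act contract is unchanged. For reference, a minimal use of the API exactly as shown (illustrative, not from the commit):

import torch

# conv -> norm -> act; bias="auto" resolves to False because a norm follows.
block = ConvModule(
    in_channels=3,
    out_channels=16,
    kernel_size=3,
    padding=1,
    norm_cfg=dict(type="BN"),
    act_cfg=dict(type="ReLU"),
)
out = block(torch.randn(1, 3, 32, 32))  # -> shape (1, 16, 32, 32)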
@@ -2,24 +2,23 @@
 import torch
 import torch.nn as nn

-from comfy_controlnet_preprocessors.uniformer.mmcv import build_from_cfg
+from hordelib.nodes.comfy_controlnet_preprocessors.uniformer.mmcv import build_from_cfg
 from .registry import DROPOUT_LAYERS


-def drop_path(x, drop_prob=0., training=False):
+def drop_path(x, drop_prob=0.0, training=False):
     """Drop paths (Stochastic Depth) per sample (when applied in main path of
     residual blocks).
     We follow the implementation
     https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501
     """
-    if drop_prob == 0. or not training:
+    if drop_prob == 0.0 or not training:
         return x
     keep_prob = 1 - drop_prob
     # handle tensors with different dimensions, not just 4D tensors.
-    shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
-    random_tensor = keep_prob + torch.rand(
-        shape, dtype=x.dtype, device=x.device)
+    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
+    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
     output = x.div(keep_prob) * random_tensor.floor()
     return output

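Note: the drop_path cleanup is likewise behavior-preserving. A small sanity check of the stochastic-depth semantics it implements (illustrative snippet, not part of the commit):

import torch

x = torch.ones(8, 16)
# Identity when not training or when drop_prob is 0.
assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)
# In training, each sample either drops to zero or is scaled by
# 1 / keep_prob, so the expected value of the output matches the input.
y = drop_path(x, drop_prob=0.5, training=True)
assert set(y.unique().tolist()) <= {0.0, 2.0}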
(Diff truncated: the remaining changed files are not shown.)