diff --git a/.release b/.release
index 13be740f2..c1338f829 100644
--- a/.release
+++ b/.release
@@ -1 +1 @@
-v24.0.5
\ No newline at end of file
+v24.0.6
\ No newline at end of file
diff --git a/README.md b/README.md
index 6bf9e22c4..bb93713b0 100644
--- a/README.md
+++ b/README.md
@@ -42,10 +42,12 @@ The GUI allows you to set the training parameters and generate and run the requi
 - [SDXL training](#sdxl-training)
 - [Masked loss](#masked-loss)
 - [Change History](#change-history)
+  - [2024/04/22 (v24.0.6)](#20240422-v2406)
   - [2024/04/19 (v24.0.5)](#20240419-v2405)
+    - [New Contributors](#new-contributors)
   - [2024/04/18 (v24.0.4)](#20240418-v2404)
     - [What's Changed](#whats-changed)
-    - [New Contributors](#new-contributors)
+    - [New Contributors](#new-contributors-1)
   - [2024/04/24 (v24.0.3)](#20240424-v2403)
   - [2024/04/24 (v24.0.2)](#20240424-v2402)
   - [2024/04/17 (v24.0.1)](#20240417-v2401)
@@ -412,9 +414,20 @@ ControlNet dataset is used to specify the mask. The mask images should be the RG
 
 ## Change History
 
+### 2024/04/22 (v24.0.6)
+
+- Make start and stop buttons visible in headless mode
+- Add validation for lr scheduler and optimizer arguments
+
 ### 2024/04/19 (v24.0.5)
 
-- fdds
+- Hide tensorboard button if tensorflow module is not installed by @bmaltais in
+- Fixed wd14 captioning issue with undesired tags and tag replacement by @bmaltais in
+- Changed logger checkbox to dropdown, renamed use_wandb -> log_with by @ccharest93 in
+
+#### New Contributors
+
+- @ccharest93 made their first contribution in
 
 ### 2024/04/18 (v24.0.4)
 
@@ -468,6 +481,7 @@ The `gui.bat` and `gui.sh` scripts now include the `--do_not_use_shell` argument
 #### Miscellaneous
 
 - Made various other minor improvements and bug fixes to enhance overall functionality and user experience.
+- Fixed an issue where existing LoRA network weights were not properly loaded prior to training
 
 ### 2024/04/10 (v23.1.5)
diff --git a/assets/style.css b/assets/style.css
index 38c22d4b6..939ac937f 100644
--- a/assets/style.css
+++ b/assets/style.css
@@ -37,11 +37,11 @@
 #myTensorButton {
   background: radial-gradient(ellipse, #3a99ff, #52c8ff);
   color: white;
-  border: none;
+  border: 1px solid #296eb8;
 }
 
 #myTensorButtonStop {
   background: radial-gradient(ellipse, #52c8ff, #3a99ff);
   color: black;
-  border: none;
+  border: 1px solid #296eb8;
 }
\ No newline at end of file
diff --git a/config example.toml b/config example.toml
index 11c852346..c137d4387 100644
--- a/config example.toml
+++ b/config example.toml
@@ -113,7 +113,7 @@ save_state_on_train_end = false # Save state on train end
 scale_v_pred_loss_like_noise_pred = false # Scale v pred loss like noise pred
 shuffle_caption = false # Shuffle captions
 state_dir = "./outputs" # Resume from saved training state
-use_wandb = false # Use wandb
+log_with = "" # Logger to use ["wandb", "tensorboard", "all", ""]
 vae_batch_size = 0 # VAE batch size
 vae_dir = "./models/vae" # VAEs folder path
 v_pred_like_loss = 0 # V pred like loss weight
diff --git a/kohya_gui/class_command_executor.py b/kohya_gui/class_command_executor.py
index ec9c5fd5b..440a46c40 100644
--- a/kohya_gui/class_command_executor.py
+++ b/kohya_gui/class_command_executor.py
@@ -14,12 +14,19 @@ class CommandExecutor:
     A class to execute and manage commands.
     """
 
-    def __init__(self):
+    def __init__(self, headless: bool = False):
        """
        Initialize the CommandExecutor.
""" + self.headless = headless self.process = None - self.run_state = gr.Textbox(value="", visible=False) + + with gr.Row(): + self.button_run = gr.Button("Start training", variant="primary") + + self.button_stop_training = gr.Button( + "Stop training", visible=self.process is not None or headless, variant="stop" + ) def execute_command(self, run_cmd: str, use_shell: bool = False, **kwargs): """ @@ -64,16 +71,17 @@ def kill_command(self): # General exception handling for any other errors log.info(f"Error when terminating process: {e}") else: + self.process = None log.info("There is no running process to kill.") - return gr.Button(visible=True), gr.Button(visible=False) + return gr.Button(visible=True), gr.Button(visible=False or self.headless) def wait_for_training_to_end(self): while self.is_running(): time.sleep(1) log.debug("Waiting for training to end...") log.info("Training has ended.") - return gr.Button(visible=True), gr.Button(visible=False) + return gr.Button(visible=True), gr.Button(visible=False or self.headless) def is_running(self): """ @@ -82,4 +90,4 @@ def is_running(self): Returns: - bool: True if the command is running, False otherwise. """ - return self.process and self.process.poll() is None + return self.process is not None and self.process.poll() is None diff --git a/kohya_gui/class_tensorboard.py b/kohya_gui/class_tensorboard.py index 70c5d8e00..061b0796a 100644 --- a/kohya_gui/class_tensorboard.py +++ b/kohya_gui/class_tensorboard.py @@ -7,10 +7,11 @@ from threading import Thread, Event from .custom_logging import setup_logging + class TensorboardManager: DEFAULT_TENSORBOARD_PORT = 6006 - def __init__(self, logging_dir, headless=True, wait_time=5): + def __init__(self, logging_dir, headless: bool = False, wait_time=5): self.logging_dir = logging_dir self.headless = headless self.wait_time = wait_time @@ -25,9 +26,17 @@ def __init__(self, logging_dir, headless=True, wait_time=5): self.gradio_interface() def get_button_states(self, started=False): - return gr.Button(visible=not started), gr.Button(visible=started) + return gr.Button(visible=not started or self.headless), gr.Button( + visible=started or self.headless + ) def start_tensorboard(self, logging_dir=None): + if self.tensorboard_proc is not None: + self.log.info( + "Tensorboard is already running. Terminating existing process before starting new one..." + ) + self.stop_tensorboard() + if not os.path.exists(logging_dir) or not os.listdir(logging_dir): self.log.error( "Error: logging folder does not exist or does not contain logs." @@ -46,11 +55,6 @@ def start_tensorboard(self, logging_dir=None): ] self.log.info(run_cmd) - if self.tensorboard_proc is not None: - self.log.info( - "Tensorboard is already running. Terminating existing process before starting new one..." 
-            )
-            self.stop_tensorboard()
 
         self.log.info("Starting TensorBoard on port {}".format(self.tensorboard_port))
 
         try:
@@ -73,7 +77,7 @@ def open_tensorboard_url():
             self.thread = Thread(target=open_tensorboard_url)
             self.thread.start()
 
-        return self.get_button_states(started=True)
+        return self.get_button_states(started=True or self.headless)
 
     def stop_tensorboard(self):
         if self.tensorboard_proc is not None:
@@ -84,34 +88,38 @@ def stop_tensorboard(self):
                 self.log.info("...process stopped")
             except Exception as e:
                 self.log.error("Failed to stop Tensorboard:", e)
-        
+
         if self.thread is not None:
             self.stop_event.set()
             self.thread.join()  # Wait for the thread to finish
             self.thread = None
             self.log.info("Thread terminated successfully.")
 
-        return self.get_button_states(started=False)
+        return self.get_button_states(started=False or self.headless)
 
     def gradio_interface(self):
         try:
-            os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
-            
-            import tensorflow # Attempt to import tensorflow to check if it is installed
-            
+            os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
+
+            import tensorflow  # Attempt to import tensorflow to check if it is installed
+
             visibility = True
-            
+
         except ImportError:
-            self.log.error("tensorflow is not installed, hiding the tensorboard button...")
+            self.log.error(
+                "tensorflow is not installed, hiding the tensorboard button..."
+            )
             visibility = False
-        
+
         with gr.Row():
             button_start_tensorboard = gr.Button(
-                value="Start tensorboard", elem_id="myTensorButton", visible=visibility
+                value="Start tensorboard",
+                elem_id="myTensorButton",
+                visible=visibility or self.headless,
             )
             button_stop_tensorboard = gr.Button(
                 value="Stop tensorboard",
-                visible=False,
+                visible=False or self.headless,
                 elem_id="myTensorButtonStop",
             )
             button_start_tensorboard.click(
@@ -124,4 +132,4 @@ def gradio_interface(self):
             self.stop_tensorboard,
             outputs=[button_start_tensorboard, button_stop_tensorboard],
             show_progress=False,
-        )
\ No newline at end of file
+        )
diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py
index a00d3e681..6572b78b7 100644
--- a/kohya_gui/common_gui.py
+++ b/kohya_gui/common_gui.py
@@ -429,7 +429,15 @@ def update_my_data(my_data):
             pass
         my_data.pop(key, None)
-    
+
+
+    # Replace the lora_network_weights key with network_weights keeping the original value
+    for key in ["lora_network_weights"]:
+        value = my_data.get(key)  # Get original value
+        if value is not None:  # Check if the key exists in the dictionary
+            my_data["network_weights"] = value
+            my_data.pop(key, None)
+
     return my_data
 
@@ -1490,4 +1498,17 @@ def print_command_and_toml(run_cmd, tmpfilename):
             log.info(toml_file.read())
         log.info(f"end of toml config file: {tmpfilename}")
 
-    save_to_file(command_to_run)
\ No newline at end of file
+    save_to_file(command_to_run)
+
+def validate_args_setting(input_string):
+    # Regex pattern to handle multiple conditions:
+    # - Empty string is valid
+    # - Single or multiple key/value pairs with exactly one space between pairs
+    # - No spaces around '=' and no spaces within keys or values
+    pattern = r'^(\S+=\S+)( \S+=\S+)*$|^$'
+    if re.match(pattern, input_string):
+        return True
+    else:
+        log.info(f"'{input_string}' is not a valid settings string.")
+        log.info("A valid settings string must consist of one or more key/value pairs formatted as key=value, with no spaces around the equals sign or within the value. Multiple pairs should be separated by a space.")
+        return False
\ No newline at end of file
diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py
index bcbe7e59c..f9b97e9bd 100644
--- a/kohya_gui/dreambooth_gui.py
+++ b/kohya_gui/dreambooth_gui.py
@@ -18,6 +18,7 @@
     scriptdir,
     update_my_data,
     validate_paths,
+    validate_args_setting,
 )
 from .class_accelerate_launch import AccelerateLaunch
 from .class_configuration_file import ConfigurationFile
@@ -43,17 +44,12 @@
 log = setup_logging()
 
 # Setup command executor
-executor = CommandExecutor()
+executor = None
 
 # Setup huggingface
 huggingface = None
 
 use_shell = False
-
-TRAIN_BUTTON_VISIBLE = [
-    gr.Button(visible=True),
-    gr.Button(visible=False),
-    gr.Textbox(value=time.time()),
-]
+train_state_value = time.time()
 
 
 def save_configuration(
@@ -495,9 +491,28 @@ def train_model(
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
+    global train_state_value
+
+    TRAIN_BUTTON_VISIBLE = [
+        gr.Button(visible=True),
+        gr.Button(visible=False or headless),
+        gr.Textbox(value=train_state_value),
+    ]
+
+    if executor.is_running():
+        log.error("Training is already running. Can't start another training session.")
+        return TRAIN_BUTTON_VISIBLE
 
     log.info(f"Start training Dreambooth...")
 
+    log.info(f"Validating lr scheduler arguments...")
+    if not validate_args_setting(lr_scheduler_args):
+        return TRAIN_BUTTON_VISIBLE
+
+    log.info(f"Validating optimizer arguments...")
+    if not validate_args_setting(optimizer_args):
+        return TRAIN_BUTTON_VISIBLE
+
     # This function validates files or folder paths. Simply add new variables containing file or folder path
     # to validate below
     if not validate_paths(
@@ -663,8 +678,8 @@ def train_model(
     # def save_huggingface_to_toml(self, toml_file_path: str):
     config_toml_data = {
         # Update the values in the TOML data
-        "async_upload": async_upload,
         "adaptive_noise_scale": adaptive_noise_scale if not 0 else None,
+        "async_upload": async_upload,
         "bucket_no_upscale": bucket_no_upscale,
         "bucket_reso_steps": bucket_reso_steps,
         "cache_latents": cache_latents,
@@ -680,18 +695,17 @@ def train_model(
         "enable_bucket": enable_bucket,
         "epoch": int(epoch),
         "flip_aug": flip_aug,
-        "masked_loss": masked_loss,
         "full_bf16": full_bf16,
         "full_fp16": full_fp16,
         "gradient_accumulation_steps": int(gradient_accumulation_steps),
         "gradient_checkpointing": gradient_checkpointing,
         "huber_c": huber_c,
         "huber_schedule": huber_schedule,
+        "huggingface_path_in_repo": huggingface_path_in_repo,
         "huggingface_repo_id": huggingface_repo_id,
-        "huggingface_token": huggingface_token,
         "huggingface_repo_type": huggingface_repo_type,
         "huggingface_repo_visibility": huggingface_repo_visibility,
-        "huggingface_path_in_repo": huggingface_path_in_repo,
+        "huggingface_token": huggingface_token,
         "ip_noise_gamma": ip_noise_gamma if ip_noise_gamma != 0 else None,
         "ip_noise_gamma_random_strength": ip_noise_gamma_random_strength,
         "keep_tokens": int(keep_tokens),
@@ -706,8 +720,9 @@ def train_model(
             learning_rate_te2 if sdxl and not 0 else None
         ),  # only for sdxl and not 0
         "logging_dir": logging_dir,
-        "log_tracker_name": log_tracker_name,
         "log_tracker_config": log_tracker_config,
+        "log_tracker_name": log_tracker_name,
+        "log_with": log_with,
         "loss_type": loss_type,
         "lr_scheduler": lr_scheduler,
         "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(),
@@ -716,6 +731,7 @@ def train_model(
         ),
         "lr_scheduler_power": lr_scheduler_power,
         "lr_warmup_steps": lr_warmup_steps,
+        "masked_loss": masked_loss,
         "max_bucket_reso": max_bucket_reso,
         "max_timestep": max_timestep if max_timestep != 0 else None,
         "max_token_length": int(max_token_length),
@@ -737,12 +753,12 @@ def train_model(
         "noise_offset": noise_offset if not 0 else None,
         "noise_offset_random_strength": noise_offset_random_strength,
         "noise_offset_type": noise_offset_type,
-        "optimizer_type": optimizer,
         "optimizer_args": (
             str(optimizer_args).replace('"', "").split()
             if optimizer_args != ""
             else None
         ),
+        "optimizer_type": optimizer,
         "output_dir": output_dir,
         "output_name": output_name,
         "persistent_data_loader_workers": persistent_data_loader_workers,
@@ -783,7 +799,6 @@ def train_model(
         ),
         "train_batch_size": train_batch_size,
         "train_data_dir": train_data_dir,
-        "log_with": log_with,
         "v2": v2,
         "v_parameterization": v_parameterization,
         "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None,
@@ -802,9 +817,9 @@ def train_model(
         for key, value in config_toml_data.items()
         if value not in ["", False, None]
     }
-    
+
     config_toml_data["max_data_loader_n_workers"] = max_data_loader_n_workers
-    
+
     # Sort the dictionary by keys
     config_toml_data = dict(sorted(config_toml_data.items()))
 
@@ -856,10 +871,12 @@ def train_model(
 
     executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
 
+    train_state_value = time.time()
+
     return (
-        gr.Button(visible=False),
+        gr.Button(visible=False or headless),
         gr.Button(visible=True),
-        gr.Textbox(value=time.time()),
+        gr.Textbox(value=train_state_value),
     )
 
 
@@ -940,21 +957,15 @@ def dreambooth_tab(
             with gr.Accordion("HuggingFace", open=False):
                 huggingface = HuggingFace(config=config)
 
-    with gr.Column(), gr.Group():
-        with gr.Row():
-            button_run = gr.Button("Start training", variant="primary")
-
-            button_stop_training = gr.Button(
-                "Stop training", visible=False, variant="stop"
-            )
+    global executor
+    executor = CommandExecutor(headless=headless)
 
     with gr.Column(), gr.Group():
         with gr.Row():
             button_print = gr.Button("Print training command")
 
     # Setup gradio tensorboard buttons
-    with gr.Column(), gr.Group():
-        TensorboardManager(headless=headless, logging_dir=folders.logging_dir)
+    TensorboardManager(headless=headless, logging_dir=folders.logging_dir)
 
     settings_list = [
         source_model.pretrained_model_name_or_path,
@@ -1101,36 +1112,23 @@ def dreambooth_tab(
         show_progress=False,
     )
 
-    # config.button_save_as_config.click(
-    #     save_configuration,
-    #     inputs=[dummy_db_true, config.config_file_name] + settings_list,
-    #     outputs=[config.config_file_name],
-    #     show_progress=False,
-    # )
-
-    # def wait_for_training_to_end():
-    #     while executor.is_running():
-    #         time.sleep(1)
-    #         log.debug("Waiting for training to end...")
-    #     log.info("Training has ended.")
-    #     return gr.Button(visible=True), gr.Button(visible=False)
-
-    # Hidden textbox used to run the wait_for_training_to_end function to hide stop and show start at the end of the training
-    run_state = gr.Textbox(value="", visible=False)
+    run_state = gr.Textbox(value=train_state_value, visible=False)
+
     run_state.change(
         fn=executor.wait_for_training_to_end,
-        outputs=[button_run, button_stop_training],
+        outputs=[executor.button_run, executor.button_stop_training],
     )
 
-    button_run.click(
+    executor.button_run.click(
         train_model,
         inputs=[dummy_headless] + [dummy_db_false] + settings_list,
-        outputs=[button_run, button_stop_training, run_state],
+        outputs=[executor.button_run, executor.button_stop_training, run_state],
         show_progress=False,
     )
 
-    button_stop_training.click(
-        executor.kill_command, outputs=[button_run, button_stop_training]
+    executor.button_stop_training.click(
+        executor.kill_command,
+        outputs=[executor.button_run, executor.button_stop_training],
     )
 
     button_print.click(
diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py
index 4c7de548e..ae3f82e7c 100644
--- a/kohya_gui/finetune_gui.py
+++ b/kohya_gui/finetune_gui.py
@@ -19,6 +19,7 @@
     scriptdir,
     update_my_data,
     validate_paths,
+    validate_args_setting
 )
 from .class_accelerate_launch import AccelerateLaunch
 from .class_configuration_file import ConfigurationFile
@@ -40,13 +41,12 @@
 log = setup_logging()
 
 # Setup command executor
-executor = CommandExecutor()
+executor = None
 
 # Setup huggingface
 huggingface = None
 
 use_shell = False
-
-# from easygui import msgbox
+train_state_value = time.time()
 
 folder_symbol = "\U0001f4c2"  # 📂
 refresh_symbol = "\U0001f504"  # 🔄
@@ -56,11 +56,6 @@
 PYTHON = sys.executable
 presets_dir = rf"{scriptdir}/presets"
 
-TRAIN_BUTTON_VISIBLE = [
-    gr.Button(visible=True),
-    gr.Button(visible=False),
-    gr.Textbox(value=time.time()),
-]
 
 def save_configuration(
@@ -534,11 +529,30 @@ def train_model(
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
+    global train_state_value
+
+    TRAIN_BUTTON_VISIBLE = [
+        gr.Button(visible=True),
+        gr.Button(visible=False or headless),
+        gr.Textbox(value=train_state_value),
+    ]
+
+    if executor.is_running():
+        log.error("Training is already running. Can't start another training session.")
+        return TRAIN_BUTTON_VISIBLE
 
     log.debug(f"headless = {headless} ; print_only = {print_only}")
 
     log.info(f"Start Finetuning...")
 
+    log.info(f"Validating lr scheduler arguments...")
+    if not validate_args_setting(lr_scheduler_args):
+        return TRAIN_BUTTON_VISIBLE
+
+    log.info(f"Validating optimizer arguments...")
+    if not validate_args_setting(optimizer_args):
+        return TRAIN_BUTTON_VISIBLE
+
     if train_dir != "" and not os.path.exists(train_dir):
         os.mkdir(train_dir)
 
@@ -741,10 +755,10 @@ def train_model(
 
     config_toml_data = {
         # Update the values in the TOML data
-        "async_upload": async_upload,
         "adaptive_noise_scale": (
             adaptive_noise_scale if adaptive_noise_scale != 0 else None
         ),
+        "async_upload": async_upload,
         "block_lr": block_lr,
         "bucket_no_upscale": bucket_no_upscale,
         "bucket_reso_steps": bucket_reso_steps,
@@ -762,7 +776,6 @@ def train_model(
         "dynamo_backend": dynamo_backend,
         "enable_bucket": True,
         "flip_aug": flip_aug,
-        "masked_loss": masked_loss,
         "full_bf16": full_bf16,
         "full_fp16": full_fp16,
         "gradient_accumulation_steps": int(gradient_accumulation_steps),
@@ -795,6 +808,7 @@ def train_model(
         "lr_scheduler": lr_scheduler,
         "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(),
         "lr_warmup_steps": lr_warmup_steps,
+        "masked_loss": masked_loss,
         "max_bucket_reso": int(max_bucket_reso),
         "max_timestep": max_timestep if max_timestep != 0 else None,
         "max_token_length": int(max_token_length),
@@ -927,10 +941,12 @@ def train_model(
     # Run the command
     executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
 
+    train_state_value = time.time()
+
     return (
-        gr.Button(visible=False),
+        gr.Button(visible=False or headless),
         gr.Button(visible=True),
-        gr.Textbox(value=time.time()),
+        gr.Textbox(value=train_state_value),
     )
 
 
@@ -1090,21 +1106,14 @@ def list_presets(path):
             with gr.Accordion("HuggingFace", open=False):
                 huggingface = HuggingFace(config=config)
 
-    with gr.Column(), gr.Group():
-        with gr.Row():
-            button_run = gr.Button("Start training", variant="primary")
-
-            button_stop_training = gr.Button(
-                "Stop training", visible=False, variant="stop"
-            )
+    global executor
+    executor = CommandExecutor(headless=headless)
 
     with gr.Column(), gr.Group():
        with gr.Row():
            button_print = gr.Button("Print training command")
 
-    # Setup gradio tensorboard buttons
-    with gr.Column(), gr.Group():
-        TensorboardManager(headless=headless, logging_dir=folders.logging_dir)
+    TensorboardManager(headless=headless, logging_dir=folders.logging_dir)
 
     settings_list = [
         source_model.pretrained_model_name_or_path,
@@ -1264,13 +1273,6 @@ def list_presets(path):
         show_progress=False,
     )
 
-    # config.button_load_config.click(
-    #     open_configuration,
-    #     inputs=[dummy_db_false, config.config_file_name] + settings_list,
-    #     outputs=[config.config_file_name] + settings_list,
-    #     show_progress=False,
-    # )
-
     training_preset.input(
         open_configuration,
         inputs=[dummy_db_false, dummy_db_true, configuration.config_file_name]
@@ -1280,22 +1282,22 @@ def list_presets(path):
         show_progress=False,
     )
 
-    # Hidden textbox used to run the wait_for_training_to_end function to hide stop and show start at the end of the training
-    run_state = gr.Textbox(value="", visible=False)
+    run_state = gr.Textbox(value=train_state_value, visible=False)
+
     run_state.change(
         fn=executor.wait_for_training_to_end,
-        outputs=[button_run, button_stop_training],
+        outputs=[executor.button_run, executor.button_stop_training],
     )
 
-    button_run.click(
+    executor.button_run.click(
         train_model,
         inputs=[dummy_headless] + [dummy_db_false] + settings_list,
-        outputs=[button_run, button_stop_training, run_state],
+        outputs=[executor.button_run, executor.button_stop_training, run_state],
         show_progress=False,
     )
 
-    button_stop_training.click(
-        executor.kill_command, outputs=[button_run, button_stop_training]
+    executor.button_stop_training.click(
+        executor.kill_command, outputs=[executor.button_run, executor.button_stop_training]
     )
 
     button_print.click(
diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py
index 059705404..fbdf41977 100644
--- a/kohya_gui/lora_gui.py
+++ b/kohya_gui/lora_gui.py
@@ -20,6 +20,7 @@
     scriptdir,
     update_my_data,
     validate_paths,
+    validate_args_setting
 )
 from .class_accelerate_launch import AccelerateLaunch
 from .class_configuration_file import ConfigurationFile
@@ -47,27 +48,18 @@
 log = setup_logging()
 
 # Setup command executor
-executor = CommandExecutor()
+executor = None
 
 # Setup huggingface
 huggingface = None
 
 use_shell = False
-
-button_run = gr.Button("Start training", variant="primary")
-
-button_stop_training = gr.Button("Stop training", visible=False)
+train_state_value = time.time()
 
 document_symbol = "\U0001F4C4"  # 📄
 
 presets_dir = rf"{scriptdir}/presets"
 
-TRAIN_BUTTON_VISIBLE = [
-    gr.Button(visible=True),
-    gr.Button(visible=False),
-    gr.Textbox(value=time.time()),
-]
-
 
 def save_configuration(
     save_as_bool,
@@ -114,7 +106,7 @@ def save_configuration(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,
+    network_weights,
     dim_from_weights,
     color_aug,
     flip_aug,
@@ -319,7 +311,7 @@ def open_configuration(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,
+    network_weights,
     dim_from_weights,
     color_aug,
     flip_aug,
@@ -554,7 +546,7 @@ def train_model(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,
+    network_weights,
     dim_from_weights,
     color_aug,
     flip_aug,
@@ -674,10 +666,28 @@ def train_model(
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
-    global command_running
+    global train_state_value
+
+    TRAIN_BUTTON_VISIBLE = [
+        gr.Button(visible=True),
+        gr.Button(visible=False or headless),
+        gr.Textbox(value=train_state_value),
+    ]
+
+    if executor.is_running():
+        log.error("Training is already running. Can't start another training session.")
+        return TRAIN_BUTTON_VISIBLE
 
     log.info(f"Start training LoRA {LoRA_type} ...")
 
+    log.info(f"Validating lr scheduler arguments...")
+    if not validate_args_setting(lr_scheduler_args):
+        return TRAIN_BUTTON_VISIBLE
+
+    log.info(f"Validating optimizer arguments...")
+    if not validate_args_setting(optimizer_args):
+        return TRAIN_BUTTON_VISIBLE
+
     if not validate_paths(
         output_dir=output_dir,
         pretrained_model_name_or_path=pretrained_model_name_or_path,
@@ -688,7 +698,7 @@ def train_model(
         log_tracker_config=log_tracker_config,
         resume=resume,
         vae=vae,
-        lora_network_weights=lora_network_weights,
+        network_weights=network_weights,
         dataset_config=dataset_config,
     ):
         return TRAIN_BUTTON_VISIBLE
@@ -1023,10 +1033,10 @@ def train_model(
     network_train_unet_only = text_encoder_lr_float == 0 and unet_lr_float != 0
 
     config_toml_data = {
-        "async_upload": async_upload,
         "adaptive_noise_scale": (
             adaptive_noise_scale if adaptive_noise_scale != 0 else None
         ),
+        "async_upload": async_upload,
         "bucket_no_upscale": bucket_no_upscale,
         "bucket_reso_steps": bucket_reso_steps,
         "cache_latents": cache_latents,
@@ -1046,7 +1056,6 @@ def train_model(
         "enable_bucket": enable_bucket,
         "epoch": int(epoch),
         "flip_aug": flip_aug,
-        "masked_loss": masked_loss,
         "fp8_base": fp8_base,
         "full_bf16": full_bf16,
         "full_fp16": full_fp16,
@@ -1066,7 +1075,6 @@ def train_model(
         "logging_dir": logging_dir,
         "log_tracker_name": log_tracker_name,
         "log_tracker_config": log_tracker_config,
-        "lora_network_weights": lora_network_weights,
         "loss_type": loss_type,
         "lr_scheduler": lr_scheduler,
         "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(),
@@ -1075,6 +1083,7 @@ def train_model(
         ),
         "lr_scheduler_power": lr_scheduler_power,
         "lr_warmup_steps": lr_warmup_steps,
+        "masked_loss": masked_loss,
         "max_bucket_reso": max_bucket_reso,
         "max_grad_norm": max_grad_norm,
         "max_timestep": max_timestep if max_timestep != 0 else None,
@@ -1102,6 +1111,7 @@ def train_model(
         "network_module": network_module,
         "network_train_unet_only": network_train_unet_only,
         "network_train_text_encoder_only": network_train_text_encoder_only,
+        "network_weights": network_weights,
         "no_half_vae": True if sdxl and sdxl_no_half_vae else None,
         "noise_offset": noise_offset if noise_offset != 0 else None,
         "noise_offset_random_strength": noise_offset_random_strength,
@@ -1221,12 +1231,15 @@ def train_model(
     env["TF_ENABLE_ONEDNN_OPTS"] = "0"
 
     # Run the command
+
     executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
+
+    train_state_value = time.time()
 
     return (
-        gr.Button(visible=False),
+        gr.Button(visible=False or headless),
         gr.Button(visible=True),
-        gr.Textbox(value=time.time()),
+        gr.Textbox(value=train_state_value),
     )
 
 
@@ -1356,21 +1369,21 @@ def list_presets(path):
             )
             with gr.Group():
                 with gr.Row():
-                    lora_network_weights = gr.Textbox(
-                        label="LoRA network weights",
+                    network_weights = gr.Textbox(
+                        label="Network weights",
                         placeholder="(Optional)",
                         info="Path to an existing LoRA network weights to resume training from",
                     )
-                    lora_network_weights_file = gr.Button(
+                    network_weights_file = gr.Button(
                         document_symbol,
                         elem_id="open_folder_small",
                         elem_classes=["tool"],
                         visible=(not headless),
                     )
-                    lora_network_weights_file.click(
+                    network_weights_file.click(
                         get_any_file_path,
-                        inputs=[lora_network_weights],
-                        outputs=lora_network_weights,
+                        inputs=[network_weights],
+                        outputs=network_weights,
                         show_progress=False,
                     )
                     dim_from_weights = gr.Checkbox(
@@ -1623,7 +1636,7 @@ def update_LoRA_settings(
                     },
                 },
             },
-            "lora_network_weights": {
+            "network_weights": {
"gr_type": gr.Textbox, "update_params": { "visible": LoRA_type @@ -1643,7 +1656,7 @@ def update_LoRA_settings( }, }, }, - "lora_network_weights_file": { + "network_weights_file": { "gr_type": gr.Button, "update_params": { "visible": LoRA_type @@ -2057,8 +2070,8 @@ def update_LoRA_settings( network_row, convolution_row, kohya_advanced_lora, - lora_network_weights, - lora_network_weights_file, + network_weights, + network_weights_file, dim_from_weights, factor, conv_dim, @@ -2084,21 +2097,15 @@ def update_LoRA_settings( ], ) - with gr.Column(), gr.Group(): - with gr.Row(): - button_run = gr.Button("Start training", variant="primary") - - button_stop_training = gr.Button( - "Stop training", visible=False, variant="stop" - ) - + global executor + executor = CommandExecutor(headless=headless) + with gr.Column(), gr.Group(): with gr.Row(): button_print = gr.Button("Print training command") # Setup gradio tensorboard buttons - with gr.Column(), gr.Group(): - TensorboardManager(headless=headless, logging_dir=folders.logging_dir) + TensorboardManager(headless=headless, logging_dir=folders.logging_dir) settings_list = [ source_model.pretrained_model_name_or_path, @@ -2142,7 +2149,7 @@ def update_LoRA_settings( text_encoder_lr, unet_lr, network_dim, - lora_network_weights, + network_weights, dim_from_weights, advanced_training.color_aug, advanced_training.flip_aug, @@ -2301,29 +2308,22 @@ def update_LoRA_settings( show_progress=False, ) - # config.button_save_as_config.click( - # save_configuration, - # inputs=[dummy_db_true, config.config_file_name] + settings_list, - # outputs=[config.config_file_name], - # show_progress=False, - # ) - - # Hidden textbox used to run the wait_for_training_to_end function to hide stop and show start at the end of the training - run_state = gr.Textbox(value="", visible=False) + run_state = gr.Textbox(value=train_state_value, visible=False) + run_state.change( fn=executor.wait_for_training_to_end, - outputs=[button_run, button_stop_training], + outputs=[executor.button_run, executor.button_stop_training], ) - button_run.click( + executor.button_run.click( train_model, inputs=[dummy_headless] + [dummy_db_false] + settings_list, - outputs=[button_run, button_stop_training, run_state], + outputs=[executor.button_run, executor.button_stop_training, run_state], show_progress=False, ) - button_stop_training.click( - executor.kill_command, outputs=[button_run, button_stop_training] + executor.button_stop_training.click( + executor.kill_command, outputs=[executor.button_run, executor.button_stop_training] ) button_print.click( diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 4e7b6a45a..6d9da7c36 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -20,6 +20,7 @@ scriptdir, update_my_data, validate_paths, + validate_args_setting ) from .class_accelerate_launch import AccelerateLaunch from .class_configuration_file import ConfigurationFile @@ -45,17 +46,12 @@ log = setup_logging() # Setup command executor -executor = CommandExecutor() +executor = None # Setup huggingface huggingface = None use_shell = False - -TRAIN_BUTTON_VISIBLE = [ - gr.Button(visible=True), - gr.Button(visible=False), - gr.Textbox(value=time.time()), -] +train_state_value = time.time() def save_configuration( @@ -496,9 +492,28 @@ def train_model( ): # Get list of function parameters and values parameters = list(locals().items()) + global train_state_value + + TRAIN_BUTTON_VISIBLE = [ + gr.Button(visible=True), + 
+        gr.Button(visible=False or headless),
+        gr.Textbox(value=train_state_value),
+    ]
+
+    if executor.is_running():
+        log.error("Training is already running. Can't start another training session.")
+        return TRAIN_BUTTON_VISIBLE
 
     log.info(f"Start training TI...")
 
+    log.info(f"Validating lr scheduler arguments...")
+    if not validate_args_setting(lr_scheduler_args):
+        return TRAIN_BUTTON_VISIBLE
+
+    log.info(f"Validating optimizer arguments...")
+    if not validate_args_setting(optimizer_args):
+        return TRAIN_BUTTON_VISIBLE
+
     if not validate_paths(
         output_dir=output_dir,
         pretrained_model_name_or_path=pretrained_model_name_or_path,
@@ -689,10 +704,10 @@ def train_model(
     # def save_huggingface_to_toml(self, toml_file_path: str):
     config_toml_data = {
         # Update the values in the TOML data
-        "async_upload": async_upload,
         "adaptive_noise_scale": (
             adaptive_noise_scale if adaptive_noise_scale != 0 else None
         ),
+        "async_upload": async_upload,
         "bucket_no_upscale": bucket_no_upscale,
         "bucket_reso_steps": bucket_reso_steps,
         "cache_latents": cache_latents,
@@ -874,11 +889,13 @@ def train_model(
 
     # Run the command
     executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
+
+    train_state_value = time.time()
 
     return (
-        gr.Button(visible=False),
+        gr.Button(visible=False or headless),
         gr.Button(visible=True),
-        gr.Textbox(value=time.time()),
+        gr.Textbox(value=train_state_value),
     )
 
 
@@ -1052,21 +1069,15 @@ def list_embedding_files(path):
             with gr.Accordion("HuggingFace", open=False):
                 huggingface = HuggingFace(config=config)
 
-    with gr.Column(), gr.Group():
-        with gr.Row():
-            button_run = gr.Button("Start training", variant="primary")
-
-            button_stop_training = gr.Button(
-                "Stop training", visible=False, variant="stop"
-            )
-
+    global executor
+    executor = CommandExecutor(headless=headless)
+
     with gr.Column(), gr.Group():
         with gr.Row():
             button_print = gr.Button("Print training command")
 
     # Setup gradio tensorboard buttons
-    with gr.Column(), gr.Group():
-        TensorboardManager(headless=headless, logging_dir=folders.logging_dir)
+    TensorboardManager(headless=headless, logging_dir=folders.logging_dir)
 
     settings_list = [
         source_model.pretrained_model_name_or_path,
@@ -1211,30 +1222,23 @@ def list_embedding_files(path):
         outputs=[configuration.config_file_name],
         show_progress=False,
     )
-
-    # config.button_save_as_config.click(
-    #     save_configuration,
-    #     inputs=[dummy_db_true, config.config_file_name] + settings_list,
-    #     outputs=[config.config_file_name],
-    #     show_progress=False,
-    # )
-
-    # Hidden textbox used to run the wait_for_training_to_end function to hide stop and show start at the end of the training
-    run_state = gr.Textbox(value="", visible=False)
+
+    run_state = gr.Textbox(value=train_state_value, visible=False)
+
     run_state.change(
         fn=executor.wait_for_training_to_end,
-        outputs=[button_run, button_stop_training],
+        outputs=[executor.button_run, executor.button_stop_training],
     )
 
-    button_run.click(
+    executor.button_run.click(
         train_model,
         inputs=[dummy_headless] + [dummy_db_false] + settings_list,
-        outputs=[button_run, button_stop_training, run_state],
+        outputs=[executor.button_run, executor.button_stop_training, run_state],
         show_progress=False,
     )
 
-    button_stop_training.click(
-        executor.kill_command, outputs=[button_run, button_stop_training]
+    executor.button_stop_training.click(
+        executor.kill_command, outputs=[executor.button_run, executor.button_stop_training]
     )
 
     button_print.click(
diff --git a/presets/finetune/SDXL - AI_Now PagedAdamW8bit v1.0.json b/presets/finetune/SDXL - AI_Now PagedAdamW8bit v1.0.json
index 0821e5829..7cb08bdd9 100644
--- a/presets/finetune/SDXL - AI_Now PagedAdamW8bit v1.0.json
+++ b/presets/finetune/SDXL - AI_Now PagedAdamW8bit v1.0.json
@@ -61,7 +61,7 @@
   "train_batch_size": 2,
   "train_text_encoder": false,
   "use_latent_files": "No",
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/finetune/SDXL - Essenz series by AI_Characters_Training v1.0.json b/presets/finetune/SDXL - Essenz series by AI_Characters_Training v1.0.json
index 89cec3866..7779af2e3 100644
--- a/presets/finetune/SDXL - Essenz series by AI_Characters_Training v1.0.json
+++ b/presets/finetune/SDXL - Essenz series by AI_Characters_Training v1.0.json
@@ -77,7 +77,7 @@
   "train_dir": "/kohya_ss/output/SDXL1.0_Essenz-series-by-AI_Characters_Concept_Morphing-v1.0",
   "train_text_encoder": true,
   "use_latent_files": "Yes",
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - 1 image LoRA v1.0.json b/presets/lora/SDXL - 1 image LoRA v1.0.json
index 3f686aaaa..a9ad0c4ae 100644
--- a/presets/lora/SDXL - 1 image LoRA v1.0.json
+++ b/presets/lora/SDXL - 1 image LoRA v1.0.json
@@ -100,7 +100,7 @@
   "use_cp": false,
   "use_scalar": false,
   "use_tucker": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - LoHA AI_Characters v1.0.json b/presets/lora/SDXL - LoHA AI_Characters v1.0.json
index c2c317a89..7d9012317 100644
--- a/presets/lora/SDXL - LoHA AI_Characters v1.0.json
+++ b/presets/lora/SDXL - LoHA AI_Characters v1.0.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoKR v1.0.json b/presets/lora/SDXL - LoKR v1.0.json
index 20af86d12..2d476fa25 100644
--- a/presets/lora/SDXL - LoKR v1.0.json
+++ b/presets/lora/SDXL - LoKR v1.0.json
@@ -82,7 +82,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": true,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA AI_Now ADamW v1.0.json b/presets/lora/SDXL - LoRA AI_Now ADamW v1.0.json
index da25fba46..e45535e7f 100644
--- a/presets/lora/SDXL - LoRA AI_Now ADamW v1.0.json
+++ b/presets/lora/SDXL - LoRA AI_Now ADamW v1.0.json
@@ -87,7 +87,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - LoRA AI_Now prodigy v1.0.json b/presets/lora/SDXL - LoRA AI_Now prodigy v1.0.json
index b876ffd2e..1827b713a 100644
--- a/presets/lora/SDXL - LoRA AI_Now prodigy v1.0.json
+++ b/presets/lora/SDXL - LoRA AI_Now prodigy v1.0.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA AI_characters standard v1.0.json b/presets/lora/SDXL - LoRA AI_characters standard v1.0.json
index 1a8555a73..c37a47fc5 100644
--- a/presets/lora/SDXL - LoRA AI_characters standard v1.0.json
+++ b/presets/lora/SDXL - LoRA AI_characters standard v1.0.json
@@ -87,7 +87,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - LoRA AI_characters standard v1.1.json b/presets/lora/SDXL - LoRA AI_characters standard v1.1.json
index c163df4a5..aa0e0ba28 100644
--- a/presets/lora/SDXL - LoRA AI_characters standard v1.1.json
+++ b/presets/lora/SDXL - LoRA AI_characters standard v1.1.json
@@ -87,7 +87,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - LoRA adafactor v1.0.json b/presets/lora/SDXL - LoRA adafactor v1.0.json
index 17a7fffcf..cf195cc05 100644
--- a/presets/lora/SDXL - LoRA adafactor v1.0.json
+++ b/presets/lora/SDXL - LoRA adafactor v1.0.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA aitrepreneur clothing v1.0.json b/presets/lora/SDXL - LoRA aitrepreneur clothing v1.0.json
index 48d049fd1..f3608af33 100644
--- a/presets/lora/SDXL - LoRA aitrepreneur clothing v1.0.json
+++ b/presets/lora/SDXL - LoRA aitrepreneur clothing v1.0.json
@@ -87,7 +87,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - LoRA by malcolmrey training v1.0.json b/presets/lora/SDXL - LoRA by malcolmrey training v1.0.json
index 80c3c659b..e61950aa7 100644
--- a/presets/lora/SDXL - LoRA by malcolmrey training v1.0.json
+++ b/presets/lora/SDXL - LoRA by malcolmrey training v1.0.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA face dogu_cat v1.0.json b/presets/lora/SDXL - LoRA face dogu_cat v1.0.json
index 25ad855cb..80a863074 100644
--- a/presets/lora/SDXL - LoRA face dogu_cat v1.0.json
+++ b/presets/lora/SDXL - LoRA face dogu_cat v1.0.json
@@ -87,7 +87,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/SDXL - LoRA finetuning phase 1_v1.1.json b/presets/lora/SDXL - LoRA finetuning phase 1_v1.1.json
index fcf2ac564..b9e439cb6 100644
--- a/presets/lora/SDXL - LoRA finetuning phase 1_v1.1.json
+++ b/presets/lora/SDXL - LoRA finetuning phase 1_v1.1.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": true,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA finetuning phase 2_v1.1.json b/presets/lora/SDXL - LoRA finetuning phase 2_v1.1.json
index 94ddaf68e..7edf86ce3 100644
--- a/presets/lora/SDXL - LoRA finetuning phase 2_v1.1.json
+++ b/presets/lora/SDXL - LoRA finetuning phase 2_v1.1.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.0.json b/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.0.json
index 3e5af9f62..3c82be343 100644
--- a/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.0.json
+++ b/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.0.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": true,
+  "log_with": "wandb",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.1.json b/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.1.json
index 5cb0296bc..c38137866 100644
--- a/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.1.json
+++ b/presets/lora/SDXL - LoRA kudou-reira dadaptadam v1.1.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": true,
+  "log_with": "wandb",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - LoRA kudou-reira prodigy v4.0.json b/presets/lora/SDXL - LoRA kudou-reira prodigy v4.0.json
index 755743728..8efb85fbf 100644
--- a/presets/lora/SDXL - LoRA kudou-reira prodigy v4.0.json
+++ b/presets/lora/SDXL - LoRA kudou-reira prodigy v4.0.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": true,
+  "log_with": "wandb",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - edgLoRAXL AI_Now.json b/presets/lora/SDXL - edgLoRAXL AI_Now.json
index a3ea4e6e2..d10eb38d3 100644
--- a/presets/lora/SDXL - edgLoRAXL AI_Now.json
+++ b/presets/lora/SDXL - edgLoRAXL AI_Now.json
@@ -86,7 +86,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": true,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/SDXL - edgLoRAXL.json b/presets/lora/SDXL - edgLoRAXL.json
index fb5ae337c..e0e739c80 100644
--- a/presets/lora/SDXL - edgLoRAXL.json
+++ b/presets/lora/SDXL - edgLoRAXL.json
@@ -85,7 +85,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": true,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/ia3-sd15.json b/presets/lora/ia3-sd15.json
index e2febebe5..f85abe898 100644
--- a/presets/lora/ia3-sd15.json
+++ b/presets/lora/ia3-sd15.json
@@ -77,7 +77,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/locon-dadaptation-sdxl.json b/presets/lora/locon-dadaptation-sdxl.json
index 5340cabd5..5cbb9c663 100644
--- a/presets/lora/locon-dadaptation-sdxl.json
+++ b/presets/lora/locon-dadaptation-sdxl.json
@@ -78,7 +78,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/loha-sd15.json b/presets/lora/loha-sd15.json
index 48b19dc0b..44588580e 100644
--- a/presets/lora/loha-sd15.json
+++ b/presets/lora/loha-sd15.json
@@ -77,7 +77,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": true,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/lokr-sd15.json b/presets/lora/lokr-sd15.json
index 78750c043..9ba0d5db8 100644
--- a/presets/lora/lokr-sd15.json
+++ b/presets/lora/lokr-sd15.json
@@ -75,7 +75,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "vae_batch_size": 0,
diff --git a/presets/lora/sd15 - GLoRA v1.0.json b/presets/lora/sd15 - GLoRA v1.0.json
index bf63a4fb8..79626b046 100644
--- a/presets/lora/sd15 - GLoRA v1.0.json
+++ b/presets/lora/sd15 - GLoRA v1.0.json
@@ -96,7 +96,7 @@
   "use_cp": true,
   "use_scalar": false,
   "use_tucker": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/sd15 - LoKR v1.0.json b/presets/lora/sd15 - LoKR v1.0.json
index ed4a272f8..0cf755936 100644
--- a/presets/lora/sd15 - LoKR v1.0.json
+++ b/presets/lora/sd15 - LoKR v1.0.json
@@ -89,7 +89,7 @@
   "unit": 1,
   "up_lr_weight": "",
   "use_cp": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/sd15 - LoKr v1.1.json b/presets/lora/sd15 - LoKr v1.1.json
index bf63a4fb8..79626b046 100644
--- a/presets/lora/sd15 - LoKr v1.1.json
+++ b/presets/lora/sd15 - LoKr v1.1.json
@@ -96,7 +96,7 @@
   "use_cp": true,
   "use_scalar": false,
   "use_tucker": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
diff --git a/presets/lora/sd15 - LoKr v2.0.json b/presets/lora/sd15 - LoKr v2.0.json
index e65637dab..82a5e82a3 100644
--- a/presets/lora/sd15 - LoKr v2.0.json
+++ b/presets/lora/sd15 - LoKr v2.0.json
@@ -96,7 +96,7 @@
   "use_cp": false,
   "use_scalar": false,
   "use_tucker": false,
-  "use_wandb": false,
+  "log_with": "",
   "v2": false,
   "v_parameterization": false,
   "v_pred_like_loss": 0,
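
Reviewer note: the snippet below is an illustrative sketch only and is not part of the patch. It condenses the `validate_args_setting` helper added to `kohya_gui/common_gui.py` above into a boolean check (the real helper also logs a hint on failure), and the argument strings shown are hypothetical examples of what the new lr scheduler / optimizer argument validation accepts and rejects.

```python
import re

def validate_args_setting(input_string: str) -> bool:
    # Same pattern as the helper in kohya_gui/common_gui.py: an empty string,
    # or one or more key=value pairs separated by single spaces, with no
    # whitespace around '=' or inside keys and values.
    pattern = r"^(\S+=\S+)( \S+=\S+)*$|^$"
    return re.match(pattern, input_string) is not None

# Accepted (training proceeds):
assert validate_args_setting("")                                   # empty is valid
assert validate_args_setting("weight_decay=0.01")                  # single pair
assert validate_args_setting("weight_decay=0.01 betas=0.9,0.99")   # multiple pairs

# Rejected (training aborts before launch):
assert not validate_args_setting("weight_decay = 0.01")            # spaces around '='
assert not validate_args_setting("weight_decay=0.01  betas=0.9")   # double space between pairs
```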