From 25d7c6889f759903185174e327e7c47e074235e7 Mon Sep 17 00:00:00 2001
From: Maatra <40346852+ccharest93@users.noreply.github.com>
Date: Fri, 19 Apr 2024 18:14:15 +0100
Subject: [PATCH] Changed logger checkbox to dropdown, renamed use_wandb -> log_with (#2352)

---
 kohya_gui/class_advanced_training.py |  9 +++++----
 kohya_gui/dreambooth_gui.py          | 10 +++++-----
 kohya_gui/finetune_gui.py            | 10 +++++-----
 kohya_gui/lora_gui.py                | 10 +++++-----
 kohya_gui/textual_inversion_gui.py   | 10 +++++-----
 5 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py
index 67b4c79e2..c9784c304 100644
--- a/kohya_gui/class_advanced_training.py
+++ b/kohya_gui/class_advanced_training.py
@@ -510,10 +510,11 @@ def list_state_dirs(path):
                 value=self.config.get("advanced.max_data_loader_n_workers", 0),
             )
         with gr.Row():
-            self.use_wandb = gr.Checkbox(
-                label="WANDB Logging",
-                value=self.config.get("advanced.use_wandb", False),
-                info="If unchecked, tensorboard will be used as the default for logging.",
+            self.log_with = gr.Dropdown(
+                label="Logging",
+                choices=["","wandb", "tensorboard","all"],
+                value="",
+                info="Loggers to use, tensorboard will be used as the default.",
             )
             self.wandb_api_key = gr.Textbox(
                 label="WANDB API Key",
diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py
index 5433c42ba..bcbe7e59c 100644
--- a/kohya_gui/dreambooth_gui.py
+++ b/kohya_gui/dreambooth_gui.py
@@ -159,7 +159,7 @@ def save_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -317,7 +317,7 @@ def open_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -470,7 +470,7 @@ def train_model(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -783,7 +783,7 @@ def train_model(
         ),
         "train_batch_size": train_batch_size,
         "train_data_dir": train_data_dir,
-        "use_wandb": use_wandb,
+        "log_with": log_with,
         "v2": v2,
         "v_parameterization": v_parameterization,
         "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None,
@@ -1056,7 +1056,7 @@ def dreambooth_tab(
         advanced_training.save_every_n_steps,
         advanced_training.save_last_n_steps,
         advanced_training.save_last_n_steps_state,
-        advanced_training.use_wandb,
+        advanced_training.log_with,
         advanced_training.wandb_api_key,
         advanced_training.wandb_run_name,
         advanced_training.log_tracker_name,
diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py
index 9e8906f38..4c7de548e 100644
--- a/kohya_gui/finetune_gui.py
+++ b/kohya_gui/finetune_gui.py
@@ -169,7 +169,7 @@ def save_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -335,7 +335,7 @@ def open_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -507,7 +507,7 @@ def train_model(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -856,7 +856,7 @@ def train_model(
         "train_batch_size": train_batch_size,
         "train_data_dir": image_folder,
         "train_text_encoder": train_text_encoder,
-        "use_wandb": use_wandb,
+        "log_with": log_with,
         "v2": v2,
         "v_parameterization": v_parameterization,
         "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None,
@@ -1209,7 +1209,7 @@ def list_presets(path):
         advanced_training.save_every_n_steps,
         advanced_training.save_last_n_steps,
         advanced_training.save_last_n_steps_state,
-        advanced_training.use_wandb,
+        advanced_training.log_with,
         advanced_training.wandb_api_key,
         advanced_training.wandb_run_name,
         advanced_training.log_tracker_name,
diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py
index 1ca4cad96..059705404 100644
--- a/kohya_gui/lora_gui.py
+++ b/kohya_gui/lora_gui.py
@@ -195,7 +195,7 @@ def save_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -400,7 +400,7 @@ def open_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -635,7 +635,7 @@ def train_model(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -1152,7 +1152,7 @@ def train_model(
         "train_data_dir": train_data_dir,
         "training_comment": training_comment,
         "unet_lr": unet_lr if not 0 else None,
-        "use_wandb": use_wandb,
+        "log_with": log_with,
         "v2": v2,
         "v_parameterization": v_parameterization,
         "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None,
@@ -2223,7 +2223,7 @@ def update_LoRA_settings(
         advanced_training.save_every_n_steps,
         advanced_training.save_last_n_steps,
         advanced_training.save_last_n_steps_state,
-        advanced_training.use_wandb,
+        advanced_training.log_with,
         advanced_training.wandb_api_key,
         advanced_training.wandb_run_name,
         advanced_training.log_tracker_name,
diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py
index 41b89c64f..4e7b6a45a 100644
--- a/kohya_gui/textual_inversion_gui.py
+++ b/kohya_gui/textual_inversion_gui.py
@@ -160,7 +160,7 @@ def save_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -319,7 +319,7 @@ def open_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -471,7 +471,7 @@ def train_model(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -800,7 +800,7 @@ def train_model(
         "token_string": token_string,
         "train_batch_size": train_batch_size,
         "train_data_dir": train_data_dir,
-        "use_wandb": use_wandb,
+        "log_with": log_with,
         "v2": v2,
         "v_parameterization": v_parameterization,
         "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None,
@@ -1167,7 +1167,7 @@ def list_embedding_files(path):
         advanced_training.save_every_n_steps,
         advanced_training.save_last_n_steps,
         advanced_training.save_last_n_steps_state,
-        advanced_training.use_wandb,
+        advanced_training.log_with,
         advanced_training.wandb_api_key,
         advanced_training.wandb_run_name,
         advanced_training.log_tracker_name,