feat(config) Update default implementations of configurations
juanwulu committed Oct 3, 2024
1 parent d5044b4 commit d4159dd
Showing 47 changed files with 243 additions and 656 deletions.
1 change: 0 additions & 1 deletion configs/__init__.py
@@ -1 +0,0 @@
-# this file is needed here to include configs when building project as a package
25 changes: 10 additions & 15 deletions configs/callbacks/default.yaml
@@ -1,22 +1,17 @@
 defaults:
-  - model_checkpoint
-  - early_stopping
-  - model_summary
-  - rich_progress_bar
+  - model_checkpoint # checkpointing
+  - rich_model_summary # model summary
+  - tqdm_progress_bar # progress bar
   - _self_

+# Overrides the default configurations
 model_checkpoint:
   dirpath: ${paths.output_dir}/checkpoints
-  filename: "epoch_{epoch:03d}"
-  monitor: "val/acc"
-  mode: "max"
-  save_last: True
-  auto_insert_metric_name: False
-
-early_stopping:
-  monitor: "val/acc"
-  patience: 100
-  mode: "max"
+  filename: "epoch_{epoch:03d}_${oc.env:PROJECT_GIT_SHORT_SHA}"
+  monitor: "val/loss"
+  mode: "min"
+  save_last: true
+  auto_insert_metric_name: false

 model_summary:
-  max_depth: -1
+  max_depth: 2
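A callbacks group like the one above is normally consumed by walking the composed `callbacks` node and instantiating every child that declares a `_target_`. The sketch below is illustrative only (the helper name `instantiate_callbacks` is an assumption, not code from this commit); note also that the new checkpoint filename interpolates ${oc.env:PROJECT_GIT_SHORT_SHA}, so that environment variable must be set at runtime.

from hydra.utils import instantiate
from omegaconf import DictConfig


def instantiate_callbacks(callbacks_cfg: DictConfig) -> list:
    # Build Lightning callbacks from a Hydra callbacks group: every child node
    # that declares a _target_ (model_checkpoint, rich_model_summary, ...) is
    # turned into the corresponding callback object.
    callbacks = []
    for _, cb_cfg in callbacks_cfg.items():
        if isinstance(cb_cfg, DictConfig) and "_target_" in cb_cfg:
            callbacks.append(instantiate(cb_cfg))
    return callbacks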
7 changes: 4 additions & 3 deletions configs/callbacks/early_stopping.yaml
@@ -1,9 +1,10 @@
-# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.EarlyStopping.html
+# See https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.EarlyStopping.html#lightning.pytorch.callbacks.EarlyStopping

 early_stopping:
   _target_: lightning.pytorch.callbacks.EarlyStopping
+  _convert_: "all"
   monitor: ??? # quantity to be monitored, must be specified !!!
-  min_delta: 0. # minimum change in the monitored quantity to qualify as an improvement
+  min_delta: 0.0 # minimum change in the monitored quantity to qualify as an improvement
   patience: 3 # number of checks with no improvement after which training will be stopped
   verbose: False # verbosity mode
   mode: "min" # "max" means higher metric value is better, can be also "min"
@@ -12,4 +13,4 @@ early_stopping:
   stopping_threshold: null # stop training immediately once the monitored quantity reaches this threshold
   divergence_threshold: null # stop training as soon as the monitored quantity becomes worse than this threshold
   check_on_train_epoch_end: null # whether to run early stopping at the end of the training epoch
-  # log_rank_zero_only: False # this keyword argument isn't available in stable version
+  log_rank_zero_only: False # this keyword argument isn't available in stable version
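The added `_convert_: "all"` flag makes Hydra pass nested config values to the callback as plain Python objects rather than OmegaConf containers. A quick, standalone way to sanity-check this file (a sketch that assumes it is run from the repository root; the metric name "val/loss" is only an example, not something this commit fixes) is:

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Load only this callback config; `monitor` is declared as ??? (mandatory),
# so a value has to be supplied at instantiation time.
cfg = OmegaConf.load("configs/callbacks/early_stopping.yaml")
early_stopping = instantiate(cfg.early_stopping, monitor="val/loss")
print(type(early_stopping).__name__)  # EarlyStopping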
3 changes: 2 additions & 1 deletion configs/callbacks/model_checkpoint.yaml
@@ -1,7 +1,8 @@
-# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html
+# See https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html#lightning.pytorch.callbacks.ModelCheckpoint

 model_checkpoint:
   _target_: lightning.pytorch.callbacks.ModelCheckpoint
+  _convert_: "all"
   dirpath: null # directory to save the model file
   filename: null # checkpoint filename
   monitor: null # name of the logged metric which determines when model is improving
7 changes: 4 additions & 3 deletions configs/callbacks/model_summary.yaml
@@ -1,5 +1,6 @@
-# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html
+# See https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelSummary.html

 model_summary:
-  _target_: lightning.pytorch.callbacks.RichModelSummary
-  max_depth: 1 # the maximum depth of layer nesting that the summary will include
+  _target_: lightning.pytorch.callbacks.ModelSummary
+  _convert_: "all"
+  max_depth: 1
6 changes: 6 additions & 0 deletions configs/callbacks/rich_model_summary.yaml
@@ -0,0 +1,6 @@
+# See https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html?highlight=richmodelsummary#lightning.pytorch.callbacks.RichModelSummary
+
+model_summary:
+  _target_: lightning.pytorch.callbacks.RichModelSummary
+  _convert_: "all"
+  max_depth: 1
18 changes: 17 additions & 1 deletion configs/callbacks/rich_progress_bar.yaml
@@ -1,4 +1,20 @@
-# https://lightning.ai/docs/pytorch/latest/api/lightning.pytorch.callbacks.RichProgressBar.html
+# See https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichProgressBar.html?highlight=progressbar#lightning.pytorch.callbacks.RichProgressBar

 rich_progress_bar:
   _target_: lightning.pytorch.callbacks.RichProgressBar
+  _convert_: "all"
+  refresh_rate: 1 # int: Determines at which rate (in number of batches) the progress bars get updated. Set it to 0 to disable the display.
+  leave: false # bool: Leaves the finished progress bar in the terminal at the end of the epoch. Default: False
+  theme: # RichProgressBarTheme: Contains styles used to stylize the progress bar.
+    # See https://lightning.ai/docs/pytorch/stable/_modules/lightning/pytorch/callbacks/progress/rich_progress.html#RichProgressBarTheme
+    _target_: lightning.pytorch.callbacks.progress.rich_progress.RichProgressBarTheme
+    _convert_: "all"
+    description: "Steps" # str: Description of the progress bar.
+    progress_bar: "#CFB991" # str: Style for the bar in progress.
+    progress_bar_finished: "#A0C544" # str: Style for the finished progress bar.
+    progress_bar_pulse: "#555960" # str: Style for the progress bar when `IterableDataset` is being processed.
+    batch_progress: "white" # str: Style for the progress tracker (i.e 10/50 batches completed).
+    time: "grey54" # str: Style for the processed time and estimate time remaining
+    processing_speed: "grey70" # str: Style for the speed of the batches being processed
+    metrics: "white" # str: Style for the metrics
+  console_kwargs: null # Optional[Dict[str, Any]]: Args for constructing a Console
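Because the `theme` node carries its own `_target_`, Hydra's default recursive instantiation builds the RichProgressBarTheme first and then passes it to RichProgressBar. A minimal sketch of that behaviour (illustrative only, assuming it is run from the repository root):

from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("configs/callbacks/rich_progress_bar.yaml")
# _recursive_ defaults to True, so the nested theme config is instantiated
# into a RichProgressBarTheme before the progress bar itself is constructed.
bar = instantiate(cfg.rich_progress_bar)
print(type(bar).__name__)  # RichProgressBar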
20 changes: 20 additions & 0 deletions configs/callbacks/tqdm_progress_bar.yaml
@@ -0,0 +1,20 @@
+# See https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichProgressBar.html?highlight=progressbar#lightning.pytorch.callbacks.RichProgressBar
+
+rich_progress_bar:
+  _target_: lightning.pytorch.callbacks.RichProgressBar
+  _convert_: "all"
+  refresh_rate: 1 # int: Determines at which rate (in number of batches) the progress bars get updated. Set it to 0 to disable the display.
+  leave: false # bool: Leaves the finished progress bar in the terminal at the end of the epoch. Default: False
+  theme: # RichProgressBarTheme: Contains styles used to stylize the progress bar.
+    # See https://lightning.ai/docs/pytorch/stable/_modules/lightning/pytorch/callbacks/progress/rich_progress.html#RichProgressBarTheme
+    _target_: lightning.pytorch.callbacks.progress.rich_progress.RichProgressBarTheme
+    _convert_: "all"
+    description: "Steps" # str: Description of the progress bar.
+    progress_bar: "#CFB991" # str: Style for the bar in progress.
+    progress_bar_finished: "#A0C544" # str: Style for the finished progress bar.
+    progress_bar_pulse: "#555960" # str: Style for the progress bar when `IterableDataset` is being processed.
+    batch_progress: "white" # str: Style for the progress tracker (i.e 10/50 batches completed).
+    time: "grey54" # str: Style for the processed time and estimate time remaining
+    processing_speed: "grey70" # str: Style for the speed of the batches being processed
+    metrics: "white" # str: Style for the metrics
+  console_kwargs: null # Optional[Dict[str, Any]]: Args for constructing a Console
6 changes: 0 additions & 6 deletions configs/data/mnist.yaml

This file was deleted.

File renamed without changes.
35 changes: 0 additions & 35 deletions configs/debug/default.yaml

This file was deleted.

9 changes: 0 additions & 9 deletions configs/debug/fdr.yaml

This file was deleted.

12 changes: 0 additions & 12 deletions configs/debug/limit.yaml

This file was deleted.

13 changes: 0 additions & 13 deletions configs/debug/overfit.yaml

This file was deleted.

12 changes: 0 additions & 12 deletions configs/debug/profiler.yaml

This file was deleted.

30 changes: 23 additions & 7 deletions configs/eval.yaml
@@ -1,18 +1,34 @@
 # @package _global_

 defaults:
   - _self_
-  - data: mnist # choose datamodule with `test_dataloader()` for evaluation
-  - model: mnist
-  - logger: null
+  - dataset: null
+  - model: null
+  - callbacks: default
+  - logger: tensorboard
   - trainer: default
   - paths: default
   - extras: default
   - hydra: default
+  # Experiment specific settings
+  - experiment: null

+# Enforce clean git repository
+clean_repo: false
+
+# Job name
 task_name: "eval"

-tags: ["dev"]
+# Experiment tags
+tags: "dev"

+# If true, train the model from scratch
+train: false
+
+# If to compile the model for acceleration
+compile: false
+
+# Specify the previous checkpoint to resume training
+checkpoint: null
+
-# passing checkpoint path is necessary for evaluation
-ckpt_path: ???
+# Global seed
+seed: 42
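The reworked eval config now mirrors the training layout: `dataset` and `model` default to null and are expected to come from an experiment config or command-line overrides. A hypothetical entrypoint consuming it could look like the sketch below (the file name, its location at the repository root, and the function body are assumptions, not part of this commit):

import hydra
from omegaconf import DictConfig


@hydra.main(version_base="1.3", config_path="configs", config_name="eval")
def main(cfg: DictConfig) -> None:
    # With dataset/model left as null, an invocation such as
    #   python eval.py experiment=<name> checkpoint=<path>
    # is expected to supply the concrete choices.
    print(cfg.task_name, cfg.tags, cfg.seed)


if __name__ == "__main__":
    main()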
File renamed without changes.
41 changes: 0 additions & 41 deletions configs/experiment/example.yaml

This file was deleted.

12 changes: 6 additions & 6 deletions configs/extras/default.yaml
@@ -1,8 +1,8 @@
-# disable python warnings if they annoy you
-ignore_warnings: False
+# disable Python warnings
+ignore_warnings: true

-# ask user for tags if none are provided in the config
-enforce_tags: True
+# If true, enforce tags for experiment tracking
+enforce_tags: true

-# pretty print config tree at the start of the run using Rich library
-print_config: True
+# pretty print the configuration tree before training
+print_config: true
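These flags are typically applied once at startup. The helper below is only a sketch of how they might be consumed; the name `apply_extras` and its exact behaviour are assumptions, since the implementation is not part of this diff.

import warnings

from omegaconf import DictConfig


def apply_extras(cfg: DictConfig) -> None:
    extras = cfg.get("extras")
    if not extras:
        return
    if extras.get("ignore_warnings"):
        # Silence Python warnings when ignore_warnings is true.
        warnings.filterwarnings("ignore")
    if extras.get("enforce_tags") and not cfg.get("tags"):
        raise ValueError("enforce_tags is true but no experiment tags were provided.")
    if extras.get("print_config"):
        # Pretty-printing of the resolved config tree (e.g. with rich) would go here.
        print(cfg)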
52 changes: 0 additions & 52 deletions configs/hparams_search/mnist_optuna.yaml

This file was deleted.

19 changes: 6 additions & 13 deletions configs/hydra/default.yaml
@@ -1,19 +1,12 @@
 # https://hydra.cc/docs/configure_hydra/intro/
-
-# enable color logging
 defaults:
+  # enable color logging
   - override hydra_logging: colorlog
   - override job_logging: colorlog
-
-# output directory, generated dynamically on each run
+# Output directory
 run:
-  dir: ${paths.log_dir}/${task_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
+  # directory for a single run
+  dir: ${paths.log_dir}/${task_name}/output/${join_string_underscore:${tags}}/${now:%Y-%m-%d_%H-%M-%S}
 sweep:
-  dir: ${paths.log_dir}/${task_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
-  subdir: ${hydra.job.num}
-
-job_logging:
-  handlers:
-    file:
-      # Incorporates fix from https://github.com/facebookresearch/hydra/pull/2242
-      filename: ${hydra.runtime.output_dir}/${task_name}.log
+  # directory for a queue of sub-runs
+  dir: ${paths.log_dir}/${task_name}/output/multiruns/${join_string_underscore:${tags}}/${now:%Y-%m-%d_%H-%M-%S}
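The new run and sweep directories interpolate ${join_string_underscore:${tags}}, which is not a built-in OmegaConf resolver, so it has to be registered before Hydra composes the config. A plausible registration, handling both a single tag string and a list of tags, is sketched below (the project's actual implementation is not shown in this commit):

from omegaconf import OmegaConf


def _join_string_underscore(tags) -> str:
    # Accept either a single tag ("dev") or a sequence of tags (["dev", "mnist"]).
    if isinstance(tags, str):
        return tags
    return "_".join(str(tag) for tag in tags)


OmegaConf.register_new_resolver("join_string_underscore", _join_string_underscore)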