From 10295d2cda531681fd88e7641064f5ec34f27ad0 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Fri, 19 Jan 2024 11:27:09 +0000 Subject: [PATCH 01/18] build: Update lightning version --- poetry.lock | 40 ++++++++++++++++++---------------------- pyproject.toml | 2 +- 2 files changed, 19 insertions(+), 23 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8b32e9b1..2709009b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -4074,9 +4074,9 @@ files = [ [package.dependencies] numpy = [ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, - {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, + {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, ] [[package]] @@ -4098,9 +4098,9 @@ files = [ [package.dependencies] numpy = [ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, - {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, + {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, ] [[package]] @@ -5155,38 +5155,34 @@ dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatc [[package]] name = "pytorch-lightning" -version = "1.9.5" +version = "2.1.3" description = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytorch-lightning-1.9.5.tar.gz", hash = "sha256:925fe7b80ddf04859fa385aa493b260be4000b11a2f22447afb4a932d1f07d26"}, - {file = "pytorch_lightning-1.9.5-py3-none-any.whl", hash = "sha256:06821558158623c5d2ecf5d3d0374dc8bd661e0acd3acf54a6d6f71737c156c5"}, + {file = "pytorch-lightning-2.1.3.tar.gz", hash = "sha256:2500b002fa09cb37b0e12f879876bf30a2d260b0f04783d33264dab175f0c966"}, + {file = "pytorch_lightning-2.1.3-py3-none-any.whl", hash = "sha256:03ed186035a230b161130e0d8ecf1dd6657ff7e3f1520e9257b0db7650f9aeea"}, ] [package.dependencies] -fsspec = {version = ">2021.06.0", extras = ["http"]} -lightning-utilities = ">=0.6.0.post0" +fsspec = {version = ">=2022.5.0", extras = ["http"]} +lightning-utilities = ">=0.8.0" numpy = ">=1.17.2" -packaging = ">=17.1" +packaging = ">=20.0" PyYAML = ">=5.4" -torch = ">=1.10.0" +torch = ">=1.12.0" torchmetrics = ">=0.7.0" tqdm = ">=4.57.0" typing-extensions = ">=4.0.0" [package.extras] -all = ["colossalai (>=0.2.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.11.1)"] -colossalai = ["colossalai (>=0.2.0)"] -deepspeed = ["deepspeed (>=0.6.0)"] -dev = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "colossalai (>=0.2.0)", "coverage (==6.5.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "fastapi (<0.87.0)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnx (<1.14.0)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "rich (>=10.14.0,!=10.15.0.a)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.11.1)", "uvicorn (<0.19.1)"] -examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.7.1)", "torchmetrics (>=0.10.0)", "torchvision (>=0.11.1)"] -extra = ["hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)"] -fairscale = ["fairscale (>=0.4.5)"] -hivemind = ["hivemind (==1.1.5)"] -horovod = ["horovod (>=0.21.2,!=0.24.0)"] -strategies = ["colossalai (>=0.2.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)"] -test = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "coverage (==6.5.0)", "fastapi (<0.87.0)", "onnx (<1.14.0)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn (<0.19.1)"] +all = ["bitsandbytes (<=0.41.1)", "deepspeed (>=0.8.2,<=0.9.3)", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.26.1)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", 
"tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] +deepspeed = ["deepspeed (>=0.8.2,<=0.9.3)"] +dev = ["bitsandbytes (<=0.41.1)", "cloudpickle (>=1.3)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "fastapi", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.26.1)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "rich (>=12.3.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)", "uvicorn"] +examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.15.0)", "lightning-utilities (>=0.8.0)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] +extra = ["bitsandbytes (<=0.41.1)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.26.1)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)"] +strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] +test = ["cloudpickle (>=1.3)", "coverage (==7.3.1)", "fastapi", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn"] [[package]] name = "pytz" @@ -7610,4 +7606,4 @@ test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_moc [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.11" -content-hash = "54c2bce1c42e7f3a9b3f80b326b9d216099a30211f2828367aeba31f558abac5" +content-hash = "b38bf7787c99e6291ad7a700db8d7308176a181fac99959b00269303f2131c32" diff --git a/pyproject.toml b/pyproject.toml index 2a401702..13e2c516 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ hydra-plugins = { path = "quadra_hydra_plugin", optional = true } torch = { version = "~2.1", source = "torch_cu121" } torchvision = { version = "~0.16", source = "torch_cu121" } -pytorch_lightning = "1.9.5" +pytorch_lightning = "~2.1" torchsummary = "~1.5" torchmetrics = "~0.10" hydra_core = "~1.3" From eeaa31dac62eb31135f566e22d64ad29167fff14 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Fri, 19 Jan 2024 14:01:56 +0000 Subject: [PATCH 02/18] wip: Change incorrect code after lightning upgrade --- docs/tutorials/devices_setup.md | 1 - quadra/configs/experiment/base/anomaly/cfa.yaml | 4 +--- quadra/configs/experiment/base/anomaly/cflow.yaml | 4 +--- quadra/configs/experiment/base/anomaly/csflow.yaml | 4 +--- quadra/configs/experiment/base/anomaly/draem.yaml | 4 +--- quadra/configs/experiment/base/anomaly/efficient_ad.yaml | 8 +------- quadra/configs/experiment/base/anomaly/fastflow.yaml | 4 +--- quadra/configs/experiment/base/anomaly/padim.yaml | 8 +------- quadra/configs/experiment/base/anomaly/patchcore.yaml | 8 +------- quadra/configs/experiment/base/ssl/linear_eval.yaml | 1 - quadra/configs/model/anomalib/cfa.yaml | 4 +--- quadra/configs/model/anomalib/csflow.yaml | 8 +------- quadra/configs/trainer/lightning_cpu.yaml | 1 - quadra/configs/trainer/lightning_gpu.yaml | 1 - quadra/configs/trainer/lightning_gpu_bf16.yaml | 1 - quadra/configs/trainer/lightning_gpu_fp16.yaml | 1 - quadra/configs/trainer/lightning_multigpu.yaml | 1 - quadra/modules/ssl/hyperspherical.py 
| 2 +- quadra/tasks/base.py | 4 ++-- quadra/utils/utils.py | 4 ++-- 20 files changed, 15 insertions(+), 58 deletions(-) diff --git a/docs/tutorials/devices_setup.md b/docs/tutorials/devices_setup.md index 3a8eea5e..05786932 100644 --- a/docs/tutorials/devices_setup.md +++ b/docs/tutorials/devices_setup.md @@ -12,7 +12,6 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 ``` diff --git a/quadra/configs/experiment/base/anomaly/cfa.yaml b/quadra/configs/experiment/base/anomaly/cfa.yaml index 9d0176d0..1985892a 100644 --- a/quadra/configs/experiment/base/anomaly/cfa.yaml +++ b/quadra/configs/experiment/base/anomaly/cfa.yaml @@ -51,7 +51,6 @@ trainer: num_nodes: 1 enable_progress_bar: true overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false accumulate_grad_batches: 1 @@ -76,9 +75,8 @@ trainer: deterministic: false reload_dataloaders_every_n_epochs: 0 auto_lr_find: false - replace_sampler_ddp: true + use_distributed_sampler: true detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/cflow.yaml b/quadra/configs/experiment/base/anomaly/cflow.yaml index 67df900d..df3f29d2 100644 --- a/quadra/configs/experiment/base/anomaly/cflow.yaml +++ b/quadra/configs/experiment/base/anomaly/cflow.yaml @@ -50,7 +50,6 @@ trainer: num_nodes: 1 enable_progress_bar: true overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false accumulate_grad_batches: 1 @@ -75,9 +74,8 @@ trainer: deterministic: false reload_dataloaders_every_n_epochs: 0 auto_lr_find: false - replace_sampler_ddp: true + use_distributed_sampler: true detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/csflow.yaml b/quadra/configs/experiment/base/anomaly/csflow.yaml index faa6f256..c545c737 100644 --- a/quadra/configs/experiment/base/anomaly/csflow.yaml +++ b/quadra/configs/experiment/base/anomaly/csflow.yaml @@ -49,7 +49,6 @@ trainer: num_nodes: 1 enable_progress_bar: true overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false accumulate_grad_batches: 1 @@ -74,9 +73,8 @@ trainer: deterministic: false reload_dataloaders_every_n_epochs: 0 auto_lr_find: false - replace_sampler_ddp: true + use_distributed_sampler: true detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/draem.yaml b/quadra/configs/experiment/base/anomaly/draem.yaml index c696c621..656f7a7e 100644 --- a/quadra/configs/experiment/base/anomaly/draem.yaml +++ b/quadra/configs/experiment/base/anomaly/draem.yaml @@ -52,7 +52,6 @@ trainer: num_nodes: 1 enable_progress_bar: true overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. 
fast_dev_run: false accumulate_grad_batches: 1 @@ -77,9 +76,8 @@ trainer: deterministic: false reload_dataloaders_every_n_epochs: 0 auto_lr_find: false - replace_sampler_ddp: true + use_distributed_sampler: true detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml index 4d1fd57a..ecf70657 100644 --- a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml +++ b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml @@ -36,10 +36,8 @@ trainer: accelerator: auto strategy: accumulate_grad_batches: 1 - amp_backend: native auto_lr_find: false auto_scale_batch_size: false - auto_select_gpus: false benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} default_root_dir: null @@ -50,7 +48,6 @@ trainer: enable_progress_bar: true fast_dev_run: false gradient_clip_val: 0 - ipus: null limit_predict_batches: 1.0 limit_test_batches: 1.0 limit_train_batches: 1.0 @@ -61,7 +58,6 @@ trainer: max_time: null min_epochs: null min_steps: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 @@ -69,8 +65,6 @@ trainer: plugins: null precision: 32 profiler: null - replace_sampler_ddp: true + use_distributed_sampler: true sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 val_check_interval: 1.0 # Don't validate before extracting features. diff --git a/quadra/configs/experiment/base/anomaly/fastflow.yaml b/quadra/configs/experiment/base/anomaly/fastflow.yaml index 61eedf64..4af44911 100644 --- a/quadra/configs/experiment/base/anomaly/fastflow.yaml +++ b/quadra/configs/experiment/base/anomaly/fastflow.yaml @@ -48,7 +48,6 @@ trainer: num_nodes: 1 enable_progress_bar: true overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false accumulate_grad_batches: 1 @@ -73,9 +72,8 @@ trainer: deterministic: false reload_dataloaders_every_n_epochs: 0 auto_lr_find: false - replace_sampler_ddp: true + use_distributed_sampler: true detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/padim.yaml b/quadra/configs/experiment/base/anomaly/padim.yaml index 1b05f7a2..32ee3c8b 100644 --- a/quadra/configs/experiment/base/anomaly/padim.yaml +++ b/quadra/configs/experiment/base/anomaly/padim.yaml @@ -36,10 +36,8 @@ trainer: accelerator: auto strategy: accumulate_grad_batches: 1 - amp_backend: native auto_lr_find: false auto_scale_batch_size: false - auto_select_gpus: false benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. default_root_dir: null @@ -50,7 +48,6 @@ trainer: enable_progress_bar: true fast_dev_run: false gradient_clip_val: 0 - ipus: null limit_predict_batches: 1.0 limit_test_batches: 1.0 limit_train_batches: 1.0 @@ -61,7 +58,6 @@ trainer: max_time: null min_epochs: null min_steps: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 @@ -69,8 +65,6 @@ trainer: plugins: null precision: 32 profiler: null - replace_sampler_ddp: true + use_distributed_sampler: true sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 val_check_interval: 1.0 # Don't validate before extracting features. 
diff --git a/quadra/configs/experiment/base/anomaly/patchcore.yaml b/quadra/configs/experiment/base/anomaly/patchcore.yaml index f71ec1e2..ea719c26 100644 --- a/quadra/configs/experiment/base/anomaly/patchcore.yaml +++ b/quadra/configs/experiment/base/anomaly/patchcore.yaml @@ -36,10 +36,8 @@ trainer: accelerator: auto strategy: accumulate_grad_batches: 1 - amp_backend: native auto_lr_find: false auto_scale_batch_size: false - auto_select_gpus: false benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. default_root_dir: null @@ -50,7 +48,6 @@ trainer: enable_progress_bar: true fast_dev_run: false gradient_clip_val: 0 - ipus: null limit_predict_batches: 1.0 limit_test_batches: 1.0 limit_train_batches: 1.0 @@ -61,7 +58,6 @@ trainer: max_time: null min_epochs: null min_steps: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 @@ -69,8 +65,6 @@ trainer: plugins: null precision: 32 profiler: null - replace_sampler_ddp: true + use_distributed_sampler: true sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 val_check_interval: 1.0 # Don't validate before extracting features. diff --git a/quadra/configs/experiment/base/ssl/linear_eval.yaml b/quadra/configs/experiment/base/ssl/linear_eval.yaml index f8ac93a8..50ab0f07 100644 --- a/quadra/configs/experiment/base/ssl/linear_eval.yaml +++ b/quadra/configs/experiment/base/ssl/linear_eval.yaml @@ -18,7 +18,6 @@ core: backbone: model: num_classes: 2 - ckpt_path: ??? trainer: devices: [2, 3] diff --git a/quadra/configs/model/anomalib/cfa.yaml b/quadra/configs/model/anomalib/cfa.yaml index bed23d7f..866c2c26 100644 --- a/quadra/configs/model/anomalib/cfa.yaml +++ b/quadra/configs/model/anomalib/cfa.yaml @@ -35,7 +35,6 @@ trainer: devices: [0] enable_progress_bar: true overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. 
fast_dev_run: false accumulate_grad_batches: 1 @@ -61,9 +60,8 @@ trainer: deterministic: false reload_dataloaders_every_n_epochs: 0 auto_lr_find: false - replace_sampler_ddp: true + use_distributed_sampler: true detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/model/anomalib/csflow.yaml b/quadra/configs/model/anomalib/csflow.yaml index 85c78f82..907dbdfd 100644 --- a/quadra/configs/model/anomalib/csflow.yaml +++ b/quadra/configs/model/anomalib/csflow.yaml @@ -37,10 +37,8 @@ metrics: trainer: accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> accumulate_grad_batches: 1 - amp_backend: native auto_lr_find: false auto_scale_batch_size: false - auto_select_gpus: false benchmark: false check_val_every_n_epoch: 1 default_root_dir: null @@ -52,7 +50,6 @@ trainer: enable_progress_bar: true fast_dev_run: false gradient_clip_val: 1 # Grad clip value set based on the official implementation - ipus: null limit_predict_batches: 1.0 limit_test_batches: 1.0 limit_train_batches: 1.0 @@ -63,7 +60,6 @@ trainer: max_time: null min_epochs: null min_steps: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 @@ -72,9 +68,7 @@ trainer: precision: 32 profiler: null reload_dataloaders_every_n_epochs: 0 - replace_sampler_ddp: true + use_distributed_sampler: true strategy: null sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 val_check_interval: 1.0 diff --git a/quadra/configs/trainer/lightning_cpu.yaml b/quadra/configs/trainer/lightning_cpu.yaml index 3236010b..d2673b42 100644 --- a/quadra/configs/trainer/lightning_cpu.yaml +++ b/quadra/configs/trainer/lightning_cpu.yaml @@ -4,6 +4,5 @@ accelerator: cpu min_epochs: 1 max_epochs: 10 strategy: null -resume_from_checkpoint: null log_every_n_steps: 10 precision: 32 diff --git a/quadra/configs/trainer/lightning_gpu.yaml b/quadra/configs/trainer/lightning_gpu.yaml index 66e52054..1ef20b47 100644 --- a/quadra/configs/trainer/lightning_gpu.yaml +++ b/quadra/configs/trainer/lightning_gpu.yaml @@ -3,5 +3,4 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 diff --git a/quadra/configs/trainer/lightning_gpu_bf16.yaml b/quadra/configs/trainer/lightning_gpu_bf16.yaml index 868770e7..7d851fc4 100644 --- a/quadra/configs/trainer/lightning_gpu_bf16.yaml +++ b/quadra/configs/trainer/lightning_gpu_bf16.yaml @@ -3,6 +3,5 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 precision: bf16 diff --git a/quadra/configs/trainer/lightning_gpu_fp16.yaml b/quadra/configs/trainer/lightning_gpu_fp16.yaml index 081f08d9..5da7940d 100644 --- a/quadra/configs/trainer/lightning_gpu_fp16.yaml +++ b/quadra/configs/trainer/lightning_gpu_fp16.yaml @@ -3,6 +3,5 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 precision: 16 diff --git a/quadra/configs/trainer/lightning_multigpu.yaml b/quadra/configs/trainer/lightning_multigpu.yaml index bdcc0ee4..0fd5fda7 100644 --- a/quadra/configs/trainer/lightning_multigpu.yaml +++ b/quadra/configs/trainer/lightning_multigpu.yaml @@ -6,5 +6,4 @@ max_epochs: 10 strategy: _target_: pytorch_lightning.strategies.DDPStrategy find_unused_parameters: false -resume_from_checkpoint: null log_every_n_steps: 10 diff --git a/quadra/modules/ssl/hyperspherical.py 
b/quadra/modules/ssl/hyperspherical.py index b5186594..b7242cac 100644 --- a/quadra/modules/ssl/hyperspherical.py +++ b/quadra/modules/ssl/hyperspherical.py @@ -199,7 +199,7 @@ def validation_step(self, batch, batch_idx): ) return {"val_loss": total_loss} - def validation_epoch_end(self, outputs): + def on_validation_epoch_end(self, outputs): avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() return {"val_loss": avg_loss} diff --git a/quadra/tasks/base.py b/quadra/tasks/base.py index 40c45590..72a1edcc 100644 --- a/quadra/tasks/base.py +++ b/quadra/tasks/base.py @@ -6,10 +6,10 @@ import hydra import torch from hydra.core.hydra_config import HydraConfig +from lightning_fabric.utilities.device_parser import _parse_gpu_ids from omegaconf import DictConfig, OmegaConf, open_dict from pytorch_lightning import Callback, LightningModule, Trainer from pytorch_lightning.loggers import Logger, MLFlowLogger -from pytorch_lightning.utilities.device_parser import parse_gpu_ids from pytorch_lightning.utilities.exceptions import MisconfigurationException from quadra import get_version @@ -231,7 +231,7 @@ def devices(self, devices) -> None: return try: - self._devices = parse_gpu_ids(devices, include_cuda=True) + self._devices = _parse_gpu_ids(devices, include_cuda=True) except MisconfigurationException: self._devices = 1 self.config.trainer["accelerator"] = "cpu" diff --git a/quadra/utils/utils.py b/quadra/utils/utils.py index 2644aa98..16d4760a 100644 --- a/quadra/utils/utils.py +++ b/quadra/utils/utils.py @@ -20,10 +20,10 @@ import torch from hydra.core.hydra_config import HydraConfig from hydra.utils import get_original_cwd +from lightning_fabric.utilities.device_parser import _parse_gpu_ids from omegaconf import DictConfig, OmegaConf from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.utilities import rank_zero_only -from pytorch_lightning.utilities.device_parser import parse_gpu_ids import quadra import quadra.utils.export as quadra_export @@ -255,7 +255,7 @@ def finish( tensorboard_logger = get_tensorboard_logger(trainer=trainer) file_names = ["config.yaml", "config_resolved.yaml", "config_tree.txt", "data/dataset.csv"] if "16" in str(trainer.precision): - index = parse_gpu_ids(config.trainer.devices, include_cuda=True)[0] + index = _parse_gpu_ids(config.trainer.devices, include_cuda=True)[0] device = "cuda:" + str(index) half_precision = True else: From 79f9777a47a37e5cd49c33950ddc9f00340d5c4d Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Fri, 19 Jan 2024 15:54:52 +0000 Subject: [PATCH 03/18] refactor: Update configurations after lightning upgrade --- quadra/configs/experiment/base/anomaly/cfa.yaml | 4 ---- quadra/configs/experiment/base/anomaly/cflow.yaml | 4 ---- quadra/configs/experiment/base/anomaly/csflow.yaml | 4 ---- quadra/configs/experiment/base/anomaly/draem.yaml | 4 ---- quadra/configs/experiment/base/anomaly/efficient_ad.yaml | 4 ---- quadra/configs/experiment/base/anomaly/fastflow.yaml | 4 ---- quadra/configs/experiment/base/anomaly/padim.yaml | 4 ---- quadra/configs/experiment/base/anomaly/patchcore.yaml | 4 ---- quadra/configs/model/anomalib/cfa.yaml | 4 ---- quadra/configs/model/anomalib/csflow.yaml | 4 ---- quadra/configs/trainer/lightning_cpu.yaml | 2 +- 11 files changed, 1 insertion(+), 41 deletions(-) diff --git a/quadra/configs/experiment/base/anomaly/cfa.yaml b/quadra/configs/experiment/base/anomaly/cfa.yaml index 1985892a..bafbdb51 100644 --- a/quadra/configs/experiment/base/anomaly/cfa.yaml +++ 
b/quadra/configs/experiment/base/anomaly/cfa.yaml @@ -65,7 +65,6 @@ trainer: limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. log_every_n_steps: 50 - strategy: null sync_batchnorm: false precision: 32 enable_model_summary: true @@ -74,9 +73,6 @@ trainer: benchmark: false deterministic: false reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false use_distributed_sampler: true detect_anomaly: false - auto_scale_batch_size: false plugins: null - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/cflow.yaml b/quadra/configs/experiment/base/anomaly/cflow.yaml index df3f29d2..0c142551 100644 --- a/quadra/configs/experiment/base/anomaly/cflow.yaml +++ b/quadra/configs/experiment/base/anomaly/cflow.yaml @@ -64,7 +64,6 @@ trainer: limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. log_every_n_steps: 50 - strategy: null sync_batchnorm: false precision: 32 enable_model_summary: true @@ -73,9 +72,6 @@ trainer: benchmark: false deterministic: false reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false use_distributed_sampler: true detect_anomaly: false - auto_scale_batch_size: false plugins: null - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/csflow.yaml b/quadra/configs/experiment/base/anomaly/csflow.yaml index c545c737..13c58d4e 100644 --- a/quadra/configs/experiment/base/anomaly/csflow.yaml +++ b/quadra/configs/experiment/base/anomaly/csflow.yaml @@ -63,7 +63,6 @@ trainer: limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. log_every_n_steps: 50 - strategy: null sync_batchnorm: false precision: 32 enable_model_summary: true @@ -72,9 +71,6 @@ trainer: benchmark: false deterministic: false reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false use_distributed_sampler: true detect_anomaly: false - auto_scale_batch_size: false plugins: null - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/draem.yaml b/quadra/configs/experiment/base/anomaly/draem.yaml index 656f7a7e..c2f96582 100644 --- a/quadra/configs/experiment/base/anomaly/draem.yaml +++ b/quadra/configs/experiment/base/anomaly/draem.yaml @@ -66,7 +66,6 @@ trainer: limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. 
log_every_n_steps: 50 - strategy: null sync_batchnorm: false precision: 32 enable_model_summary: true @@ -75,9 +74,6 @@ trainer: benchmark: false deterministic: false reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false use_distributed_sampler: true detect_anomaly: false - auto_scale_batch_size: false plugins: null - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml index ecf70657..f8dfac3a 100644 --- a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml +++ b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml @@ -34,10 +34,7 @@ logger: trainer: devices: [2] accelerator: auto - strategy: accumulate_grad_batches: 1 - auto_lr_find: false - auto_scale_batch_size: false benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} default_root_dir: null @@ -58,7 +55,6 @@ trainer: max_time: null min_epochs: null min_steps: null - multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 overfit_batches: 0.0 diff --git a/quadra/configs/experiment/base/anomaly/fastflow.yaml b/quadra/configs/experiment/base/anomaly/fastflow.yaml index 4af44911..841fe504 100644 --- a/quadra/configs/experiment/base/anomaly/fastflow.yaml +++ b/quadra/configs/experiment/base/anomaly/fastflow.yaml @@ -62,7 +62,6 @@ trainer: limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. log_every_n_steps: 50 - strategy: null sync_batchnorm: false precision: 32 enable_model_summary: true @@ -71,9 +70,6 @@ trainer: benchmark: false deterministic: false reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false use_distributed_sampler: true detect_anomaly: false - auto_scale_batch_size: false plugins: null - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/padim.yaml b/quadra/configs/experiment/base/anomaly/padim.yaml index 32ee3c8b..bb7f6768 100644 --- a/quadra/configs/experiment/base/anomaly/padim.yaml +++ b/quadra/configs/experiment/base/anomaly/padim.yaml @@ -34,10 +34,7 @@ logger: trainer: devices: [2] accelerator: auto - strategy: accumulate_grad_batches: 1 - auto_lr_find: false - auto_scale_batch_size: false benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. default_root_dir: null @@ -58,7 +55,6 @@ trainer: max_time: null min_epochs: null min_steps: null - multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 overfit_batches: 0.0 diff --git a/quadra/configs/experiment/base/anomaly/patchcore.yaml b/quadra/configs/experiment/base/anomaly/patchcore.yaml index ea719c26..50364d3e 100644 --- a/quadra/configs/experiment/base/anomaly/patchcore.yaml +++ b/quadra/configs/experiment/base/anomaly/patchcore.yaml @@ -34,10 +34,7 @@ logger: trainer: devices: [2] accelerator: auto - strategy: accumulate_grad_batches: 1 - auto_lr_find: false - auto_scale_batch_size: false benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. 
default_root_dir: null @@ -58,7 +55,6 @@ trainer: max_time: null min_epochs: null min_steps: null - multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 overfit_batches: 0.0 diff --git a/quadra/configs/model/anomalib/cfa.yaml b/quadra/configs/model/anomalib/cfa.yaml index 866c2c26..cd873b9a 100644 --- a/quadra/configs/model/anomalib/cfa.yaml +++ b/quadra/configs/model/anomalib/cfa.yaml @@ -50,7 +50,6 @@ trainer: val_check_interval: 1.0 # Don't validate before extracting features. log_every_n_steps: 50 accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - strategy: null sync_batchnorm: false precision: 32 enable_model_summary: true @@ -59,9 +58,6 @@ trainer: benchmark: false deterministic: false reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false use_distributed_sampler: true detect_anomaly: false - auto_scale_batch_size: false plugins: null - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/model/anomalib/csflow.yaml b/quadra/configs/model/anomalib/csflow.yaml index 907dbdfd..166911ac 100644 --- a/quadra/configs/model/anomalib/csflow.yaml +++ b/quadra/configs/model/anomalib/csflow.yaml @@ -37,8 +37,6 @@ metrics: trainer: accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> accumulate_grad_batches: 1 - auto_lr_find: false - auto_scale_batch_size: false benchmark: false check_val_every_n_epoch: 1 default_root_dir: null @@ -60,7 +58,6 @@ trainer: max_time: null min_epochs: null min_steps: null - multiple_trainloader_mode: max_size_cycle num_nodes: 1 num_sanity_val_steps: 0 overfit_batches: 0.0 @@ -69,6 +66,5 @@ trainer: profiler: null reload_dataloaders_every_n_epochs: 0 use_distributed_sampler: true - strategy: null sync_batchnorm: false val_check_interval: 1.0 diff --git a/quadra/configs/trainer/lightning_cpu.yaml b/quadra/configs/trainer/lightning_cpu.yaml index d2673b42..ae2b6bd1 100644 --- a/quadra/configs/trainer/lightning_cpu.yaml +++ b/quadra/configs/trainer/lightning_cpu.yaml @@ -3,6 +3,6 @@ devices: 1 accelerator: cpu min_epochs: 1 max_epochs: 10 -strategy: null +strategy: auto log_every_n_steps: 10 precision: 32 From 60723d895d4d0f0c83e87a75f47fd6293e578739 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Mon, 22 Jan 2024 10:49:20 +0000 Subject: [PATCH 04/18] build: Update lock file --- poetry.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index b9c1e7e4..3eb3d606 100644 --- a/poetry.lock +++ b/poetry.lock @@ -7606,4 +7606,4 @@ test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_moc [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.11" -content-hash = "44a4a78e129bf1504d046845cdf9014b8a196a5868630ed2e5131588eedff6e1" +content-hash = "1153edbac74fe70ca0744ca93bdc26808780d30e2bdfca1d3f0b7684fbfa8c66" From 975d4b8a1c3900432432a5f893ecb540a365f740 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Mon, 22 Jan 2024 12:33:05 +0000 Subject: [PATCH 05/18] fix: Fix incompatibilities with new lightning package --- quadra/modules/base.py | 3 ++- quadra/modules/ssl/byol.py | 6 ------ quadra/modules/ssl/dino.py | 10 +--------- quadra/tasks/classification.py | 10 +++------- quadra/tasks/segmentation.py | 6 ++++-- quadra/tasks/ssl.py | 8 ++++---- 6 files changed, 14 insertions(+), 29 deletions(-) diff --git a/quadra/modules/base.py b/quadra/modules/base.py index ae030d26..36cb28e4 100644 --- a/quadra/modules/base.py +++ b/quadra/modules/base.py @@ -67,7 +67,8 @@ def configure_optimizers(self) -> Tuple[List[Any], List[Dict[str, 
Any]]]: } return [self.optimizer], [lr_scheduler_conf] - def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): + # pylint: disable=unused-argument + def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx: int = 0): """Redefine optimizer zero grad.""" optimizer.zero_grad(set_to_none=True) diff --git a/quadra/modules/ssl/byol.py b/quadra/modules/ssl/byol.py index a439b4f8..64fceb4a 100644 --- a/quadra/modules/ssl/byol.py +++ b/quadra/modules/ssl/byol.py @@ -138,20 +138,14 @@ def optimizer_step( epoch: int, batch_idx: int, optimizer: Union[Optimizer, LightningOptimizer], - optimizer_idx: int = 0, optimizer_closure: Optional[Callable[[], Any]] = None, - on_tpu: bool = False, - using_lbfgs: bool = False, ) -> None: """Override optimizer step to update the teacher parameters.""" super().optimizer_step( epoch, batch_idx, optimizer, - optimizer_idx=optimizer_idx, optimizer_closure=optimizer_closure, - on_tpu=on_tpu, - using_lbfgs=using_lbfgs, ) self.update_teacher() diff --git a/quadra/modules/ssl/dino.py b/quadra/modules/ssl/dino.py index 59c71a30..59555e8c 100644 --- a/quadra/modules/ssl/dino.py +++ b/quadra/modules/ssl/dino.py @@ -157,13 +157,11 @@ def training_step(self, batch: Tuple[List[torch.Tensor], torch.Tensor], *args: A def configure_gradient_clipping( self, optimizer: Optimizer, - optimizer_idx: int, gradient_clip_val: Optional[Union[int, float]] = None, gradient_clip_algorithm: Optional[str] = None, ): """Configure gradient clipping for the optimizer.""" if gradient_clip_algorithm is not None and gradient_clip_val is not None: - clip_gradients(self.model, gradient_clip_val) clip_gradients(self.student_projection_mlp, gradient_clip_val) self.cancel_gradients_last_layer(self.current_epoch, self.freeze_last_layer) @@ -173,19 +171,13 @@ def optimizer_step( epoch: int, batch_idx: int, optimizer: Union[Optimizer, LightningOptimizer], - optimizer_idx: int = 0, optimizer_closure: Optional[Callable[[], Any]] = None, - on_tpu: bool = False, - using_lbfgs: bool = False, ) -> None: - """Override optimizer_step to update the teacher model.""" + """Override optimizer step to update the teacher parameters.""" super().optimizer_step( epoch, batch_idx, optimizer, - optimizer_idx=optimizer_idx, optimizer_closure=optimizer_closure, - on_tpu=on_tpu, - using_lbfgs=using_lbfgs, ) self.update_teacher() diff --git a/quadra/tasks/classification.py b/quadra/tasks/classification.py index 7c449328..991ccb44 100644 --- a/quadra/tasks/classification.py +++ b/quadra/tasks/classification.py @@ -178,7 +178,7 @@ def module(self, module_config): ) if self.checkpoint_path is not None: log.info("Loading model from lightning checkpoint: %s", self.checkpoint_path) - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, model=self.model, optimizer=self.optimizer, @@ -286,7 +286,7 @@ def export(self) -> None: if self.best_model_path is not None: log.info("Saving deployment model for %s checkpoint", self.best_model_path) - module = self.module.load_from_checkpoint( + module = self.module.__class__.load_from_checkpoint( self.best_model_path, model=self.module.model, optimizer=self.optimizer, @@ -1099,11 +1099,7 @@ def prepare_gradcam(self) -> None: return if isinstance(self.deployment_model.model.features_extractor, timm.models.resnet.ResNet): - target_layers = [ - cast(BaseNetworkBuilder, self.deployment_model.model).features_extractor.layer4[ - -1 - ] # type: ignore[index] - ] + target_layers = [cast(BaseNetworkBuilder, 
self.deployment_model.model).features_extractor.layer4[-1]] self.cam = GradCAM( model=self.deployment_model.model, target_layers=target_layers, diff --git a/quadra/tasks/segmentation.py b/quadra/tasks/segmentation.py index a61d840f..18974475 100644 --- a/quadra/tasks/segmentation.py +++ b/quadra/tasks/segmentation.py @@ -107,7 +107,9 @@ def module(self, module_config) -> None: log.info("Instantiating module <%s>", module_config.module["_target_"]) module = hydra.utils.instantiate(module_config.module, model=model, optimizer=optimizer, lr_scheduler=scheduler) if self.checkpoint_path is not None: - module.load_from_checkpoint(self.checkpoint_path, model=model, optimizer=optimizer, lr_scheduler=scheduler) + module.__class__.load_from_checkpoint( + self.checkpoint_path, model=model, optimizer=optimizer, lr_scheduler=scheduler + ) self._module = module def prepare(self) -> None: @@ -129,7 +131,7 @@ def export(self) -> None: best_model_path = self.trainer.checkpoint_callback.best_model_path log.info("Loaded best model from %s", best_model_path) - module = self.module.load_from_checkpoint( + module = self.module.__class__.load_from_checkpoint( best_model_path, model=self.module.model, loss_fun=None, diff --git a/quadra/tasks/ssl.py b/quadra/tasks/ssl.py index 0804154b..4638a2c6 100644 --- a/quadra/tasks/ssl.py +++ b/quadra/tasks/ssl.py @@ -170,7 +170,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, model=self.backbone, projection_mlp=self.projection_mlp, @@ -231,7 +231,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, model=self.backbone, projection_mlp=self.projection_mlp, @@ -349,7 +349,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, student=self.student_model, teacher=self.teacher_model, @@ -420,7 +420,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, student=self.student_model, teacher=self.teacher_model, From 2e0a480e5649a80b2dd8a7b6ebd720192a0bd474 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Mon, 22 Jan 2024 14:18:02 +0000 Subject: [PATCH 06/18] build: Limit torch version downloaded by poetry --- poetry.lock | 600 ++++++++++++++++++++++++++++++++++--------------- pyproject.toml | 9 +- 2 files changed, 425 insertions(+), 184 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3eb3d606..11cdb399 100644 --- a/poetry.lock +++ b/poetry.lock @@ -187,7 +187,7 @@ test = ["flake8 (==3.7.9)", "mock (==2.0.0)", "pylint (==1.9.3)"] [[package]] name = "anomalib" -version = "0.7.0+obx.1.2.10" +version = "0.7.0+obx.1.2.11" description = "anomalib - Anomaly Detection Library" optional = false python-versions = ">=3.7" @@ -1896,13 +1896,13 @@ test = ["objgraph", "psutil"] [[package]] name = "griffe" -version = "0.39.0" +version = "0.39.1" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
optional = true python-versions = ">=3.8" files = [ - {file = "griffe-0.39.0-py3-none-any.whl", hash = "sha256:b5e2f249d86feaad1d3068b33b1c8c2ecf39cb870bf292f2af3a4311891a4835"}, - {file = "griffe-0.39.0.tar.gz", hash = "sha256:34461730c975a31c4ffa413bdad841f217a5d1a1c611b95f6110ed0f2b2fc04d"}, + {file = "griffe-0.39.1-py3-none-any.whl", hash = "sha256:6ce4ecffcf0d2f96362c5974b3f7df812da8f8d4cfcc5ebc8202ef72656fc087"}, + {file = "griffe-0.39.1.tar.gz", hash = "sha256:ead8dfede6e6531cce6bf69090a4f3c6d36fdf923c43f8e85aa530552cef0c09"}, ] [package.dependencies] @@ -2044,13 +2044,13 @@ numpy = ">=1.14.5" [[package]] name = "huggingface-hub" -version = "0.20.2" +version = "0.20.3" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.20.2-py3-none-any.whl", hash = "sha256:53752eda2239d30a470c307a61cf9adcf136bc77b0a734338c7d04941af560d8"}, - {file = "huggingface_hub-0.20.2.tar.gz", hash = "sha256:215c5fceff631030c7a3d19ba7b588921c908b3f21eef31d160ebc245b200ff6"}, + {file = "huggingface_hub-0.20.3-py3-none-any.whl", hash = "sha256:d988ae4f00d3e307b0c80c6a05ca6dbb7edba8bba3079f74cda7d9c2e562a7b6"}, + {file = "huggingface_hub-0.20.3.tar.gz", hash = "sha256:94e7f8e074475fbc67d6a71957b678e1b4a74ff1b64a644fd6cbb83da962d05d"}, ] [package.dependencies] @@ -2777,13 +2777,13 @@ files = [ [[package]] name = "lightning-utilities" -version = "0.10.0" -description = "PyTorch Lightning Sample project." +version = "0.10.1" +description = "Lightning toolbox for across the our ecosystem." optional = false python-versions = ">=3.7" files = [ - {file = "lightning-utilities-0.10.0.tar.gz", hash = "sha256:9e31617eccbbadc6b737a2432fd7076ff8e24957f9c63aeba2530b189e19319c"}, - {file = "lightning_utilities-0.10.0-py3-none-any.whl", hash = "sha256:84d09b11fe9bc16c803ae5e412874748239d73ad2f3d1b90862f99ce15a03aa0"}, + {file = "lightning-utilities-0.10.1.tar.gz", hash = "sha256:362755023dcf93b8fa519bc002ae41794943a3ffbc5318e40804d36aa14bf1fd"}, + {file = "lightning_utilities-0.10.1-py3-none-any.whl", hash = "sha256:e67be3f328b1c14f2b36cc09e84642db5b50afeab94e7704969b2130fe6a3bda"}, ] [package.dependencies] @@ -3035,71 +3035,71 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.1.3" +version = "2.1.4" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = 
"MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e"}, + {file = 
"MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-win32.whl", hash = "sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-win32.whl", hash = "sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-win_amd64.whl", hash = "sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-win32.whl", hash = "sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-win_amd64.whl", hash = 
"sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-win32.whl", hash = "sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-win32.whl", hash = "sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e"}, + {file = 
"MarkupSafe-2.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-win32.whl", hash = "sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959"}, + {file = "MarkupSafe-2.1.4.tar.gz", hash = "sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f"}, ] [[package]] @@ -3881,6 +3881,115 @@ files = [ {file = "numpy-1.26.3.tar.gz", hash = "sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4"}, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + [[package]] name = "nvidia-ml-py" version = "11.515.75" @@ -3892,6 +4001,38 @@ files = [ {file = "nvidia_ml_py-11.515.75-py3-none-any.whl", hash = "sha256:5bf5f5240f5a242689c1d1129135a0bd79c8b93d2a282c7229fe32ab63e7999b"}, ] +[[package]] +name = "nvidia-nccl-cu12" +version = "2.18.1" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:1a6c4acefcbebfa6de320f412bf7866de856e786e0462326ba1bac40de0b5e71"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.3.101" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + [[package]] name = "nvitop" version = "0.11.0" @@ -4073,9 +4214,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, + {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, ] @@ -4097,9 +4238,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, + {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and 
platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, ] @@ -4140,61 +4281,61 @@ tests = ["fakeredis", "pytest"] [[package]] name = "orjson" -version = "3.9.10" +version = "3.9.12" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.9.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d"}, - {file = "orjson-3.9.10-cp310-none-win32.whl", hash = "sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1"}, - {file = "orjson-3.9.10-cp310-none-win_amd64.whl", hash = "sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7"}, - {file = "orjson-3.9.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3"}, - {file = "orjson-3.9.10-cp311-none-win32.whl", hash = "sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8"}, - {file = "orjson-3.9.10-cp311-none-win_amd64.whl", hash = 
"sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616"}, - {file = "orjson-3.9.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca"}, - {file = "orjson-3.9.10-cp312-none-win_amd64.whl", hash = "sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d"}, - {file = "orjson-3.9.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8"}, - {file = "orjson-3.9.10-cp38-none-win32.whl", hash = "sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643"}, - {file = "orjson-3.9.10-cp38-none-win_amd64.whl", hash = "sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5"}, - {file = "orjson-3.9.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60"}, - {file = 
"orjson-3.9.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade"}, - {file = "orjson-3.9.10-cp39-none-win32.whl", hash = "sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088"}, - {file = "orjson-3.9.10-cp39-none-win_amd64.whl", hash = "sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff"}, - {file = "orjson-3.9.10.tar.gz", hash = "sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"}, + {file = "orjson-3.9.12-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6b4e2bed7d00753c438e83b613923afdd067564ff7ed696bfe3a7b073a236e07"}, + {file = "orjson-3.9.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd1b8ec63f0bf54a50b498eedeccdca23bd7b658f81c524d18e410c203189365"}, + {file = "orjson-3.9.12-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab8add018a53665042a5ae68200f1ad14c7953fa12110d12d41166f111724656"}, + {file = "orjson-3.9.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12756a108875526b76e505afe6d6ba34960ac6b8c5ec2f35faf73ef161e97e07"}, + {file = "orjson-3.9.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:890e7519c0c70296253660455f77e3a194554a3c45e42aa193cdebc76a02d82b"}, + {file = "orjson-3.9.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d664880d7f016efbae97c725b243b33c2cbb4851ddc77f683fd1eec4a7894146"}, + {file = "orjson-3.9.12-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cfdaede0fa5b500314ec7b1249c7e30e871504a57004acd116be6acdda3b8ab3"}, + {file = "orjson-3.9.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6492ff5953011e1ba9ed1bf086835fd574bd0a3cbe252db8e15ed72a30479081"}, + {file = "orjson-3.9.12-cp310-none-win32.whl", hash = "sha256:29bf08e2eadb2c480fdc2e2daae58f2f013dff5d3b506edd1e02963b9ce9f8a9"}, + {file = "orjson-3.9.12-cp310-none-win_amd64.whl", hash = "sha256:0fc156fba60d6b50743337ba09f052d8afc8b64595112996d22f5fce01ab57da"}, + {file = "orjson-3.9.12-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2849f88a0a12b8d94579b67486cbd8f3a49e36a4cb3d3f0ab352c596078c730c"}, + {file = "orjson-3.9.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3186b18754befa660b31c649a108a915493ea69b4fc33f624ed854ad3563ac65"}, + {file = "orjson-3.9.12-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbbf313c9fb9d4f6cf9c22ced4b6682230457741daeb3d7060c5d06c2e73884a"}, + {file = "orjson-3.9.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:99e8cd005b3926c3db9b63d264bd05e1bf4451787cc79a048f27f5190a9a0311"}, + {file = "orjson-3.9.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59feb148392d9155f3bfed0a2a3209268e000c2c3c834fb8fe1a6af9392efcbf"}, + {file = "orjson-3.9.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4ae815a172a1f073b05b9e04273e3b23e608a0858c4e76f606d2d75fcabde0c"}, + {file = "orjson-3.9.12-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed398f9a9d5a1bf55b6e362ffc80ac846af2122d14a8243a1e6510a4eabcb71e"}, + {file = "orjson-3.9.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d3cfb76600c5a1e6be91326b8f3b83035a370e727854a96d801c1ea08b708073"}, + {file = "orjson-3.9.12-cp311-none-win32.whl", hash = "sha256:a2b6f5252c92bcab3b742ddb3ac195c0fa74bed4319acd74f5d54d79ef4715dc"}, + {file = "orjson-3.9.12-cp311-none-win_amd64.whl", hash = "sha256:c95488e4aa1d078ff5776b58f66bd29d628fa59adcb2047f4efd3ecb2bd41a71"}, + {file = "orjson-3.9.12-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d6ce2062c4af43b92b0221ed4f445632c6bf4213f8a7da5396a122931377acd9"}, + {file = "orjson-3.9.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:950951799967558c214cd6cceb7ceceed6f81d2c3c4135ee4a2c9c69f58aa225"}, + {file = "orjson-3.9.12-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2dfaf71499d6fd4153f5c86eebb68e3ec1bf95851b030a4b55c7637a37bbdee4"}, + {file = "orjson-3.9.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:659a8d7279e46c97661839035a1a218b61957316bf0202674e944ac5cfe7ed83"}, + {file = "orjson-3.9.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af17fa87bccad0b7f6fd8ac8f9cbc9ee656b4552783b10b97a071337616db3e4"}, + {file = "orjson-3.9.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd52dec9eddf4c8c74392f3fd52fa137b5f2e2bed1d9ae958d879de5f7d7cded"}, + {file = "orjson-3.9.12-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:640e2b5d8e36b970202cfd0799d11a9a4ab46cf9212332cd642101ec952df7c8"}, + {file = "orjson-3.9.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:daa438bd8024e03bcea2c5a92cd719a663a58e223fba967296b6ab9992259dbf"}, + {file = "orjson-3.9.12-cp312-none-win_amd64.whl", hash = "sha256:1bb8f657c39ecdb924d02e809f992c9aafeb1ad70127d53fb573a6a6ab59d549"}, + {file = "orjson-3.9.12-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f4098c7674901402c86ba6045a551a2ee345f9f7ed54eeffc7d86d155c8427e5"}, + {file = "orjson-3.9.12-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5586a533998267458fad3a457d6f3cdbddbcce696c916599fa8e2a10a89b24d3"}, + {file = "orjson-3.9.12-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54071b7398cd3f90e4bb61df46705ee96cb5e33e53fc0b2f47dbd9b000e238e1"}, + {file = "orjson-3.9.12-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:67426651faa671b40443ea6f03065f9c8e22272b62fa23238b3efdacd301df31"}, + {file = "orjson-3.9.12-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4a0cd56e8ee56b203abae7d482ac0d233dbfb436bb2e2d5cbcb539fe1200a312"}, + {file = "orjson-3.9.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a84a0c3d4841a42e2571b1c1ead20a83e2792644c5827a606c50fc8af7ca4bee"}, + {file = "orjson-3.9.12-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:09d60450cda3fa6c8ed17770c3a88473a16460cd0ff2ba74ef0df663b6fd3bb8"}, + {file = "orjson-3.9.12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bc82a4db9934a78ade211cf2e07161e4f068a461c1796465d10069cb50b32a80"}, + {file = "orjson-3.9.12-cp38-none-win32.whl", hash = "sha256:61563d5d3b0019804d782137a4f32c72dc44c84e7d078b89d2d2a1adbaa47b52"}, + {file = "orjson-3.9.12-cp38-none-win_amd64.whl", hash = "sha256:410f24309fbbaa2fab776e3212a81b96a1ec6037259359a32ea79fbccfcf76aa"}, + {file = "orjson-3.9.12-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e773f251258dd82795fd5daeac081d00b97bacf1548e44e71245543374874bcf"}, + {file = "orjson-3.9.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b159baecfda51c840a619948c25817d37733a4d9877fea96590ef8606468b362"}, + {file = "orjson-3.9.12-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:975e72e81a249174840d5a8df977d067b0183ef1560a32998be340f7e195c730"}, + {file = "orjson-3.9.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:06e42e899dde61eb1851a9fad7f1a21b8e4be063438399b63c07839b57668f6c"}, + {file = "orjson-3.9.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c157e999e5694475a5515942aebeed6e43f7a1ed52267c1c93dcfde7d78d421"}, + {file = "orjson-3.9.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dde1bc7c035f2d03aa49dc8642d9c6c9b1a81f2470e02055e76ed8853cfae0c3"}, + {file = "orjson-3.9.12-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b0e9d73cdbdad76a53a48f563447e0e1ce34bcecef4614eb4b146383e6e7d8c9"}, + {file = "orjson-3.9.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:96e44b21fe407b8ed48afbb3721f3c8c8ce17e345fbe232bd4651ace7317782d"}, + {file = "orjson-3.9.12-cp39-none-win32.whl", hash = "sha256:cbd0f3555205bf2a60f8812133f2452d498dbefa14423ba90fe89f32276f7abf"}, + {file = "orjson-3.9.12-cp39-none-win_amd64.whl", hash = "sha256:03ea7ee7e992532c2f4a06edd7ee1553f0644790553a118e003e3c405add41fa"}, + {file = "orjson-3.9.12.tar.gz", hash = "sha256:da908d23a3b3243632b523344403b128722a5f45e278a8343c2bb67538dff0e4"}, ] [[package]] @@ -4256,8 +4397,8 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.20.3", markers = "python_version < \"3.10\""}, {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, ] python-dateutil = ">=2.8.1" pytz = ">=2020.1" @@ -4646,27 +4787,27 @@ files = [ [[package]] name = "psutil" -version = "5.9.7" +version = "5.9.8" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "psutil-5.9.7-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:0bd41bf2d1463dfa535942b2a8f0e958acf6607ac0be52265ab31f7923bcd5e6"}, - {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:5794944462509e49d4d458f4dbfb92c47539e7d8d15c796f141f474010084056"}, - {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:fe361f743cb3389b8efda21980d93eb55c1f1e3898269bc9a2a1d0bb7b1f6508"}, - {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:e469990e28f1ad738f65a42dcfc17adaed9d0f325d55047593cb9033a0ab63df"}, - {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:3c4747a3e2ead1589e647e64aad601981f01b68f9398ddf94d01e3dc0d1e57c7"}, - {file = "psutil-5.9.7-cp27-none-win32.whl", hash = "sha256:1d4bc4a0148fdd7fd8f38e0498639ae128e64538faa507df25a20f8f7fb2341c"}, - {file = "psutil-5.9.7-cp27-none-win_amd64.whl", hash = "sha256:4c03362e280d06bbbfcd52f29acd79c733e0af33d707c54255d21029b8b32ba6"}, - {file = "psutil-5.9.7-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ea36cc62e69a13ec52b2f625c27527f6e4479bca2b340b7a452af55b34fcbe2e"}, - {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1132704b876e58d277168cd729d64750633d5ff0183acf5b3c986b8466cd0284"}, - {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8b7f07948f1304497ce4f4684881250cd859b16d06a1dc4d7941eeb6233bfe"}, - {file = "psutil-5.9.7-cp36-cp36m-win32.whl", hash = "sha256:b27f8fdb190c8c03914f908a4555159327d7481dac2f01008d483137ef3311a9"}, - {file = "psutil-5.9.7-cp36-cp36m-win_amd64.whl", hash = "sha256:44969859757f4d8f2a9bd5b76eba8c3099a2c8cf3992ff62144061e39ba8568e"}, - {file = "psutil-5.9.7-cp37-abi3-win32.whl", hash = "sha256:c727ca5a9b2dd5193b8644b9f0c883d54f1248310023b5ad3e92036c5e2ada68"}, - {file = "psutil-5.9.7-cp37-abi3-win_amd64.whl", hash = "sha256:f37f87e4d73b79e6c5e749440c3113b81d1ee7d26f21c19c47371ddea834f414"}, - {file = "psutil-5.9.7-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:032f4f2c909818c86cea4fe2cc407f1c0f0cde8e6c6d702b28b8ce0c0d143340"}, - {file = "psutil-5.9.7.tar.gz", hash = "sha256:3f02134e82cfb5d089fddf20bb2e03fd5cd52395321d1c8458a9e58500ff417c"}, + {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"}, + {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"}, + {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"}, + {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"}, + 
{file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"}, + {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"}, + {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"}, + {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"}, + {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"}, + {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"}, + {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"}, ] [package.extras] @@ -5921,45 +6062,45 @@ files = [ [[package]] name = "scipy" -version = "1.11.4" +version = "1.12.0" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"}, - {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"}, - {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"}, - {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"}, - {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"}, - {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"}, - {file = 
"scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"}, - {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"}, - {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"}, - {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"}, - {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"}, - {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"}, - {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"}, -] - -[package.dependencies] -numpy = ">=1.21.6,<1.28.0" + {file = "scipy-1.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78e4402e140879387187f7f25d91cc592b3501a2e51dfb320f48dfb73565f10b"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5f00ebaf8de24d14b8449981a2842d404152774c1a1d880c901bf454cb8e2a1"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e53958531a7c695ff66c2e7bb7b79560ffdc562e2051644c5576c39ff8efb563"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e32847e08da8d895ce09d108a494d9eb78974cf6de23063f93306a3e419960c"}, + {file = "scipy-1.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c1020cad92772bf44b8e4cdabc1df5d87376cb219742549ef69fc9fd86282dd"}, + {file = "scipy-1.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:75ea2a144096b5e39402e2ff53a36fecfd3b960d786b7efd3c180e29c39e53f2"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:408c68423f9de16cb9e602528be4ce0d6312b05001f3de61fe9ec8b1263cad08"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5adfad5dbf0163397beb4aca679187d24aec085343755fcdbdeb32b3679f254c"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3003652496f6e7c387b1cf63f4bb720951cfa18907e998ea551e6de51a04467"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8066bce124ee5531d12a74b617d9ac0ea59245246410e19bca549656d9a40a"}, + {file = "scipy-1.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8bee4993817e204d761dba10dbab0774ba5a8612e57e81319ea04d84945375ba"}, + {file = "scipy-1.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a24024d45ce9a675c1fb8494e8e5244efea1c7a09c60beb1eeb80373d0fecc70"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash 
= "sha256:e7e76cc48638228212c747ada851ef355c2bb5e7f939e10952bc504c11f4e372"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f7ce148dffcd64ade37b2df9315541f9adad6efcaa86866ee7dd5db0c8f041c3"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c39f92041f490422924dfdb782527a4abddf4707616e07b021de33467f917bc"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ebda398f86e56178c2fa94cad15bf457a218a54a35c2a7b4490b9f9cb2676c"}, + {file = "scipy-1.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:95e5c750d55cf518c398a8240571b0e0782c2d5a703250872f36eaf737751338"}, + {file = "scipy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e646d8571804a304e1da01040d21577685ce8e2db08ac58e543eaca063453e1c"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:913d6e7956c3a671de3b05ccb66b11bc293f56bfdef040583a7221d9e22a2e35"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba1b0c7256ad75401c73e4b3cf09d1f176e9bd4248f0d3112170fb2ec4db067"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:730badef9b827b368f351eacae2e82da414e13cf8bd5051b4bdfd720271a5371"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6546dc2c11a9df6926afcbdd8a3edec28566e4e785b915e849348c6dd9f3f490"}, + {file = "scipy-1.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:196ebad3a4882081f62a5bf4aeb7326aa34b110e533aab23e4374fcccb0890dc"}, + {file = "scipy-1.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:b360f1b6b2f742781299514e99ff560d1fe9bd1bff2712894b52abe528d1fd1e"}, + {file = "scipy-1.12.0.tar.gz", hash = "sha256:4bf5abab8a36d20193c698b0f1fc282c1d083c94723902c447e5d2f1780936a3"}, +] + +[package.dependencies] +numpy = ">=1.22.4,<1.29.0" [package.extras] dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "seaborn" @@ -6626,13 +6767,70 @@ optional = false python-versions = ">=3.8.0" files = [ {file = "torch-2.1.2+cu121-cp310-cp310-linux_x86_64.whl", hash = "sha256:b2184b7729ef3b9b10065c074a37c1e603fd99f91e38376e25cb7ed6e1d54696"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +dynamo = ["jinja2"] +opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-linux_x86_64.whl" + +[[package]] +name = "torch" +version = "2.1.2+cu121" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ {file = "torch-2.1.2+cu121-cp310-cp310-win_amd64.whl", hash = "sha256:9925143dece0e63c5404a72d59eb668ef78795418e96b576f94d75dcea6030b9"}, 
- {file = "torch-2.1.2+cu121-cp311-cp311-linux_x86_64.whl", hash = "sha256:ca05cae9334504d1903e16c50ddf045329a859d5b1a27ed2dc1d58ed066df6fa"}, - {file = "torch-2.1.2+cu121-cp311-cp311-win_amd64.whl", hash = "sha256:c92e9c559a82466fc5989f648807d2c0215bcce09b97ad7a20d038b686783229"}, - {file = "torch-2.1.2+cu121-cp38-cp38-linux_x86_64.whl", hash = "sha256:daa179bb558f78f2165db974a6744ec8de2ea71eb6aaf362bdae7616012c0302"}, - {file = "torch-2.1.2+cu121-cp38-cp38-win_amd64.whl", hash = "sha256:44c31fc1e470428682e212473507116ec3afa583d6b79d92858bf3dc24b334ea"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-win_amd64.whl" + +[[package]] +name = "torch" +version = "2.1.2+cu121" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ {file = "torch-2.1.2+cu121-cp39-cp39-linux_x86_64.whl", hash = "sha256:eaaf6907e3723c0ca6a91df5e01a7eef8cabec93120e9a50739f5a5f14a2aa46"}, - {file = "torch-2.1.2+cu121-cp39-cp39-win_amd64.whl", hash = "sha256:2d287804328dfb950ae6d418c9d8561d8f379237cf0710566d80efb96b6cd744"}, ] [package.dependencies] @@ -6649,9 +6847,45 @@ dynamo = ["jinja2"] opt-einsum = ["opt-einsum (>=3.3)"] [package.source] -type = "legacy" -url = "https://download.pytorch.org/whl/cu121" -reference = "torch_cu121" +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-linux_x86_64.whl" + +[[package]] +name = "torch" +version = "2.1.2+cu121" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.1.2+cu121-cp39-cp39-win_amd64.whl", hash = "sha256:2d287804328dfb950ae6d418c9d8561d8f379237cf0710566d80efb96b6cd744"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" 
+jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-win_amd64.whl" [[package]] name = "torchinfo" @@ -7606,4 +7840,4 @@ test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_moc [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.11" -content-hash = "1153edbac74fe70ca0744ca93bdc26808780d30e2bdfca1d3f0b7684fbfa8c66" +content-hash = "be27a8e5cdf1db9742ae054e4efb70aeec29455a2950a32cd5f5e99c3a3d0b9b" diff --git a/pyproject.toml b/pyproject.toml index b3a630c0..fac80447 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,9 +45,16 @@ hydra-plugins = { path = "quadra_hydra_plugin", optional = true } # TODO: We could support previous torch version using mutually exclusive python version but it's bad... 
# TODO: Right now it seems that poetry will download every kind of possible dependency from cu116 # To make it faster we could hardcode the correct version of the dependencies -torch = { version = "~2.1", source = "torch_cu121" } +# torch = { version = "~2.1", source = "torch_cu121" } torchvision = { version = "~0.16", source = "torch_cu121" } +torch = [ + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-linux_x86_64.whl", markers = "sys_platform == 'linux' and python_version == '3.10'" }, + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-win_amd64.whl", markers = "sys_platform == 'win32' and python_version == '3.10'" }, + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-linux_x86_64.whl", markers = "sys_platform == 'linux' and python_version == '3.9'" }, + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-win_amd64.whl", markers = "sys_platform == 'win32' and python_version == '3.9'" }, +] + pytorch_lightning = "~2.1" torchsummary = "~1.5" torchmetrics = "~0.10" From e0fa7ed7c1842700108a55581380199e9fc93223 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Tue, 23 Jan 2024 11:23:42 +0000 Subject: [PATCH 07/18] build: Move hydra-plugins as groupd dependency --- poetry.lock | 6 +++--- pyproject.toml | 9 ++++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index ee8c0706..eb9698be 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2138,7 +2138,7 @@ optuna = ">=2.10.0,<3.0.0" name = "hydra-plugins" version = "1.0.0" description = "Hydra plugin allowing the discovery of external configurations" -optional = true +optional = false python-versions = "*" files = [] develop = false @@ -7831,7 +7831,7 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -dev = ["black", "bump2version", "cairosvg", "hydra-plugins", "interrogate", "isort", "mike", "mkdocs", "mkdocs_autorefs", "mkdocs_gen_files", "mkdocs_literate_nav", "mkdocs_material", "mkdocs_material_extensions", "mkdocs_section_index", "mkdocstrings", "mkdocstrings_python", "mypy", "pandas_stubs", "poetry-dynamic-versioning", "pre_commit", "pylint", "pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_mock", "ruff", "twine", "types_pyyaml"] +dev = ["black", "bump2version", "cairosvg", "interrogate", "isort", "mike", "mkdocs", "mkdocs_autorefs", "mkdocs_gen_files", "mkdocs_literate_nav", "mkdocs_material", "mkdocs_material_extensions", "mkdocs_section_index", "mkdocstrings", "mkdocstrings_python", "mypy", "pandas_stubs", "poetry-dynamic-versioning", "pre_commit", "pylint", "pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_mock", "ruff", "twine", "types_pyyaml"] docs = ["cairosvg", "mike", "mkdocs", "mkdocs_autorefs", "mkdocs_gen_files", "mkdocs_literate_nav", "mkdocs_material", "mkdocs_material_extensions", "mkdocs_section_index", "mkdocstrings", "mkdocstrings_python"] onnx = ["onnx", "onnxruntime_gpu", "onnxsim"] test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_mock"] @@ -7839,4 +7839,4 @@ test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_moc [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.11" 
-content-hash = "6e4c28ff669d1237e7df3d2ab50719ce0eef82d2357d226767513822cfe299fc" +content-hash = "01ee411d7ff248fc1f316cd011377702d2b3c9de7d86d2f7f78cea1e53799cc8" diff --git a/pyproject.toml b/pyproject.toml index eec68ffa..e0b053e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,8 +40,6 @@ quadra = "quadra.main:main" [tool.poetry.dependencies] python = ">=3.9,<3.11" poetry = "1.7.1" -# This will make hydra-plugins available also when running with poetry install -hydra-plugins = { path = "quadra_hydra_plugin", optional = true } # TODO: We could support previous torch version using mutually exclusive python version but it's bad... # TODO: Right now it seems that poetry will download every kind of possible dependency from cu116 # To make it faster we could hardcode the correct version of the dependencies @@ -138,6 +136,12 @@ name = "onnx_cu12" url = "https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple/" priority = "explicit" +[tool.poetry.group.dev] +optional = true + +[tool.poetry.group.dev.dependencies] +hydra-plugins = { path = "quadra_hydra_plugin" } + [tool.poetry.extras] dev = [ "black", @@ -169,7 +173,6 @@ dev = [ "mike", "cairosvg", "poetry-dynamic-versioning", - "hydra-plugins", ] test = [ From 96be4078221750cb5bf98846c3f739800c43bb4a Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Wed, 24 Jan 2024 13:25:42 +0000 Subject: [PATCH 08/18] build: Upgrade version --- pyproject.toml | 2 +- quadra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a054c144..4945594e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "quadra" -version = "2.0.0a3" +version = "2.0.0a4" description = "Deep Learning experiment orchestration library" authors = [ "Federico Belotti ", diff --git a/quadra/__init__.py b/quadra/__init__.py index 93b18dae..8c8be769 100644 --- a/quadra/__init__.py +++ b/quadra/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.0.0a3" +__version__ = "2.0.0a4" def get_version(): From 519945964ff349d1c587173b50ad6c2b2e98c287 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Wed, 24 Jan 2024 19:42:45 +0000 Subject: [PATCH 09/18] test: Disable cuda visible devices call to avoid conflicts with lightning --- tests/conftest.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 85922195..44b0b87e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -44,7 +44,9 @@ def limit_torch_threads(): @pytest.fixture(autouse=True) def setup_devices(device: str): """Set the device to run tests on.""" - torch_device = torch.device(device) + # torch_device = torch.device(device) os.environ["QUADRA_TEST_DEVICE"] = device - if torch_device.type != "cuda": - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + + # TODO: If we use this lightning crashes because it sees gpus but no gpu are available!! 
+ # if torch_device.type != "cuda": + # os.environ["CUDA_VISIBLE_DEVICES"] = "-1" From 0e89fc8d687853fc69911c70525bf56402482fc5 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Wed, 24 Jan 2024 19:43:20 +0000 Subject: [PATCH 10/18] refactor: Avoid changing trainer parameters for anomalib tasks when not needed --- .../configs/experiment/base/anomaly/cfa.yaml | 31 ---------------- .../experiment/base/anomaly/cflow.yaml | 32 +---------------- .../experiment/base/anomaly/csflow.yaml | 30 +--------------- .../experiment/base/anomaly/draem.yaml | 28 --------------- .../experiment/base/anomaly/efficient_ad.yaml | 31 +++------------- .../experiment/base/anomaly/fastflow.yaml | 31 +--------------- .../experiment/base/anomaly/padim.yaml | 29 --------------- .../experiment/base/anomaly/patchcore.yaml | 29 --------------- quadra/configs/model/anomalib/cfa.yaml | 28 --------------- quadra/configs/model/anomalib/csflow.yaml | 36 ------------------- 10 files changed, 7 insertions(+), 298 deletions(-) diff --git a/quadra/configs/experiment/base/anomaly/cfa.yaml b/quadra/configs/experiment/base/anomaly/cfa.yaml index bafbdb51..1fd25197 100644 --- a/quadra/configs/experiment/base/anomaly/cfa.yaml +++ b/quadra/configs/experiment/base/anomaly/cfa.yaml @@ -42,37 +42,6 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null - gradient_clip_val: 0 - gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 30 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - detect_anomaly: false - plugins: null diff --git a/quadra/configs/experiment/base/anomaly/cflow.yaml b/quadra/configs/experiment/base/anomaly/cflow.yaml index 0c142551..87279bfd 100644 --- a/quadra/configs/experiment/base/anomaly/cflow.yaml +++ b/quadra/configs/experiment/base/anomaly/cflow.yaml @@ -41,37 +41,7 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null - gradient_clip_val: 0 - gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 50 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. 
- log_every_n_steps: 50 - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - detect_anomaly: false - plugins: null + max_epochs: 50 diff --git a/quadra/configs/experiment/base/anomaly/csflow.yaml b/quadra/configs/experiment/base/anomaly/csflow.yaml index 13c58d4e..a1bf91e8 100644 --- a/quadra/configs/experiment/base/anomaly/csflow.yaml +++ b/quadra/configs/experiment/base/anomaly/csflow.yaml @@ -40,37 +40,9 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 1 # Grad clip value set based on the official implementation gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 240 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - detect_anomaly: false - plugins: null + max_epochs: 240 diff --git a/quadra/configs/experiment/base/anomaly/draem.yaml b/quadra/configs/experiment/base/anomaly/draem.yaml index c2f96582..b7e4174e 100644 --- a/quadra/configs/experiment/base/anomaly/draem.yaml +++ b/quadra/configs/experiment/base/anomaly/draem.yaml @@ -43,37 +43,9 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 0 gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 max_epochs: 700 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. 
- log_every_n_steps: 50 - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - detect_anomaly: false - plugins: null diff --git a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml index f8dfac3a..e98c47c7 100644 --- a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml +++ b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml @@ -33,34 +33,11 @@ logger: trainer: devices: [2] - accelerator: auto - accumulate_grad_batches: 1 - benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} - default_root_dir: null - detect_anomaly: false - deterministic: false - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 0 - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 max_epochs: 20 max_steps: 20000 - max_time: null - min_epochs: null - min_steps: null - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - use_distributed_sampler: true - sync_batchnorm: false val_check_interval: 1.0 # Don't validate before extracting features. + # This will avoid issues with ModelSignatureWrapper + # As the default forward for EfficientAD is performed with a None attribute + # Which we currently can't handle + num_sanity_val_steps: 0 diff --git a/quadra/configs/experiment/base/anomaly/fastflow.yaml b/quadra/configs/experiment/base/anomaly/fastflow.yaml index 841fe504..69027764 100644 --- a/quadra/configs/experiment/base/anomaly/fastflow.yaml +++ b/quadra/configs/experiment/base/anomaly/fastflow.yaml @@ -37,39 +37,10 @@ logger: experiment_name: run_name: ${core.name} -# PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 0 gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 500 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - detect_anomaly: false - plugins: null + max_epochs: 500 diff --git a/quadra/configs/experiment/base/anomaly/padim.yaml b/quadra/configs/experiment/base/anomaly/padim.yaml index bb7f6768..bd251ca5 100644 --- a/quadra/configs/experiment/base/anomaly/padim.yaml +++ b/quadra/configs/experiment/base/anomaly/padim.yaml @@ -33,34 +33,5 @@ logger: trainer: devices: [2] - accelerator: auto - accumulate_grad_batches: 1 - benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. 
- default_root_dir: null - detect_anomaly: false - deterministic: false - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 0 - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 max_epochs: 1 - max_steps: -1 - max_time: null - min_epochs: null - min_steps: null - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - use_distributed_sampler: true - sync_batchnorm: false - val_check_interval: 1.0 # Don't validate before extracting features. diff --git a/quadra/configs/experiment/base/anomaly/patchcore.yaml b/quadra/configs/experiment/base/anomaly/patchcore.yaml index 50364d3e..1c0767f7 100644 --- a/quadra/configs/experiment/base/anomaly/patchcore.yaml +++ b/quadra/configs/experiment/base/anomaly/patchcore.yaml @@ -33,34 +33,5 @@ logger: trainer: devices: [2] - accelerator: auto - accumulate_grad_batches: 1 - benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. - default_root_dir: null - detect_anomaly: false - deterministic: false - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 0 - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 max_epochs: 1 - max_steps: -1 - max_time: null - min_epochs: null - min_steps: null - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - use_distributed_sampler: true - sync_batchnorm: false - val_check_interval: 1.0 # Don't validate before extracting features. diff --git a/quadra/configs/model/anomalib/cfa.yaml b/quadra/configs/model/anomalib/cfa.yaml index cd873b9a..afacc73d 100644 --- a/quadra/configs/model/anomalib/cfa.yaml +++ b/quadra/configs/model/anomalib/cfa.yaml @@ -27,37 +27,9 @@ metrics: # PL Trainer Args. Don't add extra parameter here. trainer: - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 0 gradient_clip_algorithm: norm - num_nodes: 1 devices: [0] - enable_progress_bar: true - overfit_batches: 0.0 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 max_epochs: 30 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - detect_anomaly: false - plugins: null diff --git a/quadra/configs/model/anomalib/csflow.yaml b/quadra/configs/model/anomalib/csflow.yaml index 166911ac..3146d7f1 100644 --- a/quadra/configs/model/anomalib/csflow.yaml +++ b/quadra/configs/model/anomalib/csflow.yaml @@ -32,39 +32,3 @@ metrics: method: adaptive #options: [adaptive, manual] manual_image: null manual_pixel: null - -# PL Trainer Args. Don't add extra parameter here. 
-trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - benchmark: false - check_val_every_n_epoch: 1 - default_root_dir: null - detect_anomaly: false - deterministic: false - devices: [0] - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 1 # Grad clip value set based on the official implementation - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - max_epochs: 240 - max_steps: -1 - max_time: null - min_epochs: null - min_steps: null - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - reload_dataloaders_every_n_epochs: 0 - use_distributed_sampler: true - sync_batchnorm: false - val_check_interval: 1.0 From 1612955c811bc7f54b1468ec4646816132acc959 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Thu, 25 Jan 2024 08:15:25 +0000 Subject: [PATCH 11/18] refactor: Add all parameters to lightning trainer configuration --- quadra/configs/trainer/lightning_cpu.yaml | 28 +++++++++++++++++ quadra/configs/trainer/lightning_gpu.yaml | 29 +++++++++++++++++ .../configs/trainer/lightning_gpu_bf16.yaml | 31 ++++++++++++++++++- .../configs/trainer/lightning_gpu_fp16.yaml | 29 +++++++++++++++++ .../configs/trainer/lightning_multigpu.yaml | 28 +++++++++++++++++ 5 files changed, 144 insertions(+), 1 deletion(-) diff --git a/quadra/configs/trainer/lightning_cpu.yaml b/quadra/configs/trainer/lightning_cpu.yaml index ae2b6bd1..90a71872 100644 --- a/quadra/configs/trainer/lightning_cpu.yaml +++ b/quadra/configs/trainer/lightning_cpu.yaml @@ -6,3 +6,31 @@ max_epochs: 10 strategy: auto log_every_n_steps: 10 precision: 32 +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_gpu.yaml b/quadra/configs/trainer/lightning_gpu.yaml index 1ef20b47..1e366b6f 100644 --- a/quadra/configs/trainer/lightning_gpu.yaml +++ b/quadra/configs/trainer/lightning_gpu.yaml @@ -4,3 +4,32 @@ accelerator: gpu min_epochs: 1 max_epochs: 10 log_every_n_steps: 10 +strategy: auto +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_gpu_bf16.yaml b/quadra/configs/trainer/lightning_gpu_bf16.yaml index 7d851fc4..9507d2bf 100644 --- a/quadra/configs/trainer/lightning_gpu_bf16.yaml +++ 
b/quadra/configs/trainer/lightning_gpu_bf16.yaml @@ -1,7 +1,36 @@ _target_: pytorch_lightning.Trainer -devices: [0] accelerator: gpu +devices: [0] min_epochs: 1 max_epochs: 10 log_every_n_steps: 10 precision: bf16 +strategy: auto +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_gpu_fp16.yaml b/quadra/configs/trainer/lightning_gpu_fp16.yaml index 5da7940d..02814b80 100644 --- a/quadra/configs/trainer/lightning_gpu_fp16.yaml +++ b/quadra/configs/trainer/lightning_gpu_fp16.yaml @@ -5,3 +5,32 @@ min_epochs: 1 max_epochs: 10 log_every_n_steps: 10 precision: 16 +strategy: auto +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_multigpu.yaml b/quadra/configs/trainer/lightning_multigpu.yaml index 0fd5fda7..5c2d7643 100644 --- a/quadra/configs/trainer/lightning_multigpu.yaml +++ b/quadra/configs/trainer/lightning_multigpu.yaml @@ -7,3 +7,31 @@ strategy: _target_: pytorch_lightning.strategies.DDPStrategy find_unused_parameters: false log_every_n_steps: 10 +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: From e1518c304ef7a3fefab88e22c2041846573a8b1c Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Thu, 25 Jan 2024 08:41:32 +0000 Subject: [PATCH 12/18] refactor: Add missing trainer parameter to avoid issues --- quadra/configs/model/anomalib/efficient_ad.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/quadra/configs/model/anomalib/efficient_ad.yaml b/quadra/configs/model/anomalib/efficient_ad.yaml index 91233dc8..a519c42b 100644 --- a/quadra/configs/model/anomalib/efficient_ad.yaml +++ b/quadra/configs/model/anomalib/efficient_ad.yaml @@ -29,3 +29,9 @@ metrics: method: adaptive # options: [adaptive, manual] manual_image: null manual_pixel: null + +trainer: + # This will avoid issues with ModelSignatureWrapper + # As the default forward for EfficientAD is performed with a None attribute + # Which we 
currently can't handle + num_sanity_val_steps: 0 From 19949d55ed74df96e3ed2b78df468a568f14d33f Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Thu, 25 Jan 2024 08:56:13 +0000 Subject: [PATCH 13/18] build: Upgrade anomalib version --- poetry.lock | 8 ++++---- pyproject.toml | 8 ++------ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/poetry.lock b/poetry.lock index 54d65f10..7eb8e444 100644 --- a/poetry.lock +++ b/poetry.lock @@ -187,7 +187,7 @@ test = ["flake8 (==3.7.9)", "mock (==2.0.0)", "pylint (==1.9.3)"] [[package]] name = "anomalib" -version = "0.7.0+obx.1.2.11" +version = "0.7.0+obx.1.3.0" description = "anomalib - Anomaly Detection Library" optional = false python-versions = ">=3.7" @@ -214,8 +214,8 @@ openvino = ["defusedxml (==0.7.1)", "networkx (>=2.5,<3.0)", "nncf (>=2.1.0)", " [package.source] type = "git" url = "https://github.com/orobix/anomalib.git" -reference = "v0.7.0+obx.1.2.11" -resolved_reference = "715c3efee8c046aa98aaef7fad26f52613907ef4" +reference = "v0.7.0+obx.1.3.0" +resolved_reference = "04e2db7795c26a3d6cc1baf797134a895d1ad87a" [[package]] name = "antlr4-python3-runtime" @@ -7833,4 +7833,4 @@ test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_moc [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.11" -content-hash = "62d461320b686f7552f69976b4addbb823f34d1dd3ee8a84d6f66f5f33755135" +content-hash = "8a21ad86e700d0ea1ae59f94dab392f7d366c7e301a23f2c5470fc9276d4e85d" diff --git a/pyproject.toml b/pyproject.toml index 4945594e..d2b12cb9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,11 +40,6 @@ quadra = "quadra.main:main" [tool.poetry.dependencies] python = ">=3.9,<3.11" poetry = "1.7.1" -# TODO: We could support previous torch version using mutually exclusive python version but it's bad... 
-# TODO: Right now it seems that poetry will download every kind of possible dependency from cu116 -# To make it faster we could hardcode the correct version of the dependencies -# torch = { version = "~2.1", source = "torch_cu121" } -torchvision = { version = "~0.16", source = "torch_cu121" } torch = [ { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-linux_x86_64.whl", markers = "sys_platform == 'linux' and python_version == '3.10'" }, @@ -52,6 +47,7 @@ torch = [ { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-linux_x86_64.whl", markers = "sys_platform == 'linux' and python_version == '3.9'" }, { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-win_amd64.whl", markers = "sys_platform == 'win32' and python_version == '3.9'" }, ] +torchvision = { version = "~0.16", source = "torch_cu121" } pytorch_lightning = "~2.1" torchsummary = "~1.5" @@ -83,7 +79,7 @@ h5py = "~3.8" timm = "0.9.12" # Right now only this ref supports timm 0.9.12 segmentation_models_pytorch = { git = "https://github.com/qubvel/segmentation_models.pytorch", rev = "7b381f899ed472a477a89d381689caf535b5d0a6" } -anomalib = { git = "https://github.com/orobix/anomalib.git", tag = "v0.7.0+obx.1.2.11" } +anomalib = { git = "https://github.com/orobix/anomalib.git", tag = "v0.7.0+obx.1.3.0" } xxhash = "~3.2" torchinfo = "~1.8" From 3336d1d2d2c50ea9cb5bebcd6477038823b421e4 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Thu, 25 Jan 2024 08:57:31 +0000 Subject: [PATCH 14/18] refactor: Remove useless parameter --- quadra/configs/model/anomalib/efficient_ad.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/quadra/configs/model/anomalib/efficient_ad.yaml b/quadra/configs/model/anomalib/efficient_ad.yaml index a519c42b..91233dc8 100644 --- a/quadra/configs/model/anomalib/efficient_ad.yaml +++ b/quadra/configs/model/anomalib/efficient_ad.yaml @@ -29,9 +29,3 @@ metrics: method: adaptive # options: [adaptive, manual] manual_image: null manual_pixel: null - -trainer: - # This will avoid issues with ModelSignatureWrapper - # As the default forward for EfficientAD is performed with a None attribute - # Which we currently can't handle - num_sanity_val_steps: 0 From 39fb1f81f7dda0a023268b881602fb5333cc8ebc Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Thu, 25 Jan 2024 14:52:56 +0000 Subject: [PATCH 15/18] fix: Fix model checkpoint not loaded correctly due to weights in inference mode --- quadra/tasks/classification.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/quadra/tasks/classification.py b/quadra/tasks/classification.py index 43661e47..a48b2c68 100644 --- a/quadra/tasks/classification.py +++ b/quadra/tasks/classification.py @@ -4,6 +4,7 @@ import json import os import typing +from copy import deepcopy from pathlib import Path from typing import Any, Dict, Generic, List, Optional, cast @@ -332,6 +333,8 @@ def generate_report(self) -> None: if not self.run_test or self.config.trainer.get("fast_dev_run"): self.datamodule.setup(stage="test") + # Deepcopy to remove the inference mode from gradients causing issues when loading checkpoints + self.module.model = deepcopy(self.module.model) predictions_outputs = self.trainer.predict( model=self.module, datamodule=self.datamodule, ckpt_path=self.best_model_path ) From c049d732b9bcc891f8c4c0a9e8e085fa8950bd19 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Mon, 29 Jan 2024 09:55:36 +0000 Subject: [PATCH 16/18] build: Upgrade version --- pyproject.toml | 2 +- quadra/__init__.py | 2 
+- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d2b12cb9..312cffbf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "quadra" -version = "2.0.0a4" +version = "2.0.0a5" description = "Deep Learning experiment orchestration library" authors = [ "Federico Belotti ", diff --git a/quadra/__init__.py b/quadra/__init__.py index 8c8be769..4b2ba431 100644 --- a/quadra/__init__.py +++ b/quadra/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.0.0a4" +__version__ = "2.0.0a5" def get_version(): From fcd9dac2260821643516a8d9ac17d067ef4ed9c1 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Mon, 29 Jan 2024 11:38:40 +0000 Subject: [PATCH 17/18] build: Upgrade version --- pyproject.toml | 2 +- quadra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 312cffbf..65ba1d11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "quadra" -version = "2.0.0a5" +version = "2.0.0a6" description = "Deep Learning experiment orchestration library" authors = [ "Federico Belotti ", diff --git a/quadra/__init__.py b/quadra/__init__.py index 4b2ba431..d951d5c4 100644 --- a/quadra/__init__.py +++ b/quadra/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.0.0a5" +__version__ = "2.0.0a6" def get_version(): From 1d2feff3179d47831bc3ec3a454343886844b610 Mon Sep 17 00:00:00 2001 From: Lorenzo Mammana Date: Mon, 5 Feb 2024 10:53:08 +0000 Subject: [PATCH 18/18] build: Upgrade version --- pyproject.toml | 2 +- quadra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 65ba1d11..05239760 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "quadra" -version = "2.0.0a6" +version = "2.0.0a7" description = "Deep Learning experiment orchestration library" authors = [ "Federico Belotti ", diff --git a/quadra/__init__.py b/quadra/__init__.py index d951d5c4..cbb44fe6 100644 --- a/quadra/__init__.py +++ b/quadra/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.0.0a6" +__version__ = "2.0.0a7" def get_version():
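
Note on the trimmed trainer sections in patches 10 and 11: the full Lightning trainer argument set now lives in quadra/configs/trainer/*.yaml, and experiment files such as padim.yaml or cflow.yaml only override the few keys that differ, with values like check_val_every_n_epoch: ${trainer.max_epochs} filled in by interpolation. The snippet below is a minimal standalone sketch of those two mechanisms using plain OmegaConf; the keys and values are illustrative and the real composition is performed by Hydra inside quadra, not by this code.

from omegaconf import OmegaConf

# Standalone sketch (simplified, illustrative values) of the two mechanisms the
# slimmed anomaly configs rely on:
#  1. config merging: experiment files only override the trainer keys that
#     differ from the defaults in quadra/configs/trainer/*.yaml
#  2. interpolation: check_val_every_n_epoch: ${trainer.max_epochs} follows
#     max_epochs instead of hard-coding the same number twice
base = OmegaConf.create({
    "trainer": {"accelerator": "gpu", "devices": [0], "max_epochs": 10,
                "check_val_every_n_epoch": 1, "num_sanity_val_steps": None}
})
override = OmegaConf.create({
    "trainer": {"max_epochs": 20, "check_val_every_n_epoch": "${trainer.max_epochs}"}
})
cfg = OmegaConf.merge(base, override)
print(cfg.trainer.max_epochs)               # 20 (from the experiment override)
print(cfg.trainer.check_val_every_n_epoch)  # 20 (resolved against max_epochs)
print(cfg.trainer.accelerator)              # "gpu" (inherited from the base config)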