diff --git a/pyproject.toml b/pyproject.toml
index 83ea01751cd..fea61f9f0ab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,6 +62,7 @@ filterwarnings = [
     "ignore:.* is deprecated and will be removed in Pillow 10:DeprecationWarning:pretrainedmodels.datasets.utils",
     # https://github.com/pytorch/vision/pull/5898
     "ignore:.* is deprecated and will be removed in Pillow 10:DeprecationWarning:torchvision.transforms.functional_pil",
+    "ignore:.* is deprecated and will be removed in Pillow 10:DeprecationWarning:torchvision.transforms._functional_pil",
     # https://github.com/rwightman/pytorch-image-models/pull/1256
     "ignore:.* is deprecated and will be removed in Pillow 10:DeprecationWarning:timm.data",
     # https://github.com/pytorch/pytorch/issues/72906
@@ -101,8 +102,11 @@ filterwarnings = [
     # Expected warnings
     # Lightning warns us about using num_workers=0, but it's faster on macOS
     "ignore:The dataloader, .*, does not have many workers which may be a bottleneck:UserWarning",
-    # Lightning warns us about using the CPU when a GPU is available
+    # Lightning warns us about using the CPU when GPU/MPS is available
     "ignore:GPU available but not used.:UserWarning",
+    "ignore:MPS available but not used.:UserWarning",
+    # Lightning warns us if TensorBoard is not installed
+    "ignore:Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `lightning.pytorch` package:UserWarning",
     # https://github.com/kornia/kornia/pull/1611
     "ignore:`ColorJitter` is now following Torchvision implementation.:DeprecationWarning:kornia.augmentation._2d.intensity.color_jitter",
     # https://github.com/kornia/kornia/pull/1663
diff --git a/tests/datamodules/test_geo.py b/tests/datamodules/test_geo.py
index 583e7050f9c..b27107d8b12 100644
--- a/tests/datamodules/test_geo.py
+++ b/tests/datamodules/test_geo.py
@@ -66,6 +66,12 @@ def __init__(self) -> None:
 
 
 class TestGeoDataModule:
+    @pytest.fixture(params=[SamplerGeoDataModule, BatchSamplerGeoDataModule])
+    def datamodule(self, request: SubRequest) -> CustomGeoDataModule:
+        dm: CustomGeoDataModule = request.param()
+        dm.trainer = Trainer(accelerator="cpu", max_epochs=1)
+        return dm
+
     @pytest.mark.parametrize("stage", ["fit", "validate", "test"])
     def test_setup(self, stage: str) -> None:
         dm = CustomGeoDataModule()
@@ -102,7 +108,13 @@ def test_no_datasets(self) -> None:
 
 
 class TestNonGeoDataModule:
-    @pytest.mark.parametrize("stage", ["fit", "validate", "test"])
+    @pytest.fixture
+    def datamodule(self) -> CustomNonGeoDataModule:
+        dm = CustomNonGeoDataModule()
+        dm.trainer = Trainer(accelerator="cpu", max_epochs=1)
+        return dm
+
+    @pytest.mark.parametrize("stage", ["fit", "validate", "test", "predict"])
     def test_setup(self, stage: str) -> None:
         dm = CustomNonGeoDataModule()
         dm.prepare_data()
diff --git a/tests/datamodules/test_oscd.py b/tests/datamodules/test_oscd.py
index 2daf98e3f4f..6f6403aa208 100644
--- a/tests/datamodules/test_oscd.py
+++ b/tests/datamodules/test_oscd.py
@@ -25,7 +25,7 @@ def datamodule(self, request: SubRequest) -> OSCDDataModule:
             num_workers=0,
         )
         dm.prepare_data()
-        dm.trainer = Trainer(max_epochs=1)
+        dm.trainer = Trainer(accelerator="cpu", max_epochs=1)
         return dm
 
     def test_train_dataloader(self, datamodule: OSCDDataModule) -> None:
diff --git a/tests/trainers/test_byol.py b/tests/trainers/test_byol.py
index 6b007e3e263..45eff2d366f 100644
--- a/tests/trainers/test_byol.py
+++ b/tests/trainers/test_byol.py
@@ -85,7 +85,12 @@ def test_trainer(
         model.backbone = SegmentationTestModel(**model_kwargs)
 
         # Instantiate trainer
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.fit(model=model, datamodule=datamodule)
         try:
             trainer.test(model=model, datamodule=datamodule)
diff --git a/tests/trainers/test_classification.py b/tests/trainers/test_classification.py
index fe5fcb6ab43..769cd136bc1 100644
--- a/tests/trainers/test_classification.py
+++ b/tests/trainers/test_classification.py
@@ -92,7 +92,12 @@ def test_trainer(
         model = ClassificationTask(**model_kwargs)
 
         # Instantiate trainer
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.fit(model=model, datamodule=datamodule)
         try:
             trainer.test(model=model, datamodule=datamodule)
@@ -192,7 +197,12 @@ def test_no_rgb(
             root="tests/data/eurosat", batch_size=1, num_workers=0
         )
         model = ClassificationTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.validate(model=model, datamodule=datamodule)
 
     def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None:
@@ -200,7 +210,12 @@ def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None
             root="tests/data/eurosat", batch_size=1, num_workers=0
         )
         model = ClassificationTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.predict(model=model, datamodule=datamodule)
 
 
@@ -234,7 +249,12 @@ def test_trainer(
         model = MultiLabelClassificationTask(**model_kwargs)
 
         # Instantiate trainer
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.fit(model=model, datamodule=datamodule)
         try:
             trainer.test(model=model, datamodule=datamodule)
@@ -269,7 +289,12 @@ def test_no_rgb(
             root="tests/data/bigearthnet", batch_size=1, num_workers=0
         )
         model = MultiLabelClassificationTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.validate(model=model, datamodule=datamodule)
 
     def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None:
@@ -277,5 +302,10 @@ def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None
             root="tests/data/bigearthnet", batch_size=1, num_workers=0
        )
         model = MultiLabelClassificationTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.predict(model=model, datamodule=datamodule)
diff --git a/tests/trainers/test_detection.py b/tests/trainers/test_detection.py
index 73a9563e301..34262829a47 100644
--- a/tests/trainers/test_detection.py
+++ b/tests/trainers/test_detection.py
@@ -92,7 +92,12 @@ def test_trainer(
         model = ObjectDetectionTask(**model_kwargs)
 
         # Instantiate trainer
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.fit(model=model, datamodule=datamodule)
         try:
             trainer.test(model=model, datamodule=datamodule)
@@ -131,7 +136,12 @@ def test_no_rgb(
             root="tests/data/nasa_marine_debris", batch_size=1, num_workers=0
         )
         model = ObjectDetectionTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.validate(model=model, datamodule=datamodule)
 
     def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None:
@@ -139,5 +149,10 @@ def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None
             root="tests/data/nasa_marine_debris", batch_size=1, num_workers=0
         )
         model = ObjectDetectionTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.predict(model=model, datamodule=datamodule)
diff --git a/tests/trainers/test_regression.py b/tests/trainers/test_regression.py
index fa890acd76c..1d62822e399 100644
--- a/tests/trainers/test_regression.py
+++ b/tests/trainers/test_regression.py
@@ -67,7 +67,12 @@ def test_trainer(
         model.model = RegressionTestModel()
 
         # Instantiate trainer
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.fit(model=model, datamodule=datamodule)
         try:
             trainer.test(model=model, datamodule=datamodule)
@@ -160,7 +165,12 @@ def test_no_rgb(
             root="tests/data/cyclone", batch_size=1, num_workers=0
         )
         model = RegressionTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.validate(model=model, datamodule=datamodule)
 
     def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None:
@@ -168,5 +178,10 @@ def test_predict(self, model_kwargs: Dict[Any, Any], fast_dev_run: bool) -> None
             root="tests/data/cyclone", batch_size=1, num_workers=0
         )
         model = RegressionTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.predict(model=model, datamodule=datamodule)
diff --git a/tests/trainers/test_segmentation.py b/tests/trainers/test_segmentation.py
index f972c7e7339..16038b0d23e 100644
--- a/tests/trainers/test_segmentation.py
+++ b/tests/trainers/test_segmentation.py
@@ -90,7 +90,12 @@ def test_trainer(
         model = SemanticSegmentationTask(**model_kwargs)
 
         # Instantiate trainer
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.fit(model=model, datamodule=datamodule)
         try:
             trainer.test(model=model, datamodule=datamodule)
@@ -147,5 +152,10 @@ def test_no_rgb(
             root="tests/data/sen12ms", batch_size=1, num_workers=0
         )
         model = SemanticSegmentationTask(**model_kwargs)
-        trainer = Trainer(fast_dev_run=fast_dev_run, log_every_n_steps=1, max_epochs=1)
+        trainer = Trainer(
+            accelerator="cpu",
+            fast_dev_run=fast_dev_run,
+            log_every_n_steps=1,
+            max_epochs=1,
+        )
         trainer.validate(model=model, datamodule=datamodule)
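Note on the pattern above: every `Trainer` built in these tests now passes `accelerator="cpu"`, so the suite behaves the same on machines where CUDA or MPS is available, and the "GPU/MPS available but not used" warnings this provokes are filtered in pyproject.toml. A minimal sketch of the same idea factored into a shared pytest fixture follows; the `cpu_trainer` name, the hardcoded `fast_dev_run=True`, and the `lightning.pytorch` import path are illustrative assumptions, not part of this diff.

# Hypothetical helper (not part of this diff): a reusable fixture that builds
# the same CPU-only Trainer the tests above construct inline.
import pytest
from lightning.pytorch import Trainer


@pytest.fixture
def cpu_trainer() -> Trainer:
    # Forcing accelerator="cpu" keeps the tests on the CPU even when CUDA/MPS
    # is present; the resulting "GPU/MPS available but not used" warnings are
    # the ones ignored in the pyproject.toml hunk above.
    return Trainer(
        accelerator="cpu",
        fast_dev_run=True,  # assumption; the real tests parametrize this value
        log_every_n_steps=1,
        max_epochs=1,
    )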