diff --git a/pytorch_lightning/trainer/supporters.py b/pytorch_lightning/trainer/supporters.py
index 2235f3f70561f..26f27ce56f1c7 100644
--- a/pytorch_lightning/trainer/supporters.py
+++ b/pytorch_lightning/trainer/supporters.py
@@ -257,7 +257,7 @@ def __init__(self, datasets: Union[Sequence, Mapping], mode: str):
 
         Args:
             datasets: a sequence/mapping datasets. Can be a collections of torch.utils.Dataset,
                 Iterable or even None.
-            mode: whether to use the minimum number of batches in all samples or the maximum 
+            mode: whether to use the minimum number of batches in all samples or the maximum
                 number of batches in all samples.
         """
@@ -307,7 +307,7 @@ def __len__(self) -> int:
 class CombinedLoader(object):
     """
     Combines different dataloaders and allows sampling in parallel.
-    
+
     Supported modes are 'min_size', which raises StopIteration after the shortest loader
     (the one with the lowest number of batches) is done, and 'max_size_cycle` which raises
     StopIteration after the longest loader (the one with most batches) is done, while cycling
diff --git a/tests/base/model_train_dataloaders.py b/tests/base/model_train_dataloaders.py
index 65873cfa8d6c4..0cc6b7e9e14db 100644
--- a/tests/base/model_train_dataloaders.py
+++ b/tests/base/model_train_dataloaders.py
@@ -42,7 +42,7 @@ def train_dataloader__multiple_mapping(self):
         """Return a mapping loaders with different lengths"""
         return {'a': self.dataloader(train=True, num_samples=100),
                 'b': self.dataloader(train=True, num_samples=50)}
-    
+
     def train_dataloader__multiple_sequence(self):
         return [self.dataloader(train=True, num_samples=100),
                 self.dataloader(train=True, num_samples=50)]
diff --git a/tests/base/model_train_steps.py b/tests/base/model_train_steps.py
index e12d004db8f98..e39dd47aa565b 100644
--- a/tests/base/model_train_steps.py
+++ b/tests/base/model_train_steps.py
@@ -174,4 +174,3 @@ def training_step__multiple_dataloaders(self, batch, batch_idx, optimizer_idx=No
             }
         )
         return output
-
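
As context for the docstring touched above, a minimal usage sketch of CombinedLoader under the two documented modes. It assumes the constructor CombinedLoader(loaders, mode) from pytorch_lightning.trainer.supporters at this revision; the dataloaders, tensor shapes, and batch counts below are illustrative only and are not part of the patch.

# Illustrative sketch only, not part of the patch: exercising the two modes
# described in the CombinedLoader docstring above. Assumes the constructor
# signature CombinedLoader(loaders, mode) from pytorch_lightning.trainer.supporters.
import torch
from torch.utils.data import DataLoader, TensorDataset

from pytorch_lightning.trainer.supporters import CombinedLoader

loaders = {
    'a': DataLoader(TensorDataset(torch.randn(100, 4)), batch_size=10),  # 10 batches
    'b': DataLoader(TensorDataset(torch.randn(50, 4)), batch_size=10),   # 5 batches
}

# 'min_size' stops after the shortest loader ('b'), so 5 combined batches;
# 'max_size_cycle' keeps cycling 'b' until the longest loader ('a') is done, so 10.
for mode in ('min_size', 'max_size_cycle'):
    combined = CombinedLoader(loaders, mode)
    n_batches = sum(1 for _ in combined)  # each batch is a mapping {'a': ..., 'b': ...}
    print(mode, n_batches)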