diff --git a/tests/algorithms/test_gradient_clipping.py b/tests/algorithms/test_gradient_clipping.py
index d749362801..eb95c1359a 100644
--- a/tests/algorithms/test_gradient_clipping.py
+++ b/tests/algorithms/test_gradient_clipping.py
@@ -20,7 +20,7 @@ def simple_model_with_grads():
     # Set up small NN with one linear layer with no bias + softmax, so only
     # one set of params and get some gradients.
-    N, hin, num_classes = 8, 4, 3
+    N, hin, num_classes = 4, 2, 2
     x = torch.rand((N, hin))
     y = torch.randint(high=num_classes - 1, size=(N,))
     model = nn.Sequential(nn.Linear(hin, num_classes, bias=False), nn.Softmax(dim=1))
@@ -47,8 +47,6 @@ def __init__(self, n_ch, num_fmaps, h, num_classes, filter_size):
             self.mlp = nn.Sequential(
                 nn.Linear(num_fmaps, h),
                 nn.ReLU(),
-                nn.Linear(h, h),
-                nn.ReLU(),
                 nn.Linear(h, num_classes),
                 nn.Softmax(dim=1),
             )
@@ -60,8 +58,8 @@ def forward(self, x):
             return out
 
     # Generate some gradients.
-    N, n_ch, num_fmaps, h, num_classes, filter_size = 8, 3, 4, 4, 3, 3
-    x = torch.rand((N, n_ch, 16, 16))
+    N, n_ch, num_fmaps, h, num_classes, filter_size = 4, 1, 2, 2, 2, 2
+    x = torch.rand((N, n_ch, 8, 8))
     y = torch.randint(high=num_classes - 1, size=(N,))
     model = myNN(n_ch, num_fmaps, h, num_classes, filter_size)
diff --git a/tests/test_precision.py b/tests/test_precision.py
index 0a2f40a559..a23ab5f11b 100644
--- a/tests/test_precision.py
+++ b/tests/test_precision.py
@@ -23,14 +23,14 @@ def get_trainer(precision: Precision, precision_config: Optional[dict[str, Any]]
     return Trainer(
         model=composer_resnet('resnet18'),
         train_dataloader=DataLoader(
-            dataset=RandomImageDataset(size=128),
-            batch_size=128,
+            dataset=RandomImageDataset(size=1024),
+            batch_size=512,
             persistent_workers=False,
             num_workers=0,
         ),
         eval_dataloader=DataLoader(
-            dataset=RandomImageDataset(size=128),
-            batch_size=128,
+            dataset=RandomImageDataset(size=1024),
+            batch_size=512,
             persistent_workers=False,
             num_workers=0,
         ),
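
Reviewer note: a minimal sketch of what the slimmed-down simple_model_with_grads fixture sets up after this change, assuming the same torch/torch.nn imports the test file already uses. The backward pass shown here is an assumption about how the fixture produces its gradients, not code copied from the file:

import torch
import torch.nn as nn

# Reduced fixture: 4 samples, 2 input features, 2 classes.
N, hin, num_classes = 4, 2, 2
x = torch.rand((N, hin))
y = torch.randint(high=num_classes - 1, size=(N,))
model = nn.Sequential(nn.Linear(hin, num_classes, bias=False), nn.Softmax(dim=1))

# Assumed step: one forward/backward pass so the single weight matrix
# receives gradients for the clipping code under test.
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()

# Old fixture (N, hin, num_classes = 8, 4, 3): a 3x4 weight, 12 params.
# New fixture: a 2x2 weight, 4 params. The clipping tests still see a real
# gradient tensor, just a smaller one, so they should run faster.
print(model[0].weight.grad.shape)  # torch.Size([2, 2])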