Commit 479e2d8

Skip megatron training test on CPU nodes
Signed-off-by: John St John <jstjohn@nvidia.com>
jstjohn committed Aug 14, 2024
1 parent fb07b24 commit 479e2d8
Showing 1 changed file with 2 additions and 0 deletions.
2 changes: 2 additions & 0 deletions tests/collections/llm/test_mnist_model_nemo2.py
@@ -19,6 +19,7 @@
 from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple, TypedDict, TypeVar, Union
 
 import megatron.core.num_microbatches_calculator
+import pytest
 import pytorch_lightning as pl
 import torch
 import torch.distributed
@@ -492,6 +493,7 @@ def clean_parallel_state_context() -> Iterator[None]:
         _teardown_apex_megatron_cuda()
 
 
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="No GPU available")
 def test_train_mnist_litautoencoder_with_megatron_strategy_single_gpu(tmpdir):
     with clean_parallel_state_context():
         # Configure our custom Checkpointer
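The added decorator tells pytest to still collect the test but skip it whenever no CUDA device is visible, so CPU-only CI nodes report it as skipped rather than failed. A minimal standalone sketch of the same pattern (the file name and test body below are illustrative, not part of this commit):

    # test_gpu_guard_sketch.py -- hypothetical file, for illustration only
    import pytest
    import torch


    @pytest.mark.skipif(not torch.cuda.is_available(), reason="No GPU available")
    def test_needs_gpu():
        # Collected on every node, but executed only where torch detects a
        # CUDA device; elsewhere pytest marks it "skipped" with the reason above.
        x = torch.ones(2, 2, device="cuda")
        assert x.sum().item() == 4.0

On a CPU-only node, running pytest on such a file should list the test under "skipped" with the given reason string instead of raising a CUDA initialization error at test time.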
