
Commit

rename org Lightning AI
Borda committed Jul 1, 2022
1 parent ae3f731 commit e260f39
Showing 15 changed files with 32 additions and 32 deletions.
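The edits below are mechanical replacements of the old GitHub org in links and identifiers. As a rough illustration, a rename like this can be scripted; the following is a minimal sketch (a hypothetical helper, not the tooling actually used for this commit), covering only the URL portion of the change:

import pathlib

OLD = "https://github.com/PyTorchLightning/pytorch-lightning"
NEW = "https://github.com/Lightning-AI/lightning"

# Rewrite the old org URL in every Python file under the repo root.
for path in pathlib.Path(".").rglob("*.py"):
    text = path.read_text(encoding="utf-8")
    if OLD in text:
        path.write_text(text.replace(OLD, NEW), encoding="utf-8")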
2 changes: 1 addition & 1 deletion pl_examples/basic_examples/mnist_datamodule.py
@@ -36,7 +36,7 @@ class _MNIST(Dataset):
"""Carbon copy of ``tests.helpers.datasets.MNIST``.
We cannot import the tests as they are not distributed with the package.
- See https://github.com/PyTorchLightning/pytorch-lightning/pull/7614#discussion_r671183652 for more context.
+ See https://github.com/Lightning-AI/lightning/pull/7614#discussion_r671183652 for more context.
"""

RESOURCES = (
2 changes: 1 addition & 1 deletion pl_examples/domain_templates/reinforce_learn_ppo.py
@@ -125,7 +125,7 @@ def get_log_prob(self, pi: Normal, actions: torch.Tensor):


class ExperienceSourceDataset(IterableDataset):
"""Implementation from PyTorch Lightning Bolts: https://github.com/PyTorchLightning/lightning-
"""Implementation from PyTorch Lightning Bolts: https://github.com/Lightning-AI/lightning-
bolts/blob/master/pl_bolts/datamodules/experience_source.py.
Basic experience source dataset. Takes a generate_batch function that returns an iterator. The logic for the
22 changes: 11 additions & 11 deletions pytorch_lightning/loggers/neptune.py
@@ -54,7 +54,7 @@

log = logging.getLogger(__name__)

- _INTEGRATION_VERSION_KEY = "source_code/integrations/pytorch-lightning"
+ _INTEGRATION_VERSION_KEY = "source_code/integrations/lightning"

# kwargs used in previous NeptuneLogger version, now deprecated
_LEGACY_NEPTUNE_INIT_KWARGS = [
@@ -113,7 +113,7 @@ class NeptuneLogger(LightningLoggerBase):
neptune_logger = NeptuneLogger(
api_key="ANONYMOUS", # replace with your own
project="common/pytorch-lightning-integration", # format "<WORKSPACE/PROJECT>"
project="common/lightning-integration", # format "<WORKSPACE/PROJECT>"
tags=["training", "resnet"], # optional
)
trainer = Trainer(max_epochs=10, logger=neptune_logger)
@@ -157,7 +157,7 @@ def any_lightning_module_function_or_hook(self):
.. code-block:: python
- neptune_logger = NeptuneLogger(project="common/pytorch-lightning-integration")
+ neptune_logger = NeptuneLogger(project="common/lightning-integration")
trainer = pl.Trainer(logger=neptune_logger)
model = ...
@@ -182,7 +182,7 @@ def any_lightning_module_function_or_hook(self):
.. code-block:: python
- neptune_logger = NeptuneLogger(project="common/pytorch-lightning-integration", log_model_checkpoints=False)
+ neptune_logger = NeptuneLogger(project="common/lightning-integration", log_model_checkpoints=False)
**Pass additional parameters to the Neptune run**
@@ -194,7 +194,7 @@ def any_lightning_module_function_or_hook(self):
from pytorch_lightning.loggers import NeptuneLogger
neptune_logger = NeptuneLogger(
project="common/pytorch-lightning-integration",
project="common/lightning-integration",
name="lightning-run",
description="mlp quick run with pytorch-lightning",
tags=["mlp", "quick-run"],
@@ -216,10 +216,10 @@ def any_lightning_module_function_or_hook(self):
See Also:
- Read about
`what object you can log to Neptune <https://docs.neptune.ai/you-should-know/what-can-you-log-and-display>`_.
- - Check `example run <https://app.neptune.ai/o/common/org/pytorch-lightning-integration/e/PTL-1/all>`_
+ - Check `example run <https://app.neptune.ai/o/common/org/lightning-integration/e/PTL-1/all>`_
with multiple types of metadata logged.
- For more detailed info check
- `user guide <https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning>`_.
+ `user guide <https://docs.neptune.ai/integrations-and-supported-tools/model-training/lightning>`_.
Args:
api_key: Optional.
@@ -350,7 +350,7 @@ def _verify_input_arguments(
" - https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
"The NeptuneLogger was re-written to use the neptune.new Python API\n"
" - https://neptune.ai/blog/neptune-new\n"
" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning\n"
" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/lightning\n"
"You should use arguments accepted by either NeptuneLogger.init() or neptune.init()"
)

@@ -377,7 +377,7 @@ def _verify_input_arguments(
" - https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
"The NeptuneLogger was re-written to use the neptune.new Python API\n"
" - https://neptune.ai/blog/neptune-new\n"
" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning\n"
" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/lightning\n"
)

# check if user passed redundant neptune.init arguments when passed run
@@ -477,7 +477,7 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: #
neptune_logger = NeptuneLogger(
api_key="ANONYMOUS",
project="common/pytorch-lightning-integration"
project="common/lightning-integration"
)
neptune_logger.log_hyperparams(PARAMS)
@@ -627,7 +627,7 @@ def _signal_deprecated_api_usage(f_name, sample_code, raise_exception=False):
f" - https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
f"The NeptuneLogger was re-written to use the neptune.new Python API\n"
f" - https://neptune.ai/blog/neptune-new\n"
f" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning\n"
f" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/lightning\n"
f"Instead of `logger.{f_name}` you can use:\n"
f"\t{sample_code}"
)
2 changes: 1 addition & 1 deletion pytorch_lightning/loops/epoch/evaluation_epoch_loop.py
@@ -195,7 +195,7 @@ def _reload_dataloader_state_dict(self, data_fetcher: AbstractDataFetcher) -> No
if isinstance(dataloader, CombinedLoader):
raise MisconfigurationException(
"Reloading support hasn't been implemented for `CombinedLoader`. You can request it by opening an issue"
" in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
" in `https://github.com/Lightning-AI/lightning/issues`."
)
assert isinstance(dataloader, DataLoader)
_reload_dataloader_state_dict(dataloader, self._dataloader_state_dict)
2 changes: 1 addition & 1 deletion pytorch_lightning/plugins/io/torch_plugin.py
@@ -54,7 +54,7 @@ def save_checkpoint(self, checkpoint: Dict[str, Any], path: _PATH, storage_optio
atomic_save(checkpoint, path)
except AttributeError as err:
# todo (sean): is this try catch necessary still?
- # https://github.com/PyTorchLightning/pytorch-lightning/pull/431
+ # https://github.com/Lightning-AI/lightning/pull/431
key = pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY
checkpoint.pop(key, None)
rank_zero_warn(f"Warning, `{key}` dropped from checkpoint. An attribute is not picklable: {err}")
2 changes: 1 addition & 1 deletion pytorch_lightning/plugins/precision/ipu.py
@@ -72,7 +72,7 @@ def optimizer_step(
# we lack coverage here and IPUs are (currently) limited - something to explore if there's demand
raise MisconfigurationException(
"Skipping backward by returning `None` from your `training_step` is not implemented for IPUs."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`"
" Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`"
" requesting this feature."
)
return closure_result
2 changes: 1 addition & 1 deletion pytorch_lightning/plugins/precision/tpu.py
@@ -46,7 +46,7 @@ def optimizer_step(
# we lack coverage here so disable this - something to explore if there's demand
raise MisconfigurationException(
"Skipping backward by returning `None` from your `training_step` is not implemented for TPUs."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`"
" Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`"
" requesting this feature."
)
return closure_result
4 changes: 2 additions & 2 deletions pytorch_lightning/setup_tools.py
@@ -59,7 +59,7 @@ def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
# drop images from readme
text = text.replace("![PT to PL](docs/source/_static/images/general/pl_quick_start_full_compressed.gif)", "")

- # https://github.com/PyTorchLightning/pytorch-lightning/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
+ # https://github.com/Lightning-AI/lightning/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
github_source_url = os.path.join(homepage, "raw", version)
# replace relative repository path to absolute link to the release
# do not replace all "docs" as in the readme we refer to some other sources with a particular path to docs
@@ -81,7 +81,7 @@ def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
# todo: wrap content as commented description
text = re.sub(rf"{skip_begin}.+?{skip_end}", "<!-- -->", text, flags=re.IGNORECASE + re.DOTALL)

- # # https://github.com/Borda/pytorch-lightning/releases/download/1.1.0a6/codecov_badge.png
+ # # https://github.com/Borda/lightning/releases/download/1.1.0a6/codecov_badge.png
# github_release_url = os.path.join(homepage, "releases", "download", version)
# # download badge and replace url with local file
# text = _parse_for_badge(text, github_release_url)
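The comments in this hunk describe rewriting relative README image paths into absolute links pinned to a release tag. A simplified sketch of that idea (an assumed shape for illustration, not the actual `_load_readme_description` implementation):

import os
import re

def absolutize_image_links(text: str, homepage: str, version: str) -> str:
    # Build e.g. <homepage>/raw/<version> and pin relative image paths to it,
    # in the spirit of the comments above.
    github_source_url = os.path.join(homepage, "raw", version)
    return re.sub(
        r"\((docs/source/_static/[^)]+)\)",
        lambda m: f"({github_source_url}/{m.group(1)})",
        text,
    )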
2 changes: 1 addition & 1 deletion pytorch_lightning/strategies/launchers/spawn.py
@@ -53,7 +53,7 @@ def __init__(self, strategy: Strategy) -> None:
def is_interactive_compatible(self) -> bool:
# The start method 'spawn' is currently the only one that works with DDP and CUDA support
# The start method 'fork' is the only one supported in Jupyter environments but not compatible with CUDA
- # For more context, see https://github.com/PyTorchLightning/pytorch-lightning/issues/7550
+ # For more context, see https://github.com/Lightning-AI/lightning/issues/7550
return self._start_method == "fork" and self._strategy.root_device.type != "cuda"

def launch(self, function: Callable, *args: Any, trainer: Optional["pl.Trainer"] = None, **kwargs: Any) -> Any:
6 changes: 3 additions & 3 deletions pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -221,7 +221,7 @@ def _init_deterministic(self, deterministic: Optional[bool]) -> None:
torch.use_deterministic_algorithms(self.deterministic)
if self.deterministic:
# fixing non-deterministic part of horovod
- # https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
+ # https://github.com/Lightning-AI/lightning/pull/1572/files#r420279383
os.environ["HOROVOD_FUSION_THRESHOLD"] = "0"

# https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
@@ -605,7 +605,7 @@ def _check_strategy_and_fallback(self) -> None:
if _TPU_AVAILABLE:
raise MisconfigurationException(
"`accelerator='ddp_cpu'` is not supported on TPU machines. "
"Learn more: https://github.com/PyTorchLightning/pytorch-lightning/issues/7810"
"Learn more: https://github.com/Lightning-AI/lightning/issues/7810"
)
if self._devices_flag == 1 and self._num_nodes_flag > 1:
strategy_flag = DDPStrategy.strategy_name
@@ -725,7 +725,7 @@ def _validate_precision_choice(self) -> None:
if self._precision_flag == 64:
raise MisconfigurationException(
"`Trainer(accelerator='tpu', precision=64)` is not implemented."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`"
" Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`"
" requesting this feature."
)
if self._precision_plugin_flag and not isinstance(
@@ -182,7 +182,7 @@ def check_logging(cls, fx_name: str) -> None:
if fx_name not in cls.functions:
raise RuntimeError(
f"Logging inside `{fx_name}` is not implemented."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
" Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`."
)

if cls.functions[fx_name] is None:
@@ -125,7 +125,7 @@ def __post_init__(self) -> None:
def _parse_reduce_fx(self) -> None:
error = (
"Only `self.log(..., reduce_fx={min,max,mean,sum})` are currently supported."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
" Please, open an issue in `https://github.com/Lightning-AI/lightning/issues`."
f" Found: {self.reduce_fx}"
)
if isinstance(self.reduce_fx, str):
2 changes: 1 addition & 1 deletion pytorch_lightning/utilities/migration.py
@@ -28,7 +28,7 @@ class pl_legacy_patch:
unpickling old checkpoints. The following patches apply.
1. ``pytorch_lightning.utilities.argparse._gpus_arg_default``: Applies to all checkpoints saved prior to
- version 1.2.8. See: https://github.com/PyTorchLightning/pytorch-lightning/pull/6898
+ version 1.2.8. See: https://github.com/Lightning-AI/lightning/pull/6898
2. ``pytorch_lightning.utilities.argparse_utils``: A module that was deprecated in 1.2 and removed in 1.4,
but still needs to be available for import for legacy checkpoints.
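For reference, the `pl_legacy_patch` docstring above describes a compatibility shim for unpickling old checkpoints; a minimal usage sketch, assuming the context-manager usage the class is designed for:

import torch

from pytorch_lightning.utilities.migration import pl_legacy_patch

# Temporarily restore the legacy attribute/module listed in the docstring
# so that a checkpoint saved before 1.2.8 can be unpickled, then clean up.
with pl_legacy_patch():
    checkpoint = torch.load("old_model.ckpt")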
8 changes: 4 additions & 4 deletions setup.py
@@ -69,7 +69,7 @@ def _load_py_module(fname, pkg="pytorch_lightning"):
author=about.__author__,
author_email=about.__author_email__,
url=about.__homepage__,
- download_url="https://github.com/PyTorchLightning/pytorch-lightning",
+ download_url="https://github.com/Lightning-AI/lightning",
license=about.__license__,
packages=find_packages(exclude=["tests*", "pl_examples*", "legacy*"]),
include_package_data=True,
@@ -82,9 +82,9 @@ def _load_py_module(fname, pkg="pytorch_lightning"):
install_requires=setup_tools._load_requirements(_PATH_REQUIRE),
extras_require=extras,
project_urls={
"Bug Tracker": "https://github.com/PyTorchLightning/pytorch-lightning/issues",
"Documentation": "https://pytorch-lightning.rtfd.io/en/latest/",
"Source Code": "https://github.com/PyTorchLightning/pytorch-lightning",
"Bug Tracker": "https://github.com/Lightning-AI/lightning/issues",
"Documentation": "https://lightning.rtfd.io/en/latest/",
"Source Code": "https://github.com/Lightning-AI/lightning",
},
classifiers=[
"Environment :: Console",
4 changes: 2 additions & 2 deletions tests/loggers/test_neptune.py
@@ -47,7 +47,7 @@ class Run:

def __setitem__(self, key, value):
# called once
- assert key == "source_code/integrations/pytorch-lightning"
+ assert key == "source_code/integrations/lightning"
assert value == __version__

def wait(self):
@@ -89,7 +89,7 @@ def test_neptune_online(self, neptune):
self.assertEqual(created_run_mock.__getitem__.call_count, 2)
self.assertEqual(created_run_mock.__setitem__.call_count, 1)
created_run_mock.__getitem__.assert_has_calls([call("sys/id"), call("sys/name")], any_order=True)
- created_run_mock.__setitem__.assert_called_once_with("source_code/integrations/pytorch-lightning", __version__)
+ created_run_mock.__setitem__.assert_called_once_with("source_code/integrations/lightning", __version__)

@patch("pytorch_lightning.loggers.neptune.Run", Run)
def test_online_with_custom_run(self, neptune):
