fix/enable - check F401 (#5201)
* refactor - check F401

* missed

* fix
Borda authored Dec 21, 2020
1 parent 35fd6e9 commit 0f36525
Showing 73 changed files with 115 additions and 189 deletions.
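For context: flake8's F401 rule reports an import that the importing module never references. Most of the diffs below simply delete such imports; in package `__init__.py` files, where an import exists purely as a public re-export, the line is kept and marked with a targeted `# noqa: F401` instead. A minimal illustrative sketch of both cases (the module and package names are hypothetical, not part of this commit):

# hypothetical file: mypkg/util.py
import os
import sys  # never referenced below -> flake8 reports "F401 'sys' imported but unused"


def cwd() -> str:
    # `os` is used here, so its import does not trigger F401
    return os.getcwd()


# hypothetical file: mypkg/__init__.py
# unused inside this file, but deliberately re-exported so callers can write `from mypkg import cwd`;
# the trailing comment silences F401 for this line only
from mypkg.util import cwd  # noqa: F401

Enabling the check itself usually amounts to dropping F401 from the ignore list in the project's flake8 configuration; that configuration change is not among the files shown below.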
1 change: 0 additions & 1 deletion docs/source/conf.py
@@ -15,7 +15,6 @@
# import m2r
import builtins
import glob
import inspect
import os
import shutil
import sys
5 changes: 0 additions & 5 deletions pl_examples/pytorch_ecosystem/pytorch_geometric/cora_dna.py
@@ -5,16 +5,12 @@
model can be easily torch-scripted, thanks to Pytorch Geometric.
"""
# python imports
import os
import os.path as osp
import sys
from functools import partial
from collections import namedtuple
from argparse import ArgumentParser
from typing import List, Optional, NamedTuple

# thrid parties libraries
import numpy as np
from torch import nn
import torch
from torch import Tensor
@@ -32,7 +28,6 @@
try:
# Pytorch Geometric imports
from torch_geometric.nn import DNAConv, MessagePassing
from torch_geometric.data import DataLoader
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.data import NeighborSampler
24 changes: 12 additions & 12 deletions pytorch_lightning/accelerators/__init__.py
@@ -11,15 +11,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.accelerators.cpu_accelerator import CPUAccelerator
from pytorch_lightning.accelerators.ddp2_accelerator import DDP2Accelerator
from pytorch_lightning.accelerators.ddp_accelerator import DDPAccelerator
from pytorch_lightning.accelerators.ddp_spawn_accelerator import DDPSpawnAccelerator
from pytorch_lightning.accelerators.ddp_cpu_spawn_accelerator import DDPCPUSpawnAccelerator
from pytorch_lightning.accelerators.dp_accelerator import DataParallelAccelerator
from pytorch_lightning.accelerators.gpu_accelerator import GPUAccelerator
from pytorch_lightning.accelerators.tpu_accelerator import TPUAccelerator
from pytorch_lightning.accelerators.horovod_accelerator import HorovodAccelerator
from pytorch_lightning.accelerators.ddp_hpc_accelerator import DDPHPCAccelerator
from pytorch_lightning.accelerators.ddp_cpu_hpc_accelerator import DDPCPUHPCAccelerator
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.accelerators.cpu_accelerator import CPUAccelerator # noqa: F401
from pytorch_lightning.accelerators.ddp2_accelerator import DDP2Accelerator # noqa: F401
from pytorch_lightning.accelerators.ddp_accelerator import DDPAccelerator # noqa: F401
from pytorch_lightning.accelerators.ddp_spawn_accelerator import DDPSpawnAccelerator # noqa: F401
from pytorch_lightning.accelerators.ddp_cpu_spawn_accelerator import DDPCPUSpawnAccelerator # noqa: F401
from pytorch_lightning.accelerators.dp_accelerator import DataParallelAccelerator # noqa: F401
from pytorch_lightning.accelerators.gpu_accelerator import GPUAccelerator # noqa: F401
from pytorch_lightning.accelerators.tpu_accelerator import TPUAccelerator # noqa: F401
from pytorch_lightning.accelerators.horovod_accelerator import HorovodAccelerator # noqa: F401
from pytorch_lightning.accelerators.ddp_hpc_accelerator import DDPHPCAccelerator # noqa: F401
from pytorch_lightning.accelerators.ddp_cpu_hpc_accelerator import DDPCPUHPCAccelerator # noqa: F401
from pytorch_lightning.accelerators.accelerator import Accelerator # noqa: F401
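The imports above are never referenced inside this `__init__.py` itself; they exist so that downstream code can import accelerator classes from the package root. Deleting them would break the public API, so each line is kept and annotated with `# noqa: F401`. A small usage sketch of what the re-exports enable (assertions only, no Trainer wiring):

# these names resolve because accelerators/__init__.py re-exports them (see the noqa'd lines above)
from pytorch_lightning.accelerators import Accelerator, CPUAccelerator, GPUAccelerator

# in this release the concrete accelerators subclass the common Accelerator base
assert issubclass(CPUAccelerator, Accelerator)
assert issubclass(GPUAccelerator, Accelerator)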
1 change: 0 additions & 1 deletion pytorch_lightning/accelerators/accelerator.py
@@ -15,7 +15,6 @@
from typing import Any, Optional, Union

import torch
import torch.distributed as torch_distrib
from torch.optim import Optimizer

from pytorch_lightning.cluster_environments import ClusterEnvironment
2 changes: 1 addition & 1 deletion pytorch_lightning/accelerators/cpu_accelerator.py
@@ -17,7 +17,7 @@

from pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.utilities import AMPType, rank_zero_warn
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.exceptions import MisconfigurationException


7 changes: 1 addition & 6 deletions pytorch_lightning/accelerators/ddp2_accelerator.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import Any, List, Optional, Union

import torch
@@ -26,13 +25,9 @@
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.plugins.ddp_plugin import DDPPlugin
from pytorch_lightning.plugins.rpc_plugin import RPCPlugin
from pytorch_lightning.utilities import _HYDRA_AVAILABLE, AMPType
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, rank_zero_only, sync_ddp_if_available

if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path


class DDP2Accelerator(Accelerator):

6 changes: 0 additions & 6 deletions pytorch_lightning/accelerators/ddp_cpu_hpc_accelerator.py
@@ -13,15 +13,9 @@
# limitations under the License
from typing import Optional

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.ddp_hpc_accelerator import DDPHPCAccelerator
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.plugins.ddp_plugin import DDPPlugin
from pytorch_lightning.utilities import _HYDRA_AVAILABLE

if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path


class DDPCPUHPCAccelerator(DDPHPCAccelerator):
6 changes: 1 addition & 5 deletions pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py
@@ -26,7 +26,7 @@
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.plugins.ddp_plugin import DDPPlugin
from pytorch_lightning.plugins.rpc_plugin import RPCPlugin
from pytorch_lightning.utilities import _HYDRA_AVAILABLE, AMPType
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import (
all_gather_ddp_if_available,
find_free_network_port,
@@ -35,10 +35,6 @@
sync_ddp_if_available,
)

if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path


class DDPCPUSpawnAccelerator(Accelerator):

7 changes: 1 addition & 6 deletions pytorch_lightning/accelerators/ddp_hpc_accelerator.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import Any, List, Optional, Union

import torch
@@ -26,13 +25,9 @@
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.plugins.ddp_plugin import DDPPlugin
from pytorch_lightning.plugins.rpc_plugin import RPCPlugin
from pytorch_lightning.utilities import _HYDRA_AVAILABLE, AMPType
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, rank_zero_only, sync_ddp_if_available

if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path


class DDPHPCAccelerator(Accelerator):

6 changes: 1 addition & 5 deletions pytorch_lightning/accelerators/ddp_spawn_accelerator.py
@@ -27,7 +27,7 @@
from pytorch_lightning.distributed import LightningDistributed
from pytorch_lightning.plugins.ddp_plugin import DDPPlugin
from pytorch_lightning.plugins.rpc_plugin import RPCPlugin
from pytorch_lightning.utilities import _HYDRA_AVAILABLE, AMPType
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.cloud_io import atomic_save
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.distributed import (
@@ -39,10 +39,6 @@
)
from pytorch_lightning.utilities.seed import seed_everything

if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path


class DDPSpawnAccelerator(Accelerator):

3 changes: 1 addition & 2 deletions pytorch_lightning/accelerators/dp_accelerator.py
@@ -11,12 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from typing import Optional

import torch
from torch import optim

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.core.lightning import LightningModule
1 change: 0 additions & 1 deletion pytorch_lightning/accelerators/gpu_accelerator.py
@@ -15,7 +15,6 @@

import torch

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.distributed.dist import LightningDistributed
1 change: 0 additions & 1 deletion pytorch_lightning/accelerators/horovod_accelerator.py
@@ -17,7 +17,6 @@
import torch
from torch.optim.lr_scheduler import _LRScheduler

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.utilities import _HOROVOD_AVAILABLE, AMPType
1 change: 0 additions & 1 deletion pytorch_lightning/accelerators/tpu_accelerator.py
@@ -24,7 +24,6 @@
from pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.core import LightningModule
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities import (
_TPU_AVAILABLE,
move_data_to_device,
2 changes: 0 additions & 2 deletions pytorch_lightning/callbacks/early_stopping.py
@@ -19,12 +19,10 @@
Monitor a metric and stop training when it stops improving.
"""
import os

import numpy as np
import torch

from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_info, rank_zero_warn, _TPU_AVAILABLE

1 change: 0 additions & 1 deletion pytorch_lightning/callbacks/model_checkpoint.py
@@ -33,7 +33,6 @@
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only, rank_zero_warn
from pytorch_lightning.plugins.rpc_plugin import RPCPlugin
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.exceptions import MisconfigurationException

6 changes: 3 additions & 3 deletions pytorch_lightning/cluster_environments/__init__.py
@@ -11,6 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.cluster_environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.cluster_environments.slurm_environment import SLURMEnvironment
from pytorch_lightning.cluster_environments.torchelastic_environment import TorchElasticEnvironment
from pytorch_lightning.cluster_environments.cluster_environment import ClusterEnvironment # noqa: F401
from pytorch_lightning.cluster_environments.slurm_environment import SLURMEnvironment # noqa: F401
from pytorch_lightning.cluster_environments.torchelastic_environment import TorchElasticEnvironment # noqa: F401
8 changes: 2 additions & 6 deletions pytorch_lightning/core/lightning.py
@@ -23,7 +23,7 @@
from abc import ABC
from argparse import Namespace
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import ScriptModule, Tensor
@@ -37,14 +37,11 @@
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES, ModelIO
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.utilities import _TPU_AVAILABLE, rank_zero_warn
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args

if _TPU_AVAILABLE:
import torch_xla.core.xla_model as xm


class LightningModule(
ABC,
@@ -1455,7 +1452,6 @@ def save_hyperparameters(self, *args, frame=None) -> None:
args: single object of `dict`, `NameSpace` or `OmegaConf`
or string names or argumenst from class `__init__`
>>> from collections import OrderedDict
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
2 changes: 1 addition & 1 deletion pytorch_lightning/core/optimizer.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import types
from typing import Any, Callable, Optional
from typing import Callable, Optional
from weakref import proxy

from torch.optim.optimizer import Optimizer
2 changes: 1 addition & 1 deletion pytorch_lightning/distributed/__init__.py
@@ -11,4 +11,4 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.distributed.dist import LightningDistributed
from pytorch_lightning.distributed.dist import LightningDistributed # noqa: F401
10 changes: 5 additions & 5 deletions pytorch_lightning/loggers/__init__.py
@@ -28,35 +28,35 @@
# needed to prevent ImportError and duplicated logs.
environ["COMET_DISABLE_AUTO_LOGGING"] = "1"

from pytorch_lightning.loggers.comet import CometLogger
from pytorch_lightning.loggers.comet import CometLogger # noqa: F401
except ImportError: # pragma: no-cover
del environ["COMET_DISABLE_AUTO_LOGGING"] # pragma: no-cover
else:
__all__.append('CometLogger')

try:
from pytorch_lightning.loggers.mlflow import MLFlowLogger
from pytorch_lightning.loggers.mlflow import MLFlowLogger # noqa: F401
except ImportError: # pragma: no-cover
pass # pragma: no-cover
else:
__all__.append('MLFlowLogger')

try:
from pytorch_lightning.loggers.neptune import NeptuneLogger
from pytorch_lightning.loggers.neptune import NeptuneLogger # noqa: F401
except ImportError: # pragma: no-cover
pass # pragma: no-cover
else:
__all__.append('NeptuneLogger')

try:
from pytorch_lightning.loggers.test_tube import TestTubeLogger
from pytorch_lightning.loggers.test_tube import TestTubeLogger # noqa: F401
except ImportError: # pragma: no-cover
pass # pragma: no-cover
else:
__all__.append('TestTubeLogger')

try:
from pytorch_lightning.loggers.wandb import WandbLogger
from pytorch_lightning.loggers.wandb import WandbLogger # noqa: F401
except ImportError: # pragma: no-cover
pass # pragma: no-cover
else:
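Each optional logging backend above is imported inside try/except ImportError, and its name is appended to `__all__` only when the import succeeds; the new `# noqa: F401` markers tell flake8 that these successful imports are deliberate re-exports rather than unused names. A consumer-side sketch of the same idea (the project name is a placeholder):

import pytorch_lightning.loggers as pl_loggers

# __all__ only lists the loggers whose backend packages are installed
if "WandbLogger" in pl_loggers.__all__:
    logger = pl_loggers.WandbLogger(project="demo-project")  # placeholder project name
else:
    logger = None  # fall back to another logger or the Trainer default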
2 changes: 1 addition & 1 deletion pytorch_lightning/loggers/neptune.py
@@ -17,7 +17,7 @@
--------------
"""
from argparse import Namespace
from typing import Any, Dict, Iterable, List, Optional, Union
from typing import Any, Dict, Iterable, Optional, Union

try:
import neptune
2 changes: 1 addition & 1 deletion pytorch_lightning/loggers/wandb.py
@@ -18,7 +18,7 @@
"""
import os
from argparse import Namespace
from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, Optional, Union

import torch.nn as nn

6 changes: 3 additions & 3 deletions pytorch_lightning/metrics/__init__.py
@@ -11,9 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.metrics.metric import Metric
from pytorch_lightning.metrics.metric import Metric # noqa: F401

from pytorch_lightning.metrics.classification import (
from pytorch_lightning.metrics.classification import ( # noqa: F401
Accuracy,
Precision,
Recall,
@@ -25,7 +25,7 @@
F1,
)

from pytorch_lightning.metrics.regression import (
from pytorch_lightning.metrics.regression import ( # noqa: F401
MeanSquaredError,
MeanAbsoluteError,
MeanSquaredLogError,
14 changes: 7 additions & 7 deletions pytorch_lightning/metrics/classification/__init__.py
@@ -11,10 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.metrics.classification.accuracy import Accuracy
from pytorch_lightning.metrics.classification.average_precision import AveragePrecision
from pytorch_lightning.metrics.classification.confusion_matrix import ConfusionMatrix
from pytorch_lightning.metrics.classification.f_beta import FBeta, Fbeta, F1
from pytorch_lightning.metrics.classification.precision_recall import Precision, Recall
from pytorch_lightning.metrics.classification.precision_recall_curve import PrecisionRecallCurve
from pytorch_lightning.metrics.classification.roc import ROC
from pytorch_lightning.metrics.classification.accuracy import Accuracy # noqa: F401
from pytorch_lightning.metrics.classification.average_precision import AveragePrecision # noqa: F401
from pytorch_lightning.metrics.classification.confusion_matrix import ConfusionMatrix # noqa: F401
from pytorch_lightning.metrics.classification.f_beta import FBeta, Fbeta, F1 # noqa: F401
from pytorch_lightning.metrics.classification.precision_recall import Precision, Recall # noqa: F401
from pytorch_lightning.metrics.classification.precision_recall_curve import PrecisionRecallCurve # noqa: F401
from pytorch_lightning.metrics.classification.roc import ROC # noqa: F401