Npu dev #2306

Merged · 3 commits · Sep 29, 2022
Changes from all commits
25 changes: 0 additions & 25 deletions mmcv/device/npu/_functions.py

This file was deleted.

4 changes: 2 additions & 2 deletions mmcv/device/npu/data_parallel.py
@@ -5,8 +5,8 @@

 import torch
 
+from mmcv.device.scatter_gather import scatter_kwargs
 from mmcv.parallel import MMDataParallel
-from .scatter_gather import scatter_kwargs
 
 
 def _check_balance(*args, **kwargs):
@@ -19,7 +19,7 @@ def _check_balance(*args, **kwargs):
 # _check_balance function in DataParallel to make initialization pass.
 for m in sys.modules:
     if m.startswith('torch') or 'mmcv' in m:
-        if getattr(sys.modules[m], '_check_balance', None) is not None:
+        if hasattr(sys.modules[m], '_check_balance'):
             setattr(sys.modules[m], '_check_balance', _check_balance)


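For context, the second hunk above only swaps the `getattr(...) is not None` test for `hasattr(...)`; assembled from the visible diff lines, the module-level monkey-patch reads roughly as in the sketch below. The `patch_check_balance` wrapper name is illustrative only and not part of the file.

import sys


def _check_balance(*args, **kwargs):
    # No-op stand-in: let DataParallel's device balance check pass on NPU,
    # where the CUDA device properties it inspects are not available.
    return


def patch_check_balance():
    # Illustrative wrapper around the loop shown in the diff: install the
    # no-op in every torch/mmcv module that defines `_check_balance`.
    for m in list(sys.modules):
        if m.startswith('torch') or 'mmcv' in m:
            if hasattr(sys.modules[m], '_check_balance'):
                setattr(sys.modules[m], '_check_balance', _check_balance)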
2 changes: 1 addition & 1 deletion mmcv/device/npu/distributed.py
@@ -1,8 +1,8 @@
 # Copyright Huawei Technologies Co., Ltd. All rights reserved.
 # Copyright (c) OpenMMLab. All rights reserved.
 
+from mmcv.device.scatter_gather import scatter_kwargs
 from mmcv.parallel import MMDistributedDataParallel
-from .scatter_gather import scatter_kwargs
 
 
 class NPUDistributedDataParallel(MMDistributedDataParallel):
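The distributed.py change is the same consolidation: NPUDistributedDataParallel now pulls scatter_kwargs from the shared mmcv.device.scatter_gather module rather than the NPU-local copy deleted below. A rough sketch of how a wrapper like this typically plugs the helper in; the scatter override and its signature follow the torch.nn.parallel convention and are assumptions, not lines from this diff.

from mmcv.device.scatter_gather import scatter_kwargs
from mmcv.parallel import MMDistributedDataParallel


class NPUDistributedDataParallelSketch(MMDistributedDataParallel):

    def scatter(self, inputs, kwargs, device_ids):
        # Assumed override: recursively move args/kwargs onto the target
        # NPU device(s) via the shared, device-agnostic helper.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)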
60 changes: 0 additions & 60 deletions mmcv/device/npu/scatter_gather.py

This file was deleted.

7 changes: 5 additions & 2 deletions mmcv/runner/fp16_utils.py
@@ -10,15 +10,18 @@
 import torch.nn as nn
 from torch.nn.parameter import Parameter
 
-from mmcv.utils import TORCH_VERSION, digit_version
+from mmcv.utils import IS_NPU_AVAILABLE, TORCH_VERSION, digit_version
 from .dist_utils import allreduce_grads as _allreduce_grads
 
 try:
     # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported
     # and used; otherwise, auto fp16 will adopt mmcv's implementation.
     # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16
     # manually, so the behavior may not be consistent with real amp.
-    from torch.cuda.amp import autocast
+    if IS_NPU_AVAILABLE:
+        from torch.npu.amp import autocast
+    else:
+        from torch.cuda.amp import autocast
 except ImportError:
     pass

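Taken together, the fp16_utils.py hunk picks the amp backend once at import time: torch.npu.amp on Ascend machines, torch.cuda.amp elsewhere, and no autocast at all on PyTorch < 1.6. Below is a standalone sketch of the same pattern; the outer try/except around the mmcv import and the forward_fp16 helper are illustrative additions, not code from this PR.

try:
    from mmcv.utils import IS_NPU_AVAILABLE
except ImportError:
    # Older mmcv releases do not expose the flag; assume no NPU.
    IS_NPU_AVAILABLE = False

try:
    # Prefer the Ascend NPU amp backend when available, otherwise fall back
    # to the stock CUDA backend shipped with PyTorch >= 1.6.
    if IS_NPU_AVAILABLE:
        from torch.npu.amp import autocast
    else:
        from torch.cuda.amp import autocast
except ImportError:
    # PyTorch < 1.6 (or no amp backend at all): leave autocast unset so
    # callers can fall back to mmcv's own fp16 casting.
    autocast = None


def forward_fp16(model, data):
    # Illustrative helper: run a forward pass under autocast when it exists.
    if autocast is not None:
        with autocast():
            return model(data)
    return model(data)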