style: require explicit device and dtype #4001

Merged · 3 commits · Jul 24, 2024
7 changes: 7 additions & 0 deletions .pre-commit-config.yaml
@@ -144,5 +144,12 @@ repos:
entry: DeepMD|DeepMd|Pytorch|Tensorflow|Numpy|Github|Lammps|I-Pi|I-PI|i-Pi
# unclear why PairDeepMD is used instead of PairDeePMD
exclude: .pre-commit-config.yaml|source/lmp
# customized pylint rules
- repo: https://github.com/pylint-dev/pylint/
rev: v3.2.6
hooks:
- id: pylint
entry: env PYTHONPATH=source/checker pylint
files: ^deepmd/
ci:
autoupdate_branch: devel
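The new hook runs pylint with PYTHONPATH=source/checker, so the no-explicit-dtype / no-explicit-device messages referenced throughout the diff apparently come from a custom plugin in that directory. As a rough illustration only, a minimal pylint plugin enforcing such a rule could look like the sketch below; the class name, message IDs, and heuristics are assumptions, not the actual contents of source/checker.

```python
# Hypothetical sketch of a pylint plugin that flags implicit dtype/device.
# The real checker lives in source/checker and may differ in IDs and logic.
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.lint import PyLinter

# constructors that should always receive an explicit dtype
DTYPE_CALLS = {"zeros", "ones", "empty", "full", "arange", "eye", "tensor"}


class ExplicitDtypeDeviceChecker(BaseChecker):
    name = "explicit-dtype-device"
    msgs = {
        "E8001": (
            "array/tensor created without an explicit dtype",
            "no-explicit-dtype",
            "Pass dtype= so precision does not depend on library defaults.",
        ),
        "E8002": (
            "tensor created without an explicit device",
            "no-explicit-device",
            "Pass device= so placement does not depend on the default device.",
        ),
    }

    def visit_call(self, node: nodes.Call) -> None:
        func = node.func
        if not isinstance(func, nodes.Attribute) or func.attrname not in DTYPE_CALLS:
            return
        kwargs = {kw.arg for kw in node.keywords or ()}
        if "dtype" not in kwargs:
            self.add_message("no-explicit-dtype", node=node)
        # crude heuristic: only torch.* calls are expected to take a device
        if isinstance(func.expr, nodes.Name) and func.expr.name == "torch":
            if "device" not in kwargs:
                self.add_message("no-explicit-device", node=node)


def register(linter: PyLinter) -> None:
    linter.register_checker(ExplicitDtypeDeviceChecker(linter))
```

With the hook in place, the check can be exercised locally via `pre-commit run pylint --all-files`, and individual lines opt out with the `# pylint: disable=...` comments added throughout this PR.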
4 changes: 2 additions & 2 deletions deepmd/dpmodel/atomic_model/base_atomic_model.py
@@ -57,8 +57,8 @@ def init_out_stat(self):
[self.atomic_output_def()[kk].size for kk in self.bias_keys]
)
self.n_out = len(self.bias_keys)
out_bias_data = np.zeros([self.n_out, ntypes, self.max_out_size])
out_std_data = np.ones([self.n_out, ntypes, self.max_out_size])
out_bias_data = np.zeros([self.n_out, ntypes, self.max_out_size]) # pylint: disable=no-explicit-dtype
out_std_data = np.ones([self.n_out, ntypes, self.max_out_size]) # pylint: disable=no-explicit-dtype
self.out_bias = out_bias_data
self.out_std = out_std_data

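The pattern in this file is typical of the whole PR: existing NumPy allocations keep their implicit default dtype and are annotated with a disable comment, while the new lint rule pushes future code to spell the dtype out. A minimal sketch of the explicit form, where np.float64 stands in for whatever precision constant the code base actually uses:

```python
import numpy as np

n_out, ntypes, max_out_size = 2, 4, 3  # illustrative sizes only

# implicit dtype (what the annotated lines do today): NumPy's default float64
out_bias_data = np.zeros([n_out, ntypes, max_out_size])

# explicit dtype (what the new rule asks of future code)
out_bias_data = np.zeros([n_out, ntypes, max_out_size], dtype=np.float64)
out_std_data = np.ones([n_out, ntypes, max_out_size], dtype=np.float64)
```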
2 changes: 1 addition & 1 deletion deepmd/dpmodel/atomic_model/linear_atomic_model.py
@@ -285,7 +285,7 @@
"""This should be a list of user defined weights that matches the number of models to be combined."""
nmodels = len(self.models)
nframes, nloc, _ = nlists_[0].shape
return [np.ones((nframes, nloc, 1)) / nmodels for _ in range(nmodels)]
return [np.ones((nframes, nloc, 1)) / nmodels for _ in range(nmodels)] # pylint: disable=no-explicit-dtype

def get_dim_fparam(self) -> int:
"""Get the number (dimension) of frame parameters of this atomic model."""
Expand Down
4 changes: 2 additions & 2 deletions deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
@@ -204,7 +204,7 @@ def forward_atomic(

# (nframes, nloc, nnei)
j_type = extended_atype[
np.arange(extended_atype.shape[0])[:, None, None], masked_nlist
np.arange(extended_atype.shape[0])[:, None, None], masked_nlist # pylint: disable=no-explicit-dtype
]

raw_atomic_energy = self._pair_tabulated_inter(
@@ -301,7 +301,7 @@ def _get_pairwise_dist(coords: np.ndarray, nlist: np.ndarray) -> np.ndarray:
np.ndarray
The pairwise distance between the atoms (nframes, nloc, nnei).
"""
batch_indices = np.arange(nlist.shape[0])[:, None, None]
batch_indices = np.arange(nlist.shape[0])[:, None, None] # pylint: disable=no-explicit-dtype
neighbor_atoms = coords[batch_indices, nlist]
loc_atoms = coords[:, : nlist.shape[1], :]
pairwise_dr = loc_atoms[:, :, None, :] - neighbor_atoms
Expand Down
12 changes: 6 additions & 6 deletions deepmd/dpmodel/fitting/general_fitting.py
@@ -144,18 +144,18 @@ def __init__(
net_dim_out = self._net_out_dim()
# init constants
if bias_atom_e is None:
self.bias_atom_e = np.zeros([self.ntypes, net_dim_out])
self.bias_atom_e = np.zeros([self.ntypes, net_dim_out]) # pylint: disable=no-explicit-dtype
else:
assert bias_atom_e.shape == (self.ntypes, net_dim_out)
self.bias_atom_e = bias_atom_e
if self.numb_fparam > 0:
self.fparam_avg = np.zeros(self.numb_fparam)
self.fparam_inv_std = np.ones(self.numb_fparam)
self.fparam_avg = np.zeros(self.numb_fparam) # pylint: disable=no-explicit-dtype
self.fparam_inv_std = np.ones(self.numb_fparam) # pylint: disable=no-explicit-dtype
else:
self.fparam_avg, self.fparam_inv_std = None, None
if self.numb_aparam > 0:
self.aparam_avg = np.zeros(self.numb_aparam)
self.aparam_inv_std = np.ones(self.numb_aparam)
self.aparam_avg = np.zeros(self.numb_aparam) # pylint: disable=no-explicit-dtype
self.aparam_inv_std = np.ones(self.numb_aparam) # pylint: disable=no-explicit-dtype
else:
self.aparam_avg, self.aparam_inv_std = None, None
# init networks
@@ -405,7 +405,7 @@ def _call_common(

# calculate the prediction
if not self.mixed_types:
outs = np.zeros([nf, nloc, net_dim_out])
outs = np.zeros([nf, nloc, net_dim_out]) # pylint: disable=no-explicit-dtype
for type_i in range(self.ntypes):
mask = np.tile(
(atype == type_i).reshape([nf, nloc, 1]), [1, 1, net_dim_out]
Expand Down
2 changes: 1 addition & 1 deletion deepmd/dpmodel/fitting/polarizability_fitting.py
@@ -308,7 +308,7 @@ def call(
bias = self.constant_matrix[atype]
# (nframes, nloc, 1)
bias = np.expand_dims(bias, axis=-1) * self.scale[atype]
eye = np.eye(3)
eye = np.eye(3) # pylint: disable=no-explicit-dtype
eye = np.tile(eye, (nframes, nloc, 1, 1))
# (nframes, nloc, 3, 3)
bias = np.expand_dims(bias, axis=-1) * eye
Expand Down
6 changes: 4 additions & 2 deletions deepmd/dpmodel/infer/deep_eval.py
@@ -343,11 +343,13 @@
if batch_output[dp_name] is not None:
out = batch_output[dp_name].reshape(shape)
else:
out = np.full(shape, np.nan)
out = np.full(shape, np.nan) # pylint: disable=no-explicit-dtype
results.append(out)
else:
shape = self._get_output_shape(odef, nframes, natoms)
results.append(np.full(np.abs(shape), np.nan)) # this is kinda hacky
results.append(
    np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype
) # this is kinda hacky
return tuple(results)

def _get_output_shape(self, odef, nframes, natoms):
11 changes: 6 additions & 5 deletions deepmd/dpmodel/utils/nlist.py
@@ -123,7 +123,8 @@ def build_neighbor_list(
nlist = nlist[:, :, :nsel]
else:
rr = np.concatenate(
[rr, np.ones([batch_size, nloc, nsel - nnei]) + rcut], axis=-1
[rr, np.ones([batch_size, nloc, nsel - nnei]) + rcut], # pylint: disable=no-explicit-dtype
axis=-1,
)
nlist = np.concatenate(
[nlist, np.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype)],
@@ -262,7 +263,7 @@ def extend_coord_with_ghosts(

"""
nf, nloc = atype.shape
aidx = np.tile(np.arange(nloc)[np.newaxis, :], (nf, 1))
aidx = np.tile(np.arange(nloc)[np.newaxis, :], (nf, 1)) # pylint: disable=no-explicit-dtype
if cell is None:
nall = nloc
extend_coord = coord.copy()
@@ -274,9 +275,9 @@
to_face = to_face_distance(cell)
nbuff = np.ceil(rcut / to_face).astype(int)
nbuff = np.max(nbuff, axis=0)
xi = np.arange(-nbuff[0], nbuff[0] + 1, 1)
yi = np.arange(-nbuff[1], nbuff[1] + 1, 1)
zi = np.arange(-nbuff[2], nbuff[2] + 1, 1)
xi = np.arange(-nbuff[0], nbuff[0] + 1, 1) # pylint: disable=no-explicit-dtype
yi = np.arange(-nbuff[1], nbuff[1] + 1, 1) # pylint: disable=no-explicit-dtype
zi = np.arange(-nbuff[2], nbuff[2] + 1, 1) # pylint: disable=no-explicit-dtype
xyz = np.outer(xi, np.array([1, 0, 0]))[:, np.newaxis, np.newaxis, :]
xyz = xyz + np.outer(yi, np.array([0, 1, 0]))[np.newaxis, :, np.newaxis, :]
xyz = xyz + np.outer(zi, np.array([0, 0, 1]))[np.newaxis, np.newaxis, :, :]
4 changes: 2 additions & 2 deletions deepmd/infer/model_devi.py
@@ -329,7 +329,7 @@ def calc_model_devi(
forces = np.array(forces)
virials = np.array(virials)

devi = [np.arange(coord.shape[0]) * frequency]
devi = [np.arange(coord.shape[0]) * frequency] # pylint: disable=no-explicit-dtype
if real_data is None:
devi += list(calc_model_devi_v(virials, relative=relative_v))
devi_f = list(calc_model_devi_f(forces, relative=relative, atomic=atomic))
@@ -503,7 +503,7 @@ def make_model_devi(
nframes_tot += coord.shape[0]
devis.append(devi)
devis = np.vstack(devis)
devis[:, 0] = np.arange(nframes_tot) * frequency
devis[:, 0] = np.arange(nframes_tot) * frequency # pylint: disable=no-explicit-dtype
write_model_devi_out(devis, output, header=system, atomic=atomic)
devis_coll.append(devis)
return devis_coll
20 changes: 12 additions & 8 deletions deepmd/pt/infer/deep_eval.py
@@ -426,7 +426,9 @@ def _eval_model(
results.append(out)
else:
shape = self._get_output_shape(odef, nframes, natoms)
results.append(np.full(np.abs(shape), np.nan)) # this is kinda hacky
results.append(
np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype
) # this is kinda hacky
return tuple(results)

def _eval_model_spin(
Expand Down Expand Up @@ -502,7 +504,9 @@ def _eval_model_spin(
results.append(out)
else:
shape = self._get_output_shape(odef, nframes, natoms)
results.append(np.full(np.abs(shape), np.nan)) # this is kinda hacky
results.append(
np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype
) # this is kinda hacky
return tuple(results)

def _get_output_shape(self, odef, nframes, natoms):
@@ -666,28 +670,28 @@ def eval_model(
logits_out.append(batch_output["logits"])
if not return_tensor:
energy_out = (
np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1])
np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1]) # pylint: disable=no-explicit-dtype
)
atomic_energy_out = (
np.concatenate(atomic_energy_out)
if atomic_energy_out
else np.zeros([nframes, natoms, 1])
else np.zeros([nframes, natoms, 1]) # pylint: disable=no-explicit-dtype
)
force_out = (
np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3])
np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype
)
force_mag_out = (
np.concatenate(force_mag_out)
if force_mag_out
else np.zeros([nframes, natoms, 3])
else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype
)
virial_out = (
np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3])
np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3]) # pylint: disable=no-explicit-dtype
)
atomic_virial_out = (
np.concatenate(atomic_virial_out)
if atomic_virial_out
else np.zeros([nframes, natoms, 3, 3])
else np.zeros([nframes, natoms, 3, 3]) # pylint: disable=no-explicit-dtype
)
updated_coord_out = (
np.concatenate(updated_coord_out) if updated_coord_out else None
4 changes: 2 additions & 2 deletions deepmd/pt/model/atomic_model/linear_atomic_model.py
@@ -90,7 +90,7 @@ def __init__(
self.rcuts = torch.tensor(
self.get_model_rcuts(), dtype=torch.float64, device=env.DEVICE
)
self.nsels = torch.tensor(self.get_model_nsels(), device=env.DEVICE)
self.nsels = torch.tensor(self.get_model_nsels(), device=env.DEVICE) # pylint: disable=no-explicit-dtype

def mixed_types(self) -> bool:
"""If true, the model
@@ -285,7 +285,7 @@ def remap_atype(ori_map: List[str], new_map: List[str]) -> torch.Tensor:
"""
type_2_idx = {atp: idx for idx, atp in enumerate(ori_map)}
# this maps the atype in the new map to the original map
mapping = torch.tensor(
mapping = torch.tensor( # pylint: disable=no-explicit-dtype
[type_2_idx[new_map[idx]] for idx in range(len(new_map))], device=env.DEVICE
)
return mapping
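On the PyTorch side the rule covers device as well as dtype. As an illustration only (the stand-in values and the choice of torch.int64 are assumptions; the real code builds the mapping from its type maps and uses env.DEVICE), an explicitly typed and placed version of a call like the one above could read:

```python
import torch

# hypothetical stand-ins for values the surrounding code already has
type_2_idx = {"H": 0, "O": 1}
new_map = ["O", "H"]
DEVICE = torch.device("cpu")  # the real code uses env.DEVICE

mapping = torch.tensor(
    [type_2_idx[t] for t in new_map],
    dtype=torch.int64,  # explicit dtype for an index tensor
    device=DEVICE,      # explicit device instead of the framework default
)
```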
2 changes: 1 addition & 1 deletion deepmd/pt/model/atomic_model/pairtab_atomic_model.py
@@ -267,7 +267,7 @@ def forward_atomic(
# i_type : (nframes, nloc), this is atype.
# j_type : (nframes, nloc, nnei)
j_type = extended_atype[
torch.arange(extended_atype.size(0), device=extended_coord.device)[
torch.arange(extended_atype.size(0), device=extended_coord.device)[ # pylint: disable=no-explicit-dtype
:, None, None
],
masked_nlist,
4 changes: 2 additions & 2 deletions deepmd/pt/model/descriptor/descriptor.py
@@ -147,8 +147,8 @@ def share_params(self, base_class, shared_level, resume=False):
base_env.stats[kk] += self.get_stats()[kk]
mean, stddev = base_env()
if not base_class.set_davg_zero:
base_class.mean.copy_(torch.tensor(mean, device=env.DEVICE))
base_class.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
base_class.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
base_class.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
# must share, even if not do stat
self.mean = base_class.mean
self.stddev = base_class.stddev
6 changes: 3 additions & 3 deletions deepmd/pt/model/descriptor/gaussian_lcc.py
@@ -107,7 +107,7 @@
sel = [sel]

self.ntypes = ntypes
self.sec = torch.tensor(sel)
self.sec = torch.tensor(sel) # pylint: disable=no-explicit-dtype,no-explicit-device
self.nnei = sum(sel)

if self.do_tag_embedding:
Expand Down Expand Up @@ -199,7 +199,7 @@
nall = extended_coord.shape[1]
nlist2 = torch.cat(
[
torch.arange(0, nloc, device=nlist.device)
torch.arange(0, nloc, device=nlist.device) # pylint: disable=no-explicit-dtype
.reshape(1, nloc, 1)
.expand(nframes, -1, -1),
nlist,
Expand All @@ -208,7 +208,7 @@
)
nlist_loc2 = torch.cat(
[
torch.arange(0, nloc, device=nlist_loc.device)
torch.arange(0, nloc, device=nlist_loc.device) # pylint: disable=no-explicit-dtype
.reshape(1, nloc, 1)
.expand(nframes, -1, -1),
nlist_loc,
2 changes: 1 addition & 1 deletion deepmd/pt/model/descriptor/repformer_layer_old_impl.py
@@ -320,7 +320,7 @@ def __init__(
sel = [sel] if isinstance(sel, int) else sel
self.nnei = sum(sel)
assert len(sel) == 1
self.sel = torch.tensor(sel, device=env.DEVICE)
self.sel = torch.tensor(sel, device=env.DEVICE) # pylint: disable=no-explicit-dtype
self.sec = self.sel
self.axis_neuron = axis_neuron
self.set_davg_zero = set_davg_zero
8 changes: 4 additions & 4 deletions deepmd/pt/model/descriptor/repformers.py
@@ -485,8 +485,8 @@
comm_dict["recv_num"],
g1,
comm_dict["communicator"],
torch.tensor(nloc),
torch.tensor(nall - nloc),
torch.tensor(nloc), # pylint: disable=no-explicit-dtype,no-explicit-device
torch.tensor(nall - nloc), # pylint: disable=no-explicit-dtype,no-explicit-device
)
g1_ext = ret[0].unsqueeze(0)
g1, g2, h2 = ll.forward(
@@ -543,8 +543,8 @@
self.stats = env_mat_stat.stats
mean, stddev = env_mat_stat()
if not self.set_davg_zero:
self.mean.copy_(torch.tensor(mean, device=env.DEVICE))
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype

def get_stats(self) -> Dict[str, StatItem]:
"""Get the statistics of the descriptor."""
6 changes: 3 additions & 3 deletions deepmd/pt/model/descriptor/se_a.py
@@ -578,8 +578,8 @@
self.stats = env_mat_stat.stats
mean, stddev = env_mat_stat()
if not self.set_davg_zero:
self.mean.copy_(torch.tensor(mean, device=env.DEVICE))
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype

def get_stats(self) -> Dict[str, StatItem]:
"""Get the statistics of the descriptor."""
@@ -635,7 +635,7 @@
dmatrix = dmatrix.view(
-1, self.ndescrpt
) # shape is [nframes*nall, self.ndescrpt]
xyz_scatter = torch.empty(
xyz_scatter = torch.empty( # pylint: disable=no-explicit-dtype
    1,
    device=env.DEVICE,
)
4 changes: 2 additions & 2 deletions deepmd/pt/model/descriptor/se_atten.py
@@ -407,8 +407,8 @@
self.stats = env_mat_stat.stats
mean, stddev = env_mat_stat()
if not self.set_davg_zero:
self.mean.copy_(torch.tensor(mean, device=env.DEVICE))
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype

def get_stats(self) -> Dict[str, StatItem]:
"""Get the statistics of the descriptor."""
8 changes: 4 additions & 4 deletions deepmd/pt/model/descriptor/se_r.py
@@ -208,8 +208,8 @@
base_env.stats[kk] += self.get_stats()[kk]
mean, stddev = base_env()
if not base_class.set_davg_zero:
base_class.mean.copy_(torch.tensor(mean, device=env.DEVICE))
base_class.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
base_class.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
base_class.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
self.mean = base_class.mean
self.stddev = base_class.stddev
# self.load_state_dict(base_class.state_dict()) # this does not work, because it only inits the model
@@ -268,8 +268,8 @@
self.stats = env_mat_stat.stats
mean, stddev = env_mat_stat()
if not self.set_davg_zero:
self.mean.copy_(torch.tensor(mean, device=env.DEVICE))
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype

def get_stats(self) -> Dict[str, StatItem]:
"""Get the statistics of the descriptor."""
4 changes: 2 additions & 2 deletions deepmd/pt/model/descriptor/se_t.py
@@ -605,8 +605,8 @@
self.stats = env_mat_stat.stats
mean, stddev = env_mat_stat()
if not self.set_davg_zero:
self.mean.copy_(torch.tensor(mean, device=env.DEVICE))
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE))
self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) # pylint: disable=no-explicit-dtype
self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) # pylint: disable=no-explicit-dtype

def get_stats(self) -> Dict[str, StatItem]:
"""Get the statistics of the descriptor."""
2 changes: 1 addition & 1 deletion deepmd/pt/model/model/__init__.py
@@ -71,7 +71,7 @@ def get_spin_model(model_params):
if not model_params["spin"]["use_spin"] or isinstance(
model_params["spin"]["use_spin"][0], int
):
use_spin = np.full(len(model_params["type_map"]), False)
use_spin = np.full(len(model_params["type_map"]), False) # pylint: disable=no-explicit-dtype
use_spin[model_params["spin"]["use_spin"]] = True
model_params["spin"]["use_spin"] = use_spin.tolist()
# include virtual spin and placeholder types