Commit

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Feb 1, 2024
1 parent 51643af commit 8abff4c
Showing 8 changed files with 388 additions and 262 deletions.
8 changes: 5 additions & 3 deletions deepmd/model_format/__init__.py
@@ -4,18 +4,21 @@
     PRECISION_DICT,
     NativeOP,
 )
+from .dpa1 import (
+    DescrptDPA1,
+)
 from .env_mat import (
     EnvMat,
 )
 from .fitting import (
     InvarFitting,
 )
 from .network import (
+    EmbdLayer,
     EmbeddingNet,
     FittingNet,
-    NativeLayer,
-    EmbdLayer,
     LayerNorm,
+    NativeLayer,
     NativeNet,
     NetworkCollection,
     load_dp_model,
@@ -37,7 +40,6 @@
 from .se_e2_a import (
     DescrptSeA,
 )
-from .dpa1 import DescrptDPA1
 
 __all__ = [
     "InvarFitting",
23 changes: 15 additions & 8 deletions deepmd/model_format/dpa1.py
@@ -21,9 +21,9 @@
     EnvMat,
 )
 from .network import (
+    EmbdLayer,
     EmbeddingNet,
     NetworkCollection,
-    EmbdLayer,
 )
 
 
@@ -147,6 +147,7 @@ class DescrptDPA1(NativeOP):
     DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular Simulation.
     arXiv preprint arXiv:2208.08236.
     """
+
     def __init__(
         self,
         rcut: float,
@@ -183,7 +184,7 @@ def __init__(
         if spin is not None:
             raise NotImplementedError("spin is not implemented")
         # TODO
-        if tebd_input_mode != 'concat':
+        if tebd_input_mode != "concat":
             raise NotImplementedError("tebd_input_mode != 'concat' not implemented")
         if not smooth:
             raise NotImplementedError("smooth == False not implemented")
@@ -215,8 +216,10 @@ def __init__(
         self.concat_output_tebd = concat_output_tebd
         self.spin = spin
 
-        self.type_embedding = EmbdLayer(ntypes, tebd_dim, padding=True, precision=precision)
-        in_dim = 1 + self.tebd_dim * 2 if self.tebd_input_mode in ['concat'] else 1
+        self.type_embedding = EmbdLayer(
+            ntypes, tebd_dim, padding=True, precision=precision
+        )
+        in_dim = 1 + self.tebd_dim * 2 if self.tebd_input_mode in ["concat"] else 1
         self.embeddings = NetworkCollection(
             ndim=0,
             ntypes=self.ntypes,
@@ -255,8 +258,11 @@ def __getitem__(self, key):
     @property
     def dim_out(self):
         """Returns the output dimension of this descriptor."""
-        return self.neuron[-1] * self.axis_neuron + self.tebd_dim * 2 \
-            if self.concat_output_tebd else self.neuron[-1] * self.axis_neuron
+        return (
+            self.neuron[-1] * self.axis_neuron + self.tebd_dim * 2
+            if self.concat_output_tebd
+            else self.neuron[-1] * self.axis_neuron
+        )
 
     def cal_g(
         self,
@@ -302,7 +308,6 @@ def call(
         sw
             The smooth switch function.
         """
-
         # nf x nloc x nnei x 4
         rr, ww = self.env_mat.call(coord_ext, atype_ext, nlist, self.davg, self.dstd)
         nf, nloc, nnei, _ = rr.shape
@@ -318,7 +323,9 @@
         nlist_masked[nlist_masked == -1] = 0
         index = np.tile(nlist_masked.reshape(nf, -1, 1), (1, 1, self.tebd_dim))
         # nf x nloc x nnei x tebd_dim
-        atype_embd_nlist = np.take_along_axis(atype_embd_ext, index, axis=1).reshape(nf, nloc, nnei, self.tebd_dim)
+        atype_embd_nlist = np.take_along_axis(atype_embd_ext, index, axis=1).reshape(
+            nf, nloc, nnei, self.tebd_dim
+        )
         ng = self.neuron[-1]
         ss = rr[..., 0:1]
         ss = np.concatenate([ss, atype_embd_nlist, atype_embd_nnei], axis=-1)
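The `np.take_along_axis` change in the last hunk is whitespace-only, but the gather it wraps is the heart of the neighbor type-embedding lookup. A minimal, self-contained NumPy sketch of the same indexing pattern (shapes and names mirror the diff; the sizes and data are made up for illustration):

# Sketch of the neighbor-embedding gather reformatted above.
import numpy as np

nf, nall, nloc, nnei, tebd_dim = 2, 6, 4, 3, 8
atype_embd_ext = np.random.rand(nf, nall, tebd_dim)  # per-atom type embeddings
nlist = np.random.randint(-1, nall, size=(nf, nloc, nnei))  # -1 marks no neighbor

nlist_masked = np.where(nlist == -1, 0, nlist)  # redirect invalid entries to index 0
# Broadcast the flattened neighbor indices over the embedding channel...
index = np.tile(nlist_masked.reshape(nf, -1, 1), (1, 1, tebd_dim))
# ...then gather along the atom axis and restore the (nf, nloc, nnei, .) layout.
atype_embd_nlist = np.take_along_axis(atype_embd_ext, index, axis=1).reshape(
    nf, nloc, nnei, tebd_dim
)
assert atype_embd_nlist.shape == (nf, nloc, nnei, tebd_dim)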
58 changes: 26 additions & 32 deletions deepmd/model_format/network.py
@@ -342,16 +342,17 @@ def __init__(
     ) -> None:
         self.padding = padding
         self.num_channel = num_channel + 1 if self.padding else num_channel
-        super().__init__(num_in=self.num_channel,
-                         num_out=num_out,
-                         bias=False,
-                         use_timestep=False,
-                         activation_function=None,
-                         resnet=False,
-                         precision=precision,
-                         )
+        super().__init__(
+            num_in=self.num_channel,
+            num_out=num_out,
+            bias=False,
+            use_timestep=False,
+            activation_function=None,
+            resnet=False,
+            precision=precision,
+        )
         if self.padding:
-            self.w[-1] = 0.
+            self.w[-1] = 0.0
 
     def serialize(self) -> dict:
         """Serialize the layer to a dict.
@@ -361,9 +362,7 @@ def serialize(self) -> dict:
         dict
             The serialized layer.
         """
-        data = {
-            "w": self.w
-        }
+        data = {"w": self.w}
         return {
             "padding": self.padding,
             "precision": self.precision,
@@ -390,9 +389,7 @@ def deserialize(cls, data: dict) -> "EmbdLayer":
             padding=False,
             **data,
         )
-        obj.w, = (
-            variables["w"],
-        )
+        (obj.w,) = (variables["w"],)
         obj.padding = padding
         obj.check_shape_consistency()
         return obj
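For context on the `padding` flag threaded through `EmbdLayer` here and in the dpa1 hunks above: with `padding=True` the layer allocates one extra channel and zeroes it (`self.w[-1] = 0.0`), so the out-of-range type index used for masked atoms embeds to a zero vector. A hedged NumPy sketch of that convention (illustrative names, not the deepmd-kit API):

# With padding=True the table gets one extra, all-zero row, so index `ntypes`
# (used for masked atoms) looks up a zero vector.
import numpy as np

ntypes, tebd_dim = 4, 8
w = np.random.rand(ntypes + 1, tebd_dim)  # num_channel = ntypes + 1
w[-1] = 0.0  # the padding row, as in `if self.padding: self.w[-1] = 0.0`

atype = np.array([0, 3, ntypes])  # last entry is a padded/masked atom
embd = w[atype]  # embedding lookup is just row indexing
assert np.all(embd[-1] == 0.0)  # masked atoms contribute nothing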
@@ -464,18 +461,19 @@ def __init__(
         self.eps = eps
         self.uni_init = uni_init
         self.num_in = num_in
-        super().__init__(num_in=1,
-                         num_out=num_in,
-                         bias=True,
-                         use_timestep=False,
-                         activation_function=None,
-                         resnet=False,
-                         precision=precision,
-                         )
+        super().__init__(
+            num_in=1,
+            num_out=num_in,
+            bias=True,
+            use_timestep=False,
+            activation_function=None,
+            resnet=False,
+            precision=precision,
+        )
         self.w = self.w.squeeze(0)  # keep the weight shape to be [num_in]
         if self.uni_init:
-            self.w = 1.
-            self.b = 0.
+            self.w = 1.0
+            self.b = 0.0
 
     def serialize(self) -> dict:
         """Serialize the layer to a dict.
@@ -510,17 +508,13 @@ def deserialize(cls, data: dict) -> "LayerNorm":
         assert len(variables["w"].shape) == 1
         if variables["b"] is not None:
             assert len(variables["b"].shape) == 1
-        num_in, = variables["w"].shape
+        (num_in,) = variables["w"].shape
         obj = cls(
             num_in,
             **data,
         )
-        obj.w, = (
-            variables["w"],
-        )
-        obj.b, = (
-            variables["b"],
-        )
+        (obj.w,) = (variables["w"],)
+        (obj.b,) = (variables["b"],)
         obj._check_shape_consistency()
         return obj
 
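The `uni_init` branch reformatted above (`self.w = 1.0`, `self.b = 0.0`) initializes the learnable scale and shift so that the layer starts out as plain standardization. A minimal NumPy sketch of the computation (`eps` matches the constructor argument above; the input data is made up):

# Why uni_init sets w=1.0, b=0.0: the layer then reduces to standardization.
import numpy as np

def layer_norm(x, w, b, eps=1e-5):
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * w + b

x = np.random.rand(2, 5)
y = layer_norm(x, w=1.0, b=0.0)  # identity-scaled standardization at init
assert np.allclose(y.mean(axis=-1), 0.0, atol=1e-6)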
32 changes: 21 additions & 11 deletions deepmd/pt/model/descriptor/dpa1.py
@@ -6,19 +6,25 @@
 
 import torch
 
+from deepmd.model_format import EnvMat as DPEnvMat
 from deepmd.pt.model.descriptor import (
     Descriptor,
 )
+from deepmd.pt.model.network.mlp import (
+    EmbdLayer,
+    NetworkCollection,
+)
 from deepmd.pt.model.network.network import (
     TypeEmbedNet,
 )
+from deepmd.pt.utils import (
+    env,
+)
 
-from .se_atten import DescrptBlockSeAtten, NeighborGatedAttention
-from deepmd.pt.model.network.mlp import EmbdLayer, NetworkCollection
-from deepmd.model_format import (
-    EnvMat as DPEnvMat,
-)
-from deepmd.pt.utils import env
+from .se_atten import (
+    DescrptBlockSeAtten,
+    NeighborGatedAttention,
+)
 
 
 @Descriptor.register("dpa1")
@@ -74,15 +80,17 @@ def __init__(
             normalize=normalize,
             temperature=temperature,
             old_impl=old_impl,
-            **kwargs
+            **kwargs,
         )
         self.type_embedding_old = None
         self.type_embedding = None
         self.old_impl = old_impl
         if self.old_impl:
             self.type_embedding_old = TypeEmbedNet(ntypes, tebd_dim)
         else:
-            self.type_embedding = EmbdLayer(ntypes, tebd_dim, padding=True, precision=precision)
+            self.type_embedding = EmbdLayer(
+                ntypes, tebd_dim, padding=True, precision=precision
+            )
         self.tebd_dim = tebd_dim
         self.concat_output_tebd = concat_output_tebd
 
@@ -195,9 +203,9 @@ def forward(
         return g1, rot_mat, g2, h2, sw
 
     def set_stat_mean_and_stddev(
-            self,
-            mean: torch.Tensor,
-            stddev: torch.Tensor,
+        self,
+        mean: torch.Tensor,
+        stddev: torch.Tensor,
     ) -> None:
         self.se_atten.mean = mean
         self.se_atten.stddev = stddev
@@ -253,5 +261,7 @@ def deserialize(cls, data: dict) -> "DescrptDPA1":
         obj.se_atten["davg"] = t_cvt(variables["davg"])
         obj.se_atten["dstd"] = t_cvt(variables["dstd"])
         obj.se_atten.filter_layers = NetworkCollection.deserialize(embeddings)
-        obj.se_atten.dpa1_attention = NeighborGatedAttention.deserialize(attention_layers)
+        obj.se_atten.dpa1_attention = NeighborGatedAttention.deserialize(
+            attention_layers
+        )
         return obj
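`t_cvt` in the hunk above converts the serialized NumPy statistics (`davg`/`dstd`) back into torch tensors; its exact dtype/device handling is not shown in this diff. A hedged sketch of the round-trip under that assumption (the helper body here is hypothetical, not the deepmd-kit implementation):

# Assumed shape of the deserialize-time conversion; the real helper would pick
# precision and device from the model configuration.
import numpy as np
import torch

def t_cvt(xx: np.ndarray) -> torch.Tensor:
    return torch.tensor(xx, dtype=torch.float64, device="cpu")

variables = {"davg": np.zeros((2, 3, 4)), "dstd": np.ones((2, 3, 4))}
davg = t_cvt(variables["davg"])
assert isinstance(davg, torch.Tensor) and davg.shape == (2, 3, 4)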
(Diff truncated: the remaining four changed files are not shown on this page.)
