feat pt: Support property fitting (#3867)
Solves issue #3866

<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

- **New Features**
  - Introduced property fitting neural networks with the new `PropertyFittingNet` class.
  - Added the `DeepProperty` class for evaluating properties of structures using a deep learning model.
  - Implemented the `PropertyModel` class to integrate properties specific to atomic models.

- **Enhancements**
  - Added an `intensive` property to several classes to indicate whether a fitting property is intensive or extensive.
  - Enhanced output property definitions and manipulation methods in various models for improved property evaluation.
  - Expanded loss function capabilities to handle the "property" loss type.
  - Improved argument definitions for fitting and loss functionalities, enhancing configurability.
  - Updated the model selection logic to include the new `PropertyModel`.
  - Enhanced the `DeepEvalWrapper` class to support additional model evaluation features, including new methods for retrieving model characteristics.

- **Documentation**
  - Updated class docstrings to reflect new attributes and parameters, improving clarity and usability.

- **Tests**
  - Expanded the set of test examples related to the "property" category to improve test coverage.
  - Introduced new test classes and parameterized tests for improved validation of property-related functionalities.

<!-- end of auto-generated comment: release notes by coderabbit.ai -->
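For orientation, here is a minimal sketch of a training-input fragment that would exercise the new code paths. The `"property"` fitting and loss types come from this PR; the concrete values (descriptor settings, `task_dim`, type map) are illustrative placeholders, not recommendations.

```python
# Hypothetical training-input fragment (illustrative values only).
model = {
    "type_map": ["H", "O"],
    "descriptor": {
        "type": "se_e2_a",
        "rcut": 6.0,
        "rcut_smth": 0.5,
        "sel": [46, 92],
    },
    "fitting_net": {
        "type": "property",  # dispatches to PropertyFittingNet
        "task_dim": 3,       # dimension of the fitted property
        "intensive": True,   # property does not scale with system size
    },
}
loss = {"type": "property"}  # the new loss type handled by this PR
```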

---------

Signed-off-by: Chengqian Zhang <100290172+Chengqian-Zhang@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Chengqian-Zhang and pre-commit-ci[bot] authored Sep 5, 2024
1 parent 46632f9 commit f4139fa
Showing 63 changed files with 2,375 additions and 2 deletions.
14 changes: 14 additions & 0 deletions deepmd/dpmodel/atomic_model/property_atomic_model.py
@@ -0,0 +1,14 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from deepmd.dpmodel.fitting.property_fitting import (
    PropertyFittingNet,
)

from .dp_atomic_model import (
    DPAtomicModel,
)


class DPPropertyAtomicModel(DPAtomicModel):
    def __init__(self, descriptor, fitting, type_map, **kwargs):
        assert isinstance(fitting, PropertyFittingNet)
        super().__init__(descriptor, fitting, type_map, **kwargs)
4 changes: 4 additions & 0 deletions deepmd/dpmodel/fitting/__init__.py
Original file line number Diff line number Diff line change
@@ -17,6 +17,9 @@
from .polarizability_fitting import (
    PolarFitting,
)
from .property_fitting import (
    PropertyFittingNet,
)

__all__ = [
    "InvarFitting",
@@ -25,4 +28,5 @@
"EnergyFittingNet",
"PolarFitting",
"DOSFittingNet",
"PropertyFittingNet",
]
136 changes: 136 additions & 0 deletions deepmd/dpmodel/fitting/property_fitting.py
@@ -0,0 +1,136 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
from typing import (
    List,
    Optional,
    Union,
)

import numpy as np

from deepmd.dpmodel.common import (
    DEFAULT_PRECISION,
)
from deepmd.dpmodel.fitting.invar_fitting import (
    InvarFitting,
)
from deepmd.utils.version import (
    check_version_compatibility,
)


@InvarFitting.register("property")
class PropertyFittingNet(InvarFitting):
    r"""Fitting the rotationally invariant properties of the system, with output dimension `task_dim`.

    Parameters
    ----------
    ntypes
        The number of atom types.
    dim_descrpt
        The dimension of the input descriptor.
    task_dim
        The dimension of the outputs of the fitting net.
    neuron
        Number of neurons :math:`N` in each hidden layer of the fitting net.
    bias_atom_p
        Average property per atom for each element.
    rcond
        The condition number for the regression of the atomic property.
    trainable
        If the weights of the fitting net are trainable.
        Suppose that we have :math:`N_l` hidden layers in the fitting net;
        this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
    intensive
        Whether the fitting property is intensive.
    bias_method
        The method of applying the bias to each atomic output; the user can select 'normal' or 'no_bias'.
        If 'normal' is used, the computed bias will be added to the atomic output.
        If 'no_bias' is used, no bias will be added to the atomic output.
    resnet_dt
        Time-step `dt` in the resnet construction:
        :math:`y = x + dt * \phi (Wx + b)`
    numb_fparam
        Number of frame parameters.
    numb_aparam
        Number of atomic parameters.
    activation_function
        The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|.
    precision
        The precision of the embedding net parameters. Supported options are |PRECISION|.
    mixed_types
        If false, different atom types use different fitting nets; otherwise all atom types share the same fitting net.
    exclude_types : List[int]
        Atomic contributions of the excluded atom types are set to zero.
    type_map : List[str], optional
        A list of strings giving the name of each atom type.
    """

    def __init__(
        self,
        ntypes: int,
        dim_descrpt: int,
        task_dim: int = 1,
        neuron: List[int] = [128, 128, 128],
        bias_atom_p: Optional[np.ndarray] = None,
        rcond: Optional[float] = None,
        trainable: Union[bool, List[bool]] = True,
        intensive: bool = False,
        bias_method: str = "normal",
        resnet_dt: bool = True,
        numb_fparam: int = 0,
        numb_aparam: int = 0,
        activation_function: str = "tanh",
        precision: str = DEFAULT_PRECISION,
        mixed_types: bool = True,
        exclude_types: List[int] = [],
        type_map: Optional[List[str]] = None,
        # not used
        seed: Optional[int] = None,
    ):
        self.task_dim = task_dim
        self.intensive = intensive
        self.bias_method = bias_method
        super().__init__(
            var_name="property",
            ntypes=ntypes,
            dim_descrpt=dim_descrpt,
            dim_out=task_dim,
            neuron=neuron,
            bias_atom=bias_atom_p,
            resnet_dt=resnet_dt,
            numb_fparam=numb_fparam,
            numb_aparam=numb_aparam,
            rcond=rcond,
            trainable=trainable,
            activation_function=activation_function,
            precision=precision,
            mixed_types=mixed_types,
            exclude_types=exclude_types,
            type_map=type_map,
        )

    @classmethod
    def deserialize(cls, data: dict) -> "PropertyFittingNet":
        data = copy.deepcopy(data)
        check_version_compatibility(data.pop("@version"), 2, 1)
        data.pop("dim_out")
        data.pop("var_name")
        data.pop("tot_ener_zero")
        data.pop("layer_name")
        data.pop("use_aparam_as_mask", None)
        data.pop("spin", None)
        data.pop("atom_ener", None)
        obj = super().deserialize(data)

        return obj

    def serialize(self) -> dict:
        """Serialize the fitting to dict."""
        dd = {
            **InvarFitting.serialize(self),
            "type": "property",
            "task_dim": self.task_dim,
        }

        return dd
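As a quick sketch of the serialize/deserialize pair above (dimensions are arbitrary; `deserialize` strips the keys the constructor does not accept before rebuilding the object):

```python
from deepmd.dpmodel.fitting.property_fitting import PropertyFittingNet

# Two atom types, 64-dimensional descriptor output, 3-dimensional property.
fitting = PropertyFittingNet(ntypes=2, dim_descrpt=64, task_dim=3, intensive=True)

# serialize() embeds "type": "property" and "task_dim";
# deserialize() pops the InvarFitting-only keys, then reconstructs.
restored = PropertyFittingNet.deserialize(fitting.serialize())
assert restored.task_dim == fitting.task_dim
```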
4 changes: 4 additions & 0 deletions deepmd/dpmodel/model/__init__.py
@@ -21,12 +21,16 @@
from .make_model import (
    make_model,
)
from .property_model import (
    PropertyModel,
)
from .spin_model import (
    SpinModel,
)

__all__ = [
    "EnergyModel",
    "PropertyModel",
    "DPModelCommon",
    "SpinModel",
    "make_model",
27 changes: 27 additions & 0 deletions deepmd/dpmodel/model/property_model.py
@@ -0,0 +1,27 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from deepmd.dpmodel.atomic_model.dp_atomic_model import (
    DPAtomicModel,
)
from deepmd.dpmodel.model.base_model import (
    BaseModel,
)

from .dp_model import (
    DPModelCommon,
)
from .make_model import (
    make_model,
)

DPPropertyModel_ = make_model(DPAtomicModel)


@BaseModel.register("property")
class PropertyModel(DPModelCommon, DPPropertyModel_):
    def __init__(
        self,
        *args,
        **kwargs,
    ):
        DPModelCommon.__init__(self)
        DPPropertyModel_.__init__(self, *args, **kwargs)
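For illustration, a sketch of building the model directly; it assumes `make_model` forwards its constructor arguments on to `DPAtomicModel`, and the descriptor settings are placeholders:

```python
from deepmd.dpmodel.descriptor.se_e2_a import DescrptSeA
from deepmd.dpmodel.fitting.property_fitting import PropertyFittingNet
from deepmd.dpmodel.model.property_model import PropertyModel

type_map = ["H", "O"]
descriptor = DescrptSeA(rcut=6.0, rcut_smth=0.5, sel=[46, 92])
fitting = PropertyFittingNet(
    ntypes=len(type_map),
    dim_descrpt=descriptor.get_dim_out(),
    task_dim=1,
)
# Arguments are forwarded through make_model to the atomic model.
model = PropertyModel(descriptor, fitting, type_map=type_map)
```

Because of `@BaseModel.register("property")`, the same class can also be selected by the `"property"` type key in a model configuration.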
7 changes: 7 additions & 0 deletions deepmd/dpmodel/output_def.py
@@ -186,6 +186,8 @@ class OutputVariableDef:
        If Hessian is required
    magnetic : bool
        If the derivatives of variable have magnetic parts.
    intensive : bool
        Whether the fitting property is intensive or extensive.
    """

    def __init__(
@@ -199,6 +201,7 @@ def __init__
        category: int = OutputVariableCategory.OUT.value,
        r_hessian: bool = False,
        magnetic: bool = False,
        intensive: bool = False,
    ):
        self.name = name
        self.shape = list(shape)
@@ -211,13 +214,17 @@
        self.reducible = reducible
        self.r_differentiable = r_differentiable
        self.c_differentiable = c_differentiable
        self.intensive = intensive
        if self.c_differentiable and not self.r_differentiable:
            raise ValueError("c differentiable requires r_differentiable")
        if self.reducible and not self.atomic:
            raise ValueError("a reducible variable should be atomic")
        if self.intensive and not self.reducible:
            raise ValueError("an intensive variable should be reducible")
        self.category = category
        self.r_hessian = r_hessian
        self.magnetic = magnetic
        self.intensive = intensive
        if self.r_hessian:
            if not self.reducible:
                raise ValueError("only reducible variable can calculate hessian")