Commit 7b9e59a

Solve conflict
Chengqian-Zhang committed Jun 13, 2024
2 parents ce14dc6 + 34ea4d8 commit 7b9e59a
Showing 35 changed files with 1,574 additions and 2 deletions.
4 changes: 4 additions & 0 deletions deepmd/dpmodel/fitting/__init__.py
@@ -17,6 +17,9 @@
from .polarizability_fitting import (
PolarFitting,
)
from .property_fitting import (
PropertyFittingNet,
)

__all__ = [
"InvarFitting",
@@ -25,4 +28,5 @@
"EnergyFittingNet",
"PolarFitting",
"DOSFittingNet",
"PropertyFittingNet",
]
133 changes: 133 additions & 0 deletions deepmd/dpmodel/fitting/property_fitting.py
@@ -0,0 +1,133 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
from typing import (
TYPE_CHECKING,
Any,
Callable,
List,
Optional,
Union,
)

from deepmd.dpmodel.common import (
DEFAULT_PRECISION,
)
from deepmd.dpmodel.fitting.invar_fitting import (
InvarFitting,
)
from deepmd.dpmodel.output_def import (
FittingOutputDef,
OutputVariableDef,
)
from deepmd.utils.path import (
DPPath,
)

if TYPE_CHECKING:
from deepmd.dpmodel.fitting.general_fitting import (
GeneralFitting,
)

from deepmd.utils.version import (
check_version_compatibility,
)


@InvarFitting.register("property")
class PropertyFittingNet(InvarFitting):
def __init__(
self,
ntypes: int,
dim_descrpt: int,
task_dim: int = 1,
neuron: List[int] = [128, 128, 128],
resnet_dt: bool = True,
numb_fparam: int = 0,
numb_aparam: int = 0,
rcond: Optional[float] = None,
tot_ener_zero: bool = False,
trainable: Optional[List[bool]] = None,
atom_ener: Optional[List[float]] = None,
activation_function: str = "tanh",
precision: str = DEFAULT_PRECISION,
layer_name: Optional[List[Optional[str]]] = None,
use_aparam_as_mask: bool = False,
spin: Any = None,
mixed_types: bool = False,
exclude_types: List[int] = [],
# not used
seed: Optional[int] = None,
):
self.task_dim = task_dim
super().__init__(
var_name="property",
ntypes=ntypes,
dim_descrpt=dim_descrpt,
dim_out=task_dim,
neuron=neuron,
resnet_dt=resnet_dt,
numb_fparam=numb_fparam,
numb_aparam=numb_aparam,
rcond=rcond,
tot_ener_zero=tot_ener_zero,
trainable=trainable,
atom_ener=atom_ener,
activation_function=activation_function,
precision=precision,
layer_name=layer_name,
use_aparam_as_mask=use_aparam_as_mask,
spin=spin,
mixed_types=mixed_types,
exclude_types=exclude_types,
)

@classmethod
def deserialize(cls, data: dict) -> "GeneralFitting":
data = copy.deepcopy(data)
check_version_compatibility(data.pop("@version", 1), 1, 1)
data.pop("var_name")
data.pop("dim_out")
return super().deserialize(data)

def serialize(self) -> dict:
"""Serialize the fitting to dict."""
return {**super().serialize(), "type": "property", "task_dim": self.task_dim}

def output_def(self) -> FittingOutputDef:
return FittingOutputDef(
[
OutputVariableDef(
self.var_name,
[self.dim_out],
reduciable=True,
r_differentiable=False,
c_differentiable=False,
),
]
)

def compute_output_stats(
self,
merged: Union[Callable[[], List[dict]], List[dict]],
stat_file_path: Optional[DPPath] = None,
):
"""
Compute the output statistics (e.g. energy bias) for the fitting net from packed data.
Parameters
----------
merged : Union[Callable[[], List[dict]], List[dict]]
- List[dict]: A list of data samples from various data systems.
Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor`
originating from the `i`-th data system.
- Callable[[], List[dict]]: A lazy function that returns data samples in the above format
only when needed. Since the sampling process can be slow and memory-intensive,
the lazy function helps by only sampling once.
stat_file_path : Optional[DPPath]
The path to the stat file.
"""
pass

# make jit happy with torch 2.0.0
exclude_types: List[int]
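Note (not part of the diff): a minimal construction sketch for the new fitting net. The atom-type count, descriptor width, and property dimension below are hypothetical; only the class itself comes from this commit.

from deepmd.dpmodel.fitting import PropertyFittingNet

# hypothetical sizes: 2 atom types, a 64-dimensional descriptor, 3 scalar targets per frame
fitting = PropertyFittingNet(
    ntypes=2,
    dim_descrpt=64,
    task_dim=3,
    mixed_types=True,
)

# the fitting registers itself under the "property" type and exposes one output
# variable named "property" that is reducible (summed over atoms) but not
# differentiable w.r.t. coordinates or cell
out_def = fitting.output_def()
assert fitting.task_dim == 3

# serialize() stamps type="property" and task_dim so an equivalent net can be rebuilt later
data = fitting.serialize()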
2 changes: 2 additions & 0 deletions deepmd/infer/deep_eval.py
@@ -74,6 +74,8 @@ class DeepEvalBackend(ABC):
"dipole_derv_c_redu": "virial",
"dos": "atom_dos",
"dos_redu": "dos",
"property": "atom_property",
"property_redu": "property",
"mask_mag": "mask_mag",
"mask": "mask",
# old models in v1
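Note (not part of the diff): these two entries register the name mapping for the new per-atom output "property" (exposed as "atom_property") and its reduced per-frame counterpart "property_redu" (exposed as "property"), mirroring the existing dos/atom_dos pair. A small, illustrative copy of just the two new entries:

# illustrative copy of the two new map entries; the real map lives on
# DeepEvalBackend and contains many more keys
dp_to_backend_name = {
    "property": "atom_property",
    "property_redu": "property",
}

raw_keys = ["property", "property_redu"]
print([dp_to_backend_name[k] for k in raw_keys])  # ['atom_property', 'property']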
136 changes: 136 additions & 0 deletions deepmd/infer/deep_property.py
@@ -0,0 +1,136 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from typing import (
Any,
Dict,
List,
Optional,
Tuple,
Union,
)

import numpy as np

from deepmd.dpmodel.output_def import (
FittingOutputDef,
ModelOutputDef,
OutputVariableDef,
)

from .deep_eval import (
DeepEval,
)


class DeepProperty(DeepEval):
"""Properties of structures.
Parameters
----------
model_file : Path
The name of the frozen model file.
*args : list
Positional arguments.
auto_batch_size : bool or int or AutoBatchSize, default: True
If True, automatic batch size will be used. If int, it will be used
as the initial batch size.
neighbor_list : ase.neighborlist.NewPrimitiveNeighborList, optional
The ASE neighbor list class to produce the neighbor list. If None, the
neighbor list will be built natively in the model.
**kwargs : dict
Keyword arguments.
"""

@property
def output_def(self) -> ModelOutputDef:
"""Get the output definition of this model."""
return ModelOutputDef(
FittingOutputDef(
[
OutputVariableDef(
"property",
shape=[-1],
reduciable=True,
atomic=True,
),
]
)
)

@property
def numb_task(self) -> int:
"""Get the number of task."""
return self.get_numb_task()

def eval(
self,
coords: np.ndarray,
cells: Optional[np.ndarray],
atom_types: Union[List[int], np.ndarray],
atomic: bool = False,
fparam: Optional[np.ndarray] = None,
aparam: Optional[np.ndarray] = None,
mixed_type: bool = False,
**kwargs: Dict[str, Any],
) -> Tuple[np.ndarray, ...]:
"""Evaluate properties. If atomic is True, also return atomic property.
Parameters
----------
coords : np.ndarray
The coordinates of the atoms, in shape (nframes, natoms, 3).
cells : np.ndarray
The cell vectors of the system, in shape (nframes, 9). If the system
is not periodic, set it to None.
atom_types : List[int] or np.ndarray
The types of the atoms. If mixed_type is False, the shape is (natoms,);
otherwise, the shape is (nframes, natoms).
atomic : bool, optional
Whether to return atomic property, by default False.
fparam : np.ndarray, optional
The frame parameters, by default None.
aparam : np.ndarray, optional
The atomic parameters, by default None.
mixed_type : bool, optional
Whether the atom_types is mixed type, by default False.
**kwargs : Dict[str, Any]
Keyword arguments.
Returns
-------
property
The properties of the system, in shape (nframes, num_tasks).
"""
(
coords,
cells,
atom_types,
fparam,
aparam,
nframes,
natoms,
) = self._standard_input(coords, cells, atom_types, fparam, aparam, mixed_type)
results = self.deep_eval.eval(
coords,
cells,
atom_types,
atomic,
fparam=fparam,
aparam=aparam,
**kwargs,
)
atomic_property = results["property"].reshape(nframes, natoms, -1)
property = results["property_redu"].reshape(nframes, -1)

if atomic:
return (
property,
atomic_property,
)
else:
return (property,)

def get_numb_task(self) -> int:
return self.deep_eval.get_numb_task()


__all__ = ["DeepProperty"]
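Note (not part of the diff): a minimal usage sketch of the new inference class, assuming a property model has already been trained and frozen. The file name and the 3-atom system are hypothetical.

import numpy as np

from deepmd.infer.deep_property import DeepProperty

dp = DeepProperty("frozen_property_model.pth")  # hypothetical frozen model

coords = np.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]])  # (nframes=1, natoms=3, 3)
cells = None            # non-periodic system
atom_types = [0, 1, 1]  # indices into the model's type map

# default: only the reduced per-frame property, shape (nframes, num_tasks)
(prop,) = dp.eval(coords, cells, atom_types)

# atomic=True additionally returns per-atom contributions, shape (nframes, natoms, task_dim)
prop, atom_prop = dp.eval(coords, cells, atom_types, atomic=True)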
5 changes: 5 additions & 0 deletions deepmd/pt/infer/deep_eval.py
@@ -36,6 +36,9 @@
from deepmd.infer.deep_pot import (
DeepPot,
)
from deepmd.infer.deep_property import (
DeepProperty,
)
from deepmd.infer.deep_wfc import (
DeepWFC,
)
@@ -181,6 +184,8 @@ def model_type(self) -> Type["DeepEvalWrapper"]:
return DeepGlobalPolar
elif "wfc" in model_output_type:
return DeepWFC
elif "property" in model_output_type:
return DeepProperty
else:
raise RuntimeError("Unknown model type")

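Note (not part of the diff): with this dispatch in place, the PyTorch backend reports DeepProperty for any model whose output definition contains "property". Assuming the usual factory behavior of deepmd.infer.deep_eval.DeepEval (not shown in this commit), loading such a frozen model would resolve to the new class:

from deepmd.infer.deep_eval import DeepEval

evaluator = DeepEval("frozen_property_model.pth")  # hypothetical frozen property model
print(type(evaluator).__name__)  # expected: "DeepProperty"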
3 changes: 3 additions & 0 deletions deepmd/pt/loss/__init__.py
@@ -27,6 +27,9 @@
"EnergySpinLoss",
"TensorLoss",
"TaskLoss",
"DOSLoss",
"PropertyLoss",
]