Commit

update special implementation
iProzd committed Sep 19, 2024
1 parent c9f7c5e commit 898009b
Showing 4 changed files with 106 additions and 3 deletions.
26 changes: 26 additions & 0 deletions deepmd/pt/model/task/ener.py
@@ -133,6 +133,7 @@ def __init__(
assert self.ntypes == len(bias_atom_e), "Element count mismatches!"
bias_atom_e = torch.tensor(bias_atom_e, device=env.DEVICE)
self.register_buffer("bias_atom_e", bias_atom_e)
self.exclude_types = []

filter_layers_dipole = []
for type_i in range(self.ntypes):
@@ -178,6 +179,14 @@ def output_def(self):
]
)

def get_dim_fparam(self) -> int:
"""Get the number (dimension) of frame parameters of this atomic model."""
return 0

def get_dim_aparam(self) -> int:
"""Get the number (dimension) of atomic parameters of this atomic model."""
return 0

def serialize(self) -> dict:
raise NotImplementedError

@@ -192,6 +201,23 @@ def change_type_map(
def get_type_map(self) -> List[str]:
raise NotImplementedError

# make jit happy
exclude_types: List[int]

def get_sel_type(self) -> List[int]:
"""Get the selected atom types of this model.
Only atoms with selected atom types have atomic contribution
to the result of the model.
If returning an empty list, all atom types are selected.
"""
# make jit happy
sel_type: List[int] = []
for ii in range(self.ntypes):
if ii not in self.exclude_types:
sel_type.append(ii)
return sel_type

def forward(
self,
inputs: torch.Tensor,
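
For reference, the get_sel_type method added above simply filters self.exclude_types out of the full type range, while get_dim_fparam and get_dim_aparam report zero extra parameters. The standalone sketch below reproduces the selection logic so it can be run on its own; the helper name and the example values are illustrative only, not part of the commit.

from typing import List

def select_types(ntypes: int, exclude_types: List[int]) -> List[int]:
    # Mirror of get_sel_type above: keep every type index not listed in exclude_types.
    sel_type: List[int] = []
    for ii in range(ntypes):
        if ii not in exclude_types:
            sel_type.append(ii)
    return sel_type

print(select_types(4, [1, 3]))  # -> [0, 2]
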
4 changes: 1 addition & 3 deletions deepmd/pt/train/training.py
@@ -551,9 +551,7 @@ def collect_single_finetune_params(
target_state_dict,
)
state_dict = new_state_dict
state_dict["_extra_state"] = self.wrapper.state_dict()[
"_extra_state"
]
state_dict["_extra_state"] = self.wrapper.state_dict()["_extra_state"]

self.wrapper.load_state_dict(state_dict)

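This hunk only reflows the assignment that copies the wrapper's "_extra_state" entry into the state dict being loaded. As background, that key comes from PyTorch's get_extra_state/set_extra_state hooks; the toy module below (not deepmd code, all names hypothetical) shows how such an entry round-trips through state_dict and load_state_dict.

import torch

class WithExtra(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(2, 2)
        self.tag = "default"

    def get_extra_state(self):
        # Serialized alongside the parameters under the "_extra_state" key.
        return {"tag": self.tag}

    def set_extra_state(self, state):
        self.tag = state["tag"]

src = WithExtra()
src.tag = "finetune-meta"
dst = WithExtra()

sd = dst.state_dict()
sd["_extra_state"] = src.state_dict()["_extra_state"]  # carry the extra state over
dst.load_state_dict(sd)
print(dst.tag)  # -> finetune-meta
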
2 changes: 2 additions & 0 deletions deepmd/pt/utils/auto_batch_size.py
@@ -115,6 +115,8 @@ def execute_with_batch_size(
returned_dict = None
while index < total_size:
n_batch, result = self.execute(execute_with_batch_size, index, natoms)
if not n_batch:
continue
returned_dict = (
isinstance(result, dict) if returned_dict is None else returned_dict
)
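
The added guard skips an iteration when the executor reports that no batch was processed. The toy loop below imitates only that control flow; it does not use the real AutoBatchSize API, and the stand-in executor (a list of (n_batch, result) pairs) is made up for illustration.

def run_in_batches(total_size, batch_results):
    # batch_results stands in for successive self.execute(...) calls.
    index = 0
    collected = []
    it = iter(batch_results)
    while index < total_size:
        n_batch, result = next(it)
        if not n_batch:
            # Nothing was processed this round (e.g. the batch was rejected),
            # so retry without advancing the index.
            continue
        collected.append(result)
        index += n_batch
    return collected

print(run_in_batches(4, [(0, None), (2, "a"), (2, "b")]))  # -> ['a', 'b']
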
77 changes: 77 additions & 0 deletions deepmd/utils/argcheck.py
@@ -1432,6 +1432,83 @@ def fitting_ener():
]


@fitting_args_plugin.register("direct_force_ener")
@fitting_args_plugin.register("direct_force")
def fitting_direct_force():
doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provide the input fparams."
doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provide the input aparams."
doc_neuron = "The number of neurons in each hidden layer of the fitting net. When two hidden layers are of the same size, a skip connection is built."
doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
doc_trainable = f"Whether the parameters in the fitting net are trainable. This option can be\n\n\
- bool: True if all parameters of the fitting net are trainable, False otherwise.\n\n\
- list of bool{doc_only_tf_supported}: Specifies if each layer is trainable. Since the fitting net is composed of hidden layers followed by an output layer, the length of this list should be equal to len(`neuron`)+1."
doc_rcond = "The condition number used to determine the initial energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details."
doc_seed = "Random seed for parameter initialization of the fitting net"
doc_atom_ener = "Specify the atomic energy in vacuum for each type"
doc_layer_name = (
"The name of the each layer. The length of this list should be equal to n_neuron + 1. "
"If two layers, either in the same fitting or different fittings, "
"have the same name, they will share the same neural network parameters. "
"The shape of these layers should be the same. "
"If null is given for a layer, parameters will not be shared."
)
doc_use_aparam_as_mask = (
"Whether to use the aparam as a mask in input."
"If True, the aparam will not be used in fitting net for embedding."
"When descrpt is se_a_mask, the aparam will be used as a mask to indicate the input atom is real/virtual. And use_aparam_as_mask should be set to True."
)

return [
Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam),
Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam),
Argument(
"neuron",
List[int],
optional=True,
default=[120, 120, 120],
alias=["n_neuron"],
doc=doc_neuron,
),
Argument(
"activation_function",
str,
optional=True,
default="tanh",
doc=doc_activation_function,
),
Argument("precision", str, optional=True, default="default", doc=doc_precision),
Argument("resnet_dt", bool, optional=True, default=True, doc=doc_resnet_dt),
Argument(
"trainable",
[List[bool], bool],
optional=True,
default=True,
doc=doc_trainable,
),
Argument(
"rcond", [float, type(None)], optional=True, default=None, doc=doc_rcond
),
Argument("seed", [int, None], optional=True, doc=doc_seed),
Argument(
"atom_ener",
List[Optional[float]],
optional=True,
default=[],
doc=doc_atom_ener,
),
Argument("layer_name", List[str], optional=True, doc=doc_layer_name),
Argument(
"use_aparam_as_mask",
bool,
optional=True,
default=False,
doc=doc_use_aparam_as_mask,
),
]
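
For orientation, a fitting_net section selecting this new variant could look like the Python dict below. This is a hypothetical sketch; the commit ships no example input, and the values are placeholders rather than documented defaults.

# Hypothetical fitting_net block that the arguments registered above would accept.
fitting_net = {
    "type": "direct_force_ener",  # "direct_force" is registered as an alternative name
    "neuron": [240, 240, 240],
    "resnet_dt": True,
    "numb_fparam": 0,
    "seed": 1,
}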


@fitting_args_plugin.register("dos")
def fitting_dos():
doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provide the input fparams."
