diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py
index 1262a56310..8a3a61400d 100644
--- a/deepmd/pt/infer/deep_eval.py
+++ b/deepmd/pt/infer/deep_eval.py
@@ -194,7 +194,7 @@ def get_sel_type(self) -> List[int]:
 
     def get_numb_dos(self) -> int:
         """Get the number of DOS."""
-        return 0
+        return self.dp.model["Default"].get_numb_dos()
 
     def get_has_efield(self):
         """Check if the model has efield."""
diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py
index 9c8bbc9a2a..e64a129d51 100644
--- a/deepmd/pt/loss/__init__.py
+++ b/deepmd/pt/loss/__init__.py
@@ -2,6 +2,9 @@
 from .denoise import (
     DenoiseLoss,
 )
+from .dos import (
+    DOSLoss,
+)
 from .ener import (
     EnergyStdLoss,
 )
@@ -21,4 +24,5 @@
     "EnergySpinLoss",
     "TensorLoss",
     "TaskLoss",
+    "DOSLoss",
 ]
diff --git a/deepmd/pt/loss/dos.py b/deepmd/pt/loss/dos.py
new file mode 100644
index 0000000000..7fd2e04ff2
--- /dev/null
+++ b/deepmd/pt/loss/dos.py
@@ -0,0 +1,267 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    List,
+)
+
+import torch
+
+from deepmd.pt.loss.loss import (
+    TaskLoss,
+)
+from deepmd.pt.utils import (
+    env,
+)
+from deepmd.utils.data import (
+    DataRequirementItem,
+)
+
+
+class DOSLoss(TaskLoss):
+    def __init__(
+        self,
+        starter_learning_rate: float,
+        numb_dos: int,
+        start_pref_dos: float = 1.00,
+        limit_pref_dos: float = 1.00,
+        start_pref_cdf: float = 1000,
+        limit_pref_cdf: float = 1.00,
+        start_pref_ados: float = 0.0,
+        limit_pref_ados: float = 0.0,
+        start_pref_acdf: float = 0.0,
+        limit_pref_acdf: float = 0.0,
+        inference=False,
+        **kwargs,
+    ):
+        r"""Construct a loss for the density of states (DOS) and its cumulative integral (CDF).
+
+        Parameters
+        ----------
+        starter_learning_rate : float
+            The learning rate at the start of the training.
+        numb_dos : int
+            The number of grid points on which the DOS is evaluated.
+        start_pref_dos : float
+            The prefactor of the global DOS loss at the start of the training.
+            All prefactors should be larger than or equal to 0.
+        limit_pref_dos : float
+            The prefactor of the global DOS loss at the end of the training.
+        start_pref_cdf : float
+            The prefactor of the global CDF loss at the start of the training.
+        limit_pref_cdf : float
+            The prefactor of the global CDF loss at the end of the training.
+        start_pref_ados : float
+            The prefactor of the atomic DOS loss at the start of the training.
+        limit_pref_ados : float
+            The prefactor of the atomic DOS loss at the end of the training.
+        start_pref_acdf : float
+            The prefactor of the atomic CDF loss at the start of the training.
+        limit_pref_acdf : float
+            The prefactor of the atomic CDF loss at the end of the training.
+        inference : bool
+            If true, it will output all losses found in output, ignoring the pre-factors.
+        **kwargs
+            Other keyword arguments.
+ """ + super().__init__() + self.starter_learning_rate = starter_learning_rate + self.numb_dos = numb_dos + self.inference = inference + + self.start_pref_dos = start_pref_dos + self.limit_pref_dos = limit_pref_dos + self.start_pref_cdf = start_pref_cdf + self.limit_pref_cdf = limit_pref_cdf + + self.start_pref_ados = start_pref_ados + self.limit_pref_ados = limit_pref_ados + self.start_pref_acdf = start_pref_acdf + self.limit_pref_acdf = limit_pref_acdf + + assert ( + self.start_pref_dos >= 0.0 + and self.limit_pref_dos >= 0.0 + and self.start_pref_cdf >= 0.0 + and self.limit_pref_cdf >= 0.0 + and self.start_pref_ados >= 0.0 + and self.limit_pref_ados >= 0.0 + and self.start_pref_acdf >= 0.0 + and self.limit_pref_acdf >= 0.0 + ), "Can not assign negative weight to `pref` and `pref_atomic`" + + self.has_dos = (start_pref_dos != 0.0 and limit_pref_dos != 0.0) or inference + self.has_cdf = (start_pref_cdf != 0.0 and limit_pref_cdf != 0.0) or inference + self.has_ados = (start_pref_ados != 0.0 and limit_pref_ados != 0.0) or inference + self.has_acdf = (start_pref_acdf != 0.0 and limit_pref_acdf != 0.0) or inference + + assert ( + self.has_dos or self.has_cdf or self.has_ados or self.has_acdf + ), AssertionError("Can not assian zero weight both to `pref` and `pref_atomic`") + + def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False): + """Return loss on local and global tensors. + + Parameters + ---------- + input_dict : dict[str, torch.Tensor] + Model inputs. + model : torch.nn.Module + Model to be used to output the predictions. + label : dict[str, torch.Tensor] + Labels. + natoms : int + The local atom number. + + Returns + ------- + model_pred: dict[str, torch.Tensor] + Model predictions. + loss: torch.Tensor + Loss for model to minimize. + more_loss: dict[str, torch.Tensor] + Other losses for display. 
+ """ + model_pred = model(**input_dict) + + coef = learning_rate / self.starter_learning_rate + pref_dos = ( + self.limit_pref_dos + (self.start_pref_dos - self.limit_pref_dos) * coef + ) + pref_cdf = ( + self.limit_pref_cdf + (self.start_pref_cdf - self.limit_pref_cdf) * coef + ) + pref_ados = ( + self.limit_pref_ados + (self.start_pref_ados - self.limit_pref_ados) * coef + ) + pref_acdf = ( + self.limit_pref_acdf + (self.start_pref_acdf - self.limit_pref_acdf) * coef + ) + + loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] + more_loss = {} + if self.has_ados and "atom_dos" in model_pred and "atom_dos" in label: + find_local = label.get("find_atom_dos", 0.0) + pref_ados = pref_ados * find_local + local_tensor_pred_dos = model_pred["atom_dos"].reshape( + [-1, natoms, self.numb_dos] + ) + local_tensor_label_dos = label["atom_dos"].reshape( + [-1, natoms, self.numb_dos] + ) + diff = (local_tensor_pred_dos - local_tensor_label_dos).reshape( + [-1, self.numb_dos] + ) + if "mask" in model_pred: + diff = diff[model_pred["mask"].reshape([-1]).bool()] + l2_local_loss_dos = torch.mean(torch.square(diff)) + if not self.inference: + more_loss["l2_local_dos_loss"] = self.display_if_exist( + l2_local_loss_dos.detach(), find_local + ) + loss += pref_ados * l2_local_loss_dos + rmse_local_dos = l2_local_loss_dos.sqrt() + more_loss["rmse_local_dos"] = self.display_if_exist( + rmse_local_dos.detach(), find_local + ) + if self.has_acdf and "atom_dos" in model_pred and "atom_dos" in label: + find_local = label.get("find_atom_dos", 0.0) + pref_acdf = pref_acdf * find_local + local_tensor_pred_cdf = torch.cusum( + model_pred["atom_dos"].reshape([-1, natoms, self.numb_dos]), dim=-1 + ) + local_tensor_label_cdf = torch.cusum( + label["atom_dos"].reshape([-1, natoms, self.numb_dos]), dim=-1 + ) + diff = (local_tensor_pred_cdf - local_tensor_label_cdf).reshape( + [-1, self.numb_dos] + ) + if "mask" in model_pred: + diff = diff[model_pred["mask"].reshape([-1]).bool()] + l2_local_loss_cdf = torch.mean(torch.square(diff)) + if not self.inference: + more_loss["l2_local_cdf_loss"] = self.display_if_exist( + l2_local_loss_cdf.detach(), find_local + ) + loss += pref_acdf * l2_local_loss_cdf + rmse_local_cdf = l2_local_loss_cdf.sqrt() + more_loss["rmse_local_cdf"] = self.display_if_exist( + rmse_local_cdf.detach(), find_local + ) + if self.has_dos and "dos" in model_pred and "dos" in label: + find_global = label.get("find_dos", 0.0) + pref_dos = pref_dos * find_global + global_tensor_pred_dos = model_pred["dos"].reshape([-1, self.numb_dos]) + global_tensor_label_dos = label["dos"].reshape([-1, self.numb_dos]) + diff = global_tensor_pred_dos - global_tensor_label_dos + if "mask" in model_pred: + atom_num = model_pred["mask"].sum(-1, keepdim=True) + l2_global_loss_dos = torch.mean( + torch.sum(torch.square(diff) * atom_num, dim=0) / atom_num.sum() + ) + atom_num = torch.mean(atom_num.float()) + else: + atom_num = natoms + l2_global_loss_dos = torch.mean(torch.square(diff)) + if not self.inference: + more_loss["l2_global_dos_loss"] = self.display_if_exist( + l2_global_loss_dos.detach(), find_global + ) + loss += pref_dos * l2_global_loss_dos + rmse_global_dos = l2_global_loss_dos.sqrt() / atom_num + more_loss["rmse_global_dos"] = self.display_if_exist( + rmse_global_dos.detach(), find_global + ) + if self.has_cdf and "dos" in model_pred and "dos" in label: + find_global = label.get("find_dos", 0.0) + pref_cdf = pref_cdf * find_global + global_tensor_pred_cdf = torch.cusum( + 
model_pred["dos"].reshape([-1, self.numb_dos]), dim=-1 + ) + global_tensor_label_cdf = torch.cusum( + label["dos"].reshape([-1, self.numb_dos]), dim=-1 + ) + diff = global_tensor_pred_cdf - global_tensor_label_cdf + if "mask" in model_pred: + atom_num = model_pred["mask"].sum(-1, keepdim=True) + l2_global_loss_cdf = torch.mean( + torch.sum(torch.square(diff) * atom_num, dim=0) / atom_num.sum() + ) + atom_num = torch.mean(atom_num.float()) + else: + atom_num = natoms + l2_global_loss_cdf = torch.mean(torch.square(diff)) + if not self.inference: + more_loss["l2_global_cdf_loss"] = self.display_if_exist( + l2_global_loss_cdf.detach(), find_global + ) + loss += pref_cdf * l2_global_loss_cdf + rmse_global_dos = l2_global_loss_cdf.sqrt() / atom_num + more_loss["rmse_global_cdf"] = self.display_if_exist( + rmse_global_dos.detach(), find_global + ) + return model_pred, loss, more_loss + + @property + def label_requirement(self) -> List[DataRequirementItem]: + """Return data label requirements needed for this loss calculation.""" + label_requirement = [] + if self.has_ados or self.has_acdf: + label_requirement.append( + DataRequirementItem( + "atom_dos", + ndof=self.numb_dos, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.has_dos or self.has_cdf: + label_requirement.append( + DataRequirementItem( + "dos", + ndof=self.numb_dos, + atomic=False, + must=False, + high_prec=False, + ) + ) + return label_requirement diff --git a/deepmd/pt/model/model/dos_model.py b/deepmd/pt/model/model/dos_model.py index 680eac41f5..e043700bee 100644 --- a/deepmd/pt/model/model/dos_model.py +++ b/deepmd/pt/model/model/dos_model.py @@ -50,6 +50,11 @@ def forward( model_predict["updated_coord"] += coord return model_predict + @torch.jit.export + def get_numb_dos(self) -> int: + """Get the number of DOS for DOSFittingNet.""" + return self.get_fitting_net().dim_out + @torch.jit.export def forward_lower( self, diff --git a/deepmd/pt/model/task/dos.py b/deepmd/pt/model/task/dos.py index c37b05277a..196872d17c 100644 --- a/deepmd/pt/model/task/dos.py +++ b/deepmd/pt/model/task/dos.py @@ -2,11 +2,13 @@ import copy import logging from typing import ( + Callable, List, Optional, Union, ) +import numpy as np import torch from deepmd.dpmodel import ( @@ -28,6 +30,13 @@ from deepmd.pt.utils.utils import ( to_numpy_array, ) +from deepmd.utils.out_stat import ( + compute_stats_from_atomic, + compute_stats_from_redu, +) +from deepmd.utils.path import ( + DPPath, +) from deepmd.utils.version import ( check_version_compatibility, ) @@ -96,6 +105,63 @@ def output_def(self) -> FittingOutputDef: ] ) + def compute_output_stats( + self, + merged: Union[Callable[[], List[dict]], List[dict]], + stat_file_path: Optional[DPPath] = None, + ) -> None: + """ + Compute the output statistics (e.g. dos bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], List[dict]], List[dict]] + - List[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` + originating from the `i`-th data system. + - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + stat_file_path : Optional[DPPath] + The path to the stat file. 
+ + """ + if stat_file_path is not None: + stat_file_path = stat_file_path / "bias_dos" + if stat_file_path is not None and stat_file_path.is_file(): + bias_dos = stat_file_path.load_numpy() + else: + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + for sys in range(len(sampled)): + nframs = sampled[sys]["atype"].shape[0] + + if "atom_dos" in sampled[sys]: + bias_dos = compute_stats_from_atomic( + sampled[sys]["atom_dos"].numpy(force=True), + sampled[sys]["atype"].numpy(force=True), + )[0] + else: + sys_type_count = np.zeros( + (nframs, self.ntypes), dtype=env.GLOBAL_NP_FLOAT_PRECISION + ) + for itype in range(self.ntypes): + type_mask = sampled[sys]["atype"] == itype + sys_type_count[:, itype] = type_mask.sum(dim=1).numpy( + force=True + ) + sys_bias_redu = sampled[sys]["dos"].numpy(force=True) + + bias_dos = compute_stats_from_redu( + sys_bias_redu, sys_type_count, rcond=self.rcond + )[0] + if stat_file_path is not None: + stat_file_path.save_numpy(bias_dos) + self.bias_dos = torch.tensor(bias_dos, device=env.DEVICE) + @classmethod def deserialize(cls, data: dict) -> "DOSFittingNet": data = copy.deepcopy(data) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 1bea24d717..d01f25be80 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -25,6 +25,7 @@ ) from deepmd.pt.loss import ( DenoiseLoss, + DOSLoss, EnergySpinLoss, EnergyStdLoss, TensorLoss, @@ -276,7 +277,8 @@ def get_loss(loss_params, start_lr, _ntypes, _model): return EnergyStdLoss(**loss_params) elif loss_type == "dos": loss_params["starter_learning_rate"] = start_lr - raise NotImplementedError() + loss_params["numb_dos"] = _model.model_output_def()["dos"].output_size + return DOSLoss(**loss_params) elif loss_type == "ener_spin": loss_params["starter_learning_rate"] = start_lr return EnergySpinLoss(**loss_params) diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index 3659e57305..3956dac654 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -113,6 +113,10 @@ def compute_stats_from_atomic( output_std = np.zeros((ntypes, ndim)) for type_i in range(ntypes): mask = atype == type_i - output_bias[type_i] = output[mask].mean(axis=0) - output_std[type_i] = output[mask].std(axis=0) + output_bias[type_i] = ( + output[mask].mean(axis=0) if output[mask].size > 0 else np.nan + ) + output_std[type_i] = ( + output[mask].std(axis=0) if output[mask].size > 0 else np.nan + ) return output_bias, output_std diff --git a/source/tests/pt/dos/data/set.000/atom_dos.npy b/examples/dos/data/heat-221-reformat/atomic_system/set.000/atom_dos.npy similarity index 100% rename from source/tests/pt/dos/data/set.000/atom_dos.npy rename to examples/dos/data/heat-221-reformat/atomic_system/set.000/atom_dos.npy diff --git a/source/tests/pt/dos/data/set.000/box.npy b/examples/dos/data/heat-221-reformat/atomic_system/set.000/box.npy similarity index 100% rename from source/tests/pt/dos/data/set.000/box.npy rename to examples/dos/data/heat-221-reformat/atomic_system/set.000/box.npy diff --git a/source/tests/pt/dos/data/set.000/coord.npy b/examples/dos/data/heat-221-reformat/atomic_system/set.000/coord.npy similarity index 100% rename from source/tests/pt/dos/data/set.000/coord.npy rename to examples/dos/data/heat-221-reformat/atomic_system/set.000/coord.npy diff --git a/source/tests/pt/dos/data/type.raw b/examples/dos/data/heat-221-reformat/atomic_system/type.raw similarity index 100% rename from source/tests/pt/dos/data/type.raw 
rename to examples/dos/data/heat-221-reformat/atomic_system/type.raw
diff --git a/examples/dos/data/heat-221-reformat/atomic_system/type_map.raw b/examples/dos/data/heat-221-reformat/atomic_system/type_map.raw
new file mode 100644
index 0000000000..e267321d2c
--- /dev/null
+++ b/examples/dos/data/heat-221-reformat/atomic_system/type_map.raw
@@ -0,0 +1 @@
+Si
diff --git a/examples/dos/data/heat-221-reformat/global_system/set.000/box.npy b/examples/dos/data/heat-221-reformat/global_system/set.000/box.npy
new file mode 100644
index 0000000000..6265bf150e
Binary files /dev/null and b/examples/dos/data/heat-221-reformat/global_system/set.000/box.npy differ
diff --git a/examples/dos/data/heat-221-reformat/global_system/set.000/coord.npy b/examples/dos/data/heat-221-reformat/global_system/set.000/coord.npy
new file mode 100644
index 0000000000..f33ce430bf
Binary files /dev/null and b/examples/dos/data/heat-221-reformat/global_system/set.000/coord.npy differ
diff --git a/source/tests/pt/dos/data/set.000/dos.npy b/examples/dos/data/heat-221-reformat/global_system/set.000/dos.npy
similarity index 100%
rename from source/tests/pt/dos/data/set.000/dos.npy
rename to examples/dos/data/heat-221-reformat/global_system/set.000/dos.npy
diff --git a/examples/dos/data/heat-221-reformat/global_system/type.raw b/examples/dos/data/heat-221-reformat/global_system/type.raw
new file mode 100644
index 0000000000..de3c26ec4e
--- /dev/null
+++ b/examples/dos/data/heat-221-reformat/global_system/type.raw
@@ -0,0 +1,32 @@
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
diff --git a/examples/dos/data/heat-221-reformat/global_system/type_map.raw b/examples/dos/data/heat-221-reformat/global_system/type_map.raw
new file mode 100644
index 0000000000..e267321d2c
--- /dev/null
+++ b/examples/dos/data/heat-221-reformat/global_system/type_map.raw
@@ -0,0 +1 @@
+Si
diff --git a/examples/dos/train/input_torch.json b/examples/dos/train/input_torch.json
new file mode 100644
index 0000000000..99bc106e7d
--- /dev/null
+++ b/examples/dos/train/input_torch.json
@@ -0,0 +1,75 @@
+{
+    "model": {
+        "type_map": [
+            "Si"
+        ],
+        "descriptor": {
+            "type": "se_a",
+            "sel": [
+                90
+            ],
+            "rcut_smth": 1.8,
+            "rcut": 6.0,
+            "neuron": [
+                25,
+                50,
+                100
+            ],
+            "resnet_dt": false,
+            "axis_neuron": 8,
+            "type_one_side": true,
+            "precision": "float64",
+            "seed": 1
+        },
+        "fitting_net": {
+            "type": "dos",
+            "numb_dos": 250,
+            "neuron": [
+                120,
+                120,
+                120
+            ],
+            "resnet_dt": true,
+            "numb_fparam": 0,
+            "precision": "float64",
+            "seed": 1
+        }
+    },
+    "loss": {
+        "type": "dos",
+        "start_pref_dos": 0.0,
+        "limit_pref_dos": 0.0,
+        "start_pref_cdf": 0.0,
+        "limit_pref_cdf": 0.0,
+        "start_pref_ados": 1.0,
+        "limit_pref_ados": 1.0,
+        "start_pref_acdf": 0.0,
+        "limit_pref_acdf": 0.0
+    },
+    "learning_rate": {
+        "type": "exp",
+        "start_lr": 0.001,
+        "stop_lr": 1e-08
+    },
+    "training": {
+        "stop_batch": 100000,
+        "seed": 1,
+        "disp_file": "lcurve.out",
+        "disp_freq": 100,
+        "save_freq": 1000,
+        "save_ckpt": "model.ckpt",
+        "disp_training": true,
+        "time_training": true,
+        "profiling": false,
+        "profiling_file": "timeline.json",
+        "training_data": {
+            "systems": [
+                "../data/heat-221-reformat/atomic_system/",
+                "../data/heat-221-reformat/global_system/"
+            ],
+            "set_prefix": "set",
+            "batch_size": 1
+        }
+    },
+    "_comment1": "that's all"
+}
diff --git a/source/tests/common/test_examples.py b/source/tests/common/test_examples.py
index 91bb9c0174..6d5e34fedf 100644
--- a/source/tests/common/test_examples.py
+++ b/source/tests/common/test_examples.py
@@ -41,6 +41,7 @@
     p_examples / "fparam" / "train" / "input_aparam.json",
     p_examples / "zinc_protein" / "zinc_se_a_mask.json",
     p_examples / "dos" / "train" / "input.json",
+    p_examples / "dos" / "train" / "input_torch.json",
     p_examples / "spin" / "se_e2_a" / "input_tf.json",
     p_examples / "spin" / "se_e2_a" / "input_torch.json",
     p_examples / "dprc" / "normal" / "input.json",
diff --git a/source/tests/pt/dos/data/atomic_system/set.000/atom_dos.npy b/source/tests/pt/dos/data/atomic_system/set.000/atom_dos.npy
new file mode 100644
index 0000000000..22809c1068
Binary files /dev/null and b/source/tests/pt/dos/data/atomic_system/set.000/atom_dos.npy differ
diff --git a/source/tests/pt/dos/data/atomic_system/set.000/box.npy b/source/tests/pt/dos/data/atomic_system/set.000/box.npy
new file mode 100644
index 0000000000..6265bf150e
Binary files /dev/null and b/source/tests/pt/dos/data/atomic_system/set.000/box.npy differ
diff --git a/source/tests/pt/dos/data/atomic_system/set.000/coord.npy b/source/tests/pt/dos/data/atomic_system/set.000/coord.npy
new file mode 100644
index 0000000000..f33ce430bf
Binary files /dev/null and b/source/tests/pt/dos/data/atomic_system/set.000/coord.npy differ
diff --git a/source/tests/pt/dos/data/atomic_system/type.raw b/source/tests/pt/dos/data/atomic_system/type.raw
new file mode 100644
index 0000000000..de3c26ec4e
--- /dev/null
+++ b/source/tests/pt/dos/data/atomic_system/type.raw
@@ -0,0 +1,32 @@
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
diff --git a/source/tests/pt/dos/data/type_map.raw b/source/tests/pt/dos/data/atomic_system/type_map.raw
similarity index 100%
rename from source/tests/pt/dos/data/type_map.raw
rename to source/tests/pt/dos/data/atomic_system/type_map.raw
diff --git a/source/tests/pt/dos/data/global_system/set.000/box.npy b/source/tests/pt/dos/data/global_system/set.000/box.npy
new file mode 100644
index 0000000000..6265bf150e
Binary files /dev/null and b/source/tests/pt/dos/data/global_system/set.000/box.npy differ
diff --git a/source/tests/pt/dos/data/global_system/set.000/coord.npy b/source/tests/pt/dos/data/global_system/set.000/coord.npy
new file mode 100644
index 0000000000..f33ce430bf
Binary files /dev/null and b/source/tests/pt/dos/data/global_system/set.000/coord.npy differ
diff --git a/source/tests/pt/dos/data/global_system/set.000/dos.npy b/source/tests/pt/dos/data/global_system/set.000/dos.npy
new file mode 100644
index 0000000000..904b23e709
Binary files /dev/null and b/source/tests/pt/dos/data/global_system/set.000/dos.npy differ
diff --git a/source/tests/pt/dos/data/global_system/type.raw b/source/tests/pt/dos/data/global_system/type.raw
new file mode 100644
index 0000000000..de3c26ec4e
--- /dev/null
+++ b/source/tests/pt/dos/data/global_system/type.raw
@@ -0,0 +1,32 @@
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
diff --git a/source/tests/pt/dos/data/global_system/type_map.raw b/source/tests/pt/dos/data/global_system/type_map.raw
new file mode 100644
index 0000000000..a9edc74f38
--- /dev/null
+++ b/source/tests/pt/dos/data/global_system/type_map.raw
@@ -0,0 +1 @@
+H
diff --git a/source/tests/pt/dos/input.json b/source/tests/pt/dos/input.json
index f9330003be..991f5acf70 100644
--- a/source/tests/pt/dos/input.json
+++ b/source/tests/pt/dos/input.json
@@ -36,8 +36,8 @@
     },
     "loss": {
         "type": "dos",
-        "start_pref_dos": 0.0,
-        "limit_pref_dos": 0.0,
+        "start_pref_dos": 1.0,
+        "limit_pref_dos": 1.0,
"start_pref_cdf": 0.0, "limit_pref_cdf": 0.0, "start_pref_ados": 1.0, @@ -48,6 +48,7 @@ "learning_rate": { "type": "exp", "start_lr": 0.001, + "decay_steps": 5000, "stop_lr": 1e-08 }, "training": { @@ -56,21 +57,23 @@ "disp_file": "lcurve.out", "disp_freq": 100, "save_freq": 1000, - "save_ckpt": "model.ckpt", + "save_ckpt": "model", "disp_training": true, "time_training": true, "profiling": false, "profiling_file": "timeline.json", "training_data": { "systems": [ - "pt/dos/data/" + "pt/dos/data/atomic_system/", + "pt/dos/data/global_system/" ], "set_prefix": "set", "batch_size": 1 }, "validation_data": { "systems": [ - "pt/dos/data/" + "pt/dos/data/atomic_system/", + "pt/dos/data/global_system/" ], "set_prefix": "set", "batch_size": 1 diff --git a/source/tests/pt/model/test_jit.py b/source/tests/pt/model/test_jit.py index 81ea49a68e..41a5902a5a 100644 --- a/source/tests/pt/model/test_jit.py +++ b/source/tests/pt/model/test_jit.py @@ -20,6 +20,7 @@ ) from .test_permutation import ( + model_dos, model_dpa1, model_dpa2, model_hybrid, @@ -42,6 +43,8 @@ def tearDown(self): os.remove(f) if f in ["stat_files"]: shutil.rmtree(f) + if f in ["checkpoint"]: + os.remove(f) class TestEnergyModelSeA(unittest.TestCase, JITTest): @@ -60,6 +63,22 @@ def tearDown(self): JITTest.tearDown(self) +class TestDOSModelSeA(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent.parent / "dos/input.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent.parent / "dos/data/global_system")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dos) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + class TestEnergyModelDPA1(unittest.TestCase, JITTest): def setUp(self): input_json = str(Path(__file__).parent / "water/se_atten.json") diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index 3d9a4df11e..5e395eb8c0 100644 --- a/source/tests/pt/model/test_permutation.py +++ b/source/tests/pt/model/test_permutation.py @@ -53,7 +53,7 @@ "resnet_dt": True, "seed": 1, "type": "dos", - "numb_dos": 5, + "numb_dos": 250, }, "data_stat_nbatch": 20, } diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py index a9ba2fd720..1635ad56ea 100644 --- a/source/tests/pt/test_training.py +++ b/source/tests/pt/test_training.py @@ -97,18 +97,18 @@ def tearDown(self) -> None: DPTrainTest.tearDown(self) -@unittest.skip("loss not implemented") class TestDOSModelSeA(unittest.TestCase, DPTrainTest): def setUp(self): input_json = str(Path(__file__).parent / "dos/input.json") with open(input_json) as f: self.config = json.load(f) - data_file = [str(Path(__file__).parent / "dos/data/")] + data_file = [str(Path(__file__).parent / "dos/data/atomic_system")] self.config["training"]["training_data"]["systems"] = data_file self.config["training"]["validation_data"]["systems"] = data_file self.config["model"] = deepcopy(model_dos) self.config["training"]["numb_steps"] = 1 self.config["training"]["save_freq"] = 1 + self.not_all_grad = True def tearDown(self) -> None: DPTrainTest.tearDown(self)