pt: fix params with no docstrs #3388

Merged
merged 2 commits on Mar 2, 2024
Changes from all commits
114 changes: 98 additions & 16 deletions deepmd/utils/argcheck.py
@@ -455,6 +455,20 @@
doc_stripped_type_embedding = "Whether to strip the type embedding into a separated embedding network. Setting it to `False` will fall back to the previous version of `se_atten` which is non-compressible."
doc_smooth_type_embdding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True."
doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"
doc_tebd_dim = "The dimension of atom type embedding."
doc_temperature = "The scaling factor of normalization in calculations of attention weights, which is used to scale the matmul(Q, K)."
doc_scaling_factor = (
"The scaling factor of normalization in calculations of attention weights, which is used to scale the matmul(Q, K). "
"If `temperature` is None, the scaling of attention weights is (N_hidden_dim * scaling_factor)**0.5. "
"Else, the scaling of attention weights is setting to `temperature`."
)
doc_normalize = (
"Whether to normalize the hidden vectors during attention calculation."
)
doc_concat_output_tebd = (
"Whether to concat type embedding at the output of the descriptor."
)
doc_deprecated = "This feature will be removed in a future release."

return [
*descrpt_se_atten_common_args(),
@@ -476,42 +490,81 @@
"set_davg_zero", bool, optional=True, default=True, doc=doc_set_davg_zero
),
# pt only
Argument("tebd_dim", int, optional=True, default=8, doc=doc_only_pt_supported),
Argument(
"tebd_dim",
int,
optional=True,
default=8,
doc=doc_only_pt_supported + doc_tebd_dim,
),
Argument(
"tebd_input_mode",
str,
optional=True,
default="concat",
doc=doc_only_pt_supported,
doc=doc_only_pt_supported + doc_deprecated,
),
Argument(
"post_ln",
bool,
optional=True,
default=True,
doc=doc_only_pt_supported + doc_deprecated,
),
Argument(
"post_ln", bool, optional=True, default=True, doc=doc_only_pt_supported
"ffn",
bool,
optional=True,
default=False,
doc=doc_only_pt_supported + doc_deprecated,
),
Argument("ffn", bool, optional=True, default=False, doc=doc_only_pt_supported),
Argument(
"ffn_embed_dim", int, optional=True, default=1024, doc=doc_only_pt_supported
"ffn_embed_dim",
int,
optional=True,
default=1024,
doc=doc_only_pt_supported + doc_deprecated,
),
Argument(
"scaling_factor",
float,
optional=True,
default=1.0,
doc=doc_only_pt_supported,
doc=doc_only_pt_supported + doc_scaling_factor,
),
Argument("head_num", int, optional=True, default=1, doc=doc_only_pt_supported),
Argument(
"normalize", bool, optional=True, default=True, doc=doc_only_pt_supported
"head_num",
int,
optional=True,
default=1,
doc=doc_only_pt_supported + doc_deprecated,
),
Argument("temperature", float, optional=True, doc=doc_only_pt_supported),
Argument(
"return_rot", bool, optional=True, default=False, doc=doc_only_pt_supported
"normalize",
bool,
optional=True,
default=True,
doc=doc_only_pt_supported + doc_normalize,
),
Argument(
"temperature",
float,
optional=True,
doc=doc_only_pt_supported + doc_temperature,
),
Argument(
"return_rot",
bool,
optional=True,
default=False,
doc=doc_only_pt_supported + doc_deprecated,
),
Argument(
"concat_output_tebd",
bool,
optional=True,
default=True,
doc=doc_only_pt_supported,
doc=doc_only_pt_supported + doc_concat_output_tebd,
),
]
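
For readers skimming the new docstrings: the scaling rule described by `doc_scaling_factor`, `doc_temperature`, and `doc_normalize` can be summarized with a small sketch. The function name, the softmax step, and the choice to divide the Q·K product by the scale are illustrative assumptions for this note, not code taken from DeePMD-kit.

```python
import numpy as np


def scaled_attention_weights(q, k, scaling_factor=1.0, temperature=None, normalize=True):
    """Illustrative sketch of the documented scaling rule (not the DeePMD-kit kernel)."""
    n_hidden = q.shape[-1]
    if normalize:
        # Normalize the hidden vectors before taking the dot product.
        q = q / (np.linalg.norm(q, axis=-1, keepdims=True) + 1e-12)
        k = k / (np.linalg.norm(k, axis=-1, keepdims=True) + 1e-12)
    # doc_scaling_factor / doc_temperature: use (N_hidden_dim * scaling_factor)**0.5
    # when temperature is None, otherwise use temperature itself.
    scale = (n_hidden * scaling_factor) ** 0.5 if temperature is None else temperature
    logits = np.matmul(q, np.swapaxes(k, -1, -2)) / scale
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return weights / weights.sum(axis=-1, keepdims=True)
```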

@@ -2069,6 +2122,23 @@
"Weights will be normalized and minus ones will be ignored. "
"If not set, each fitting net will be equally selected when training."
)
doc_warmup_steps = (
"The number of steps for learning rate warmup. During warmup, "
"the learning rate begins at zero and progressively increases linearly to `start_lr`, "
"rather than starting directly from `start_lr`"
)
doc_gradient_max_norm = (
"Clips the gradient norm to a maximum value. "
"If the gradient norm exceeds this value, it will be clipped to this limit. "
"No gradient clipping will occur if set to 0."
)
doc_stat_file = (
"The file path for saving the data statistics results. "
"If set, the results will be saved and directly loaded during the next training session, "
"avoiding the need to recalculate the statistics"
)
doc_opt_type = "The type of optimizer to use."
doc_kf_blocksize = "The blocksize for the Kalman filter."

arg_training_data = training_data_args()
arg_validation_data = validation_data_args()
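
The behaviour promised by `doc_warmup_steps` and `doc_gradient_max_norm` can be illustrated with a minimal sketch; the helper names and the use of NumPy arrays are assumptions made for this example, not the optimizer code in the pt backend.

```python
import numpy as np


def warmup_lr(step, start_lr, warmup_steps):
    """Linear warmup: the LR rises from 0 to start_lr over warmup_steps,
    then stays at start_lr (any later decay schedule is omitted here)."""
    if warmup_steps > 0 and step < warmup_steps:
        return start_lr * step / warmup_steps
    return start_lr


def clip_by_global_norm(grads, gradient_max_norm):
    """Rescale gradients so their global L2 norm never exceeds
    gradient_max_norm; a value of 0 means no clipping at all."""
    if gradient_max_norm <= 0:
        return grads
    total_norm = float(np.sqrt(sum(np.sum(g * g) for g in grads)))
    if total_norm > gradient_max_norm:
        grads = [g * (gradient_max_norm / total_norm) for g in grads]
    return grads
```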
@@ -2132,9 +2202,21 @@
),
Argument("data_dict", dict, optional=True, doc=doc_data_dict),
Argument("fitting_weight", dict, optional=True, doc=doc_fitting_weight),
Argument("warmup_steps", int, optional=True, doc=doc_only_pt_supported),
Argument("gradient_max_norm", float, optional=True, doc=doc_only_pt_supported),
Argument("stat_file", str, optional=True, doc=doc_only_pt_supported),
Argument(
"warmup_steps",
int,
optional=True,
doc=doc_only_pt_supported + doc_warmup_steps,
),
Argument(
"gradient_max_norm",
float,
optional=True,
doc=doc_only_pt_supported + doc_gradient_max_norm,
),
Argument(
"stat_file", str, optional=True, doc=doc_only_pt_supported + doc_stat_file
),
]
variants = [
Variant(
@@ -2149,7 +2231,7 @@
"kf_blocksize",
int,
optional=True,
doc=doc_only_pt_supported,
doc=doc_only_pt_supported + doc_kf_blocksize,
),
],
[],
Expand All @@ -2158,7 +2240,7 @@
],
optional=True,
default_tag="Adam",
doc=doc_only_pt_supported,
doc=doc_only_pt_supported + doc_opt_type,
)
]

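As a usage note, the pt-only training keys documented in this PR would sit in the training section of an input script roughly as sketched below; the surrounding keys and all values are placeholders chosen for illustration, not part of this change.

```python
# Hypothetical fragment of a training configuration (values are placeholders).
training = {
    "numb_steps": 100000,
    "warmup_steps": 1000,              # LR ramps linearly from 0 to start_lr
    "gradient_max_norm": 5.0,          # 0 would disable gradient clipping
    "stat_file": "path/to/stat_file",  # cache data statistics for the next run
    "opt_type": "Adam",                # the Kalman-filter variant additionally accepts kf_blocksize
}
```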