args: add entire training parameters #829

Merged · 1 commit · Jul 30, 2022
22 changes: 22 additions & 0 deletions dpgen/generator/arginfo.py
@@ -59,11 +59,26 @@ def data_args() -> List[Argument]:


def training_args() -> List[Argument]:
"""Traning arguments.

Returns
-------
list[dargs.Argument]
List of training arguments.
"""
doc_numb_models = 'Number of models to be trained in 00.train. 4 is recommended.'
doc_training_iter0_model_path = 'The model used to init the first-iteration training. The number of elements should be equal to numb_models.'
doc_training_init_model = 'If iteration > 0, the model parameters will be initialized from the model trained at the previous iteration. If iteration == 0, the model parameters will be initialized from training_iter0_model_path.'
doc_default_training_param = 'Training parameters for deepmd-kit in 00.train. You can find instructions here: https://github.com/deepmodeling/deepmd-kit.'
doc_dp_compress = 'Use dp compress to compress the model.'
doc_training_reuse_iter = "The minimal index of the iteration that continues training models from the old models of the last iteration."
doc_reusing = " This option is only adopted when continuing training models from old models, and it will override the default parameters."
doc_training_reuse_old_ratio = "The probability proportion of old data during training." + doc_reusing
doc_training_reuse_numb_steps = "Number of training batches." + doc_reusing
doc_training_reuse_start_lr = "The learning rate at the start of the training." + doc_reusing
doc_training_reuse_start_pref_e = "The prefactor of the energy loss at the start of the training." + doc_reusing
doc_training_reuse_start_pref_f = "The prefactor of the force loss at the start of the training." + doc_reusing
doc_model_devi_activation_func = "The activation function in the model. The shape of the list should be (N_models, 2), where 2 represents the embedding and fitting networks. This option will override the default parameters."

return [
Argument("numb_models", int, optional=False, doc=doc_numb_models),
@@ -75,6 +90,13 @@ def training_args() -> List[Argument]:
doc=doc_default_training_param),
Argument("dp_compress", bool, optional=True,
default=False, doc=doc_dp_compress),
Argument("training_reuse_iter", [None, int], optional=True, doc=doc_training_reuse_iter),
Argument("training_reuse_old_ratio", [None, float], optional=True, doc=doc_training_reuse_old_ratio),
Argument("training_reuse_numb_steps", [None, int], alias=["training_reuse_stop_batch"], optional=True, default=400000, doc=doc_training_reuse_numb_steps),
Argument("training_reuse_start_lr", [None, float], optional=True, default=1e-4, doc=doc_training_reuse_start_lr),
Argument("training_reuse_start_pref_e", [None, float, int], optional=True, default=0.1, doc=doc_training_reuse_start_pref_e),
Argument("training_reuse_start_pref_f", [None, float, int], optional=True, default=100, doc=doc_training_reuse_start_pref_f),
Argument("model_devi_activation_func", [None, list], optional=True, doc=doc_model_devi_activation_func),
]


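For reference, here is a minimal, hypothetical sketch (not part of this PR) of how the expanded training_args() could be consumed: the returned fields are wrapped in a dict-typed dargs Argument and a sample parameter section is normalized against them. The wrapper name "run_training" and the sample values are illustrative assumptions, as is the premise that the fields elided from this diff (e.g. training_iter0_model_path) are optional.

# Hypothetical usage sketch, not part of the PR: validate and normalize
# a training parameter section against training_args() with dargs.
from dargs import Argument

from dpgen.generator.arginfo import training_args

# Wrap the training fields in one dict-typed Argument so the whole
# section can be normalized and type-checked at once. The name
# "run_training" is an illustrative assumption.
base = Argument("run_training", dict, sub_fields=training_args())

params = {
    "numb_models": 4,              # 4 models is the recommended setting
    "default_training_param": {},  # deepmd-kit training params go here
    "training_reuse_iter": 2,      # start reusing old models at iteration 2
    "training_reuse_old_ratio": 0.2,
}

# normalize_value fills in the documented defaults, e.g.
# training_reuse_numb_steps -> 400000 and training_reuse_start_lr -> 1e-4;
# check_value raises if a field has the wrong type.
params = base.normalize_value(params, trim_pattern="_*")
base.check_value(params, strict=False)

print(params["training_reuse_numb_steps"])    # 400000
print(params["training_reuse_start_pref_f"])  # 100

Because the PR declares training_reuse_stop_batch as an alias of training_reuse_numb_steps, dargs normalization would also accept the old key and map it onto the new one.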