diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py
index 21275317dc..d5e78296a5 100644
--- a/deepmd/pt/model/descriptor/dpa1.py
+++ b/deepmd/pt/model/descriptor/dpa1.py
@@ -65,7 +65,7 @@ def __init__(
         trainable: bool = True,
         exclude_types: List[Tuple[int, int]] = [],
         stripped_type_embedding: bool = False,
-        smooth_type_embdding: bool = False,
+        smooth_type_embedding: bool = False,
     ):
         super().__init__()
         if resnet_dt:
@@ -76,8 +76,8 @@ def __init__(
             raise NotImplementedError("precison is not supported.")
         if stripped_type_embedding:
             raise NotImplementedError("stripped_type_embedding is not supported.")
-        if smooth_type_embdding:
-            raise NotImplementedError("smooth_type_embdding is not supported.")
+        if smooth_type_embedding:
+            raise NotImplementedError("smooth_type_embedding is not supported.")
         del type
         self.se_atten = DescrptBlockSeAtten(
             rcut,
diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py
index 4018ebab14..0cd5a96632 100644
--- a/deepmd/tf/descriptor/se_atten.py
+++ b/deepmd/tf/descriptor/se_atten.py
@@ -122,7 +122,7 @@ class DescrptSeAtten(DescrptSeA):
     stripped_type_embedding
             Whether to strip the type embedding into a separated embedding network.
             Default value will be True in `se_atten_v2` descriptor.
-    smooth_type_embdding
+    smooth_type_embedding
             When using stripped type embedding, whether to dot smooth factor on the network output of type embedding
             to keep the network smooth, instead of setting `set_davg_zero` to be True.
             Default value will be True in `se_atten_v2` descriptor.
@@ -156,7 +156,7 @@ def __init__(
         attn_mask: bool = False,
         multi_task: bool = False,
         stripped_type_embedding: bool = False,
-        smooth_type_embdding: bool = False,
+        smooth_type_embedding: bool = False,
         # not implemented
         post_ln=True,
         ffn=False,
@@ -169,7 +169,9 @@ def __init__(
         concat_output_tebd: bool = True,
         **kwargs,
     ) -> None:
-        if not set_davg_zero and not (stripped_type_embedding and smooth_type_embdding):
+        if not set_davg_zero and not (
+            stripped_type_embedding and smooth_type_embedding
+        ):
             warnings.warn(
                 "Set 'set_davg_zero' False in descriptor 'se_atten' "
                 "may cause unexpected incontinuity during model inference!"
@@ -220,7 +222,7 @@ def __init__(
         if ntypes == 0:
             raise ValueError("`model/type_map` is not set or empty!")
         self.stripped_type_embedding = stripped_type_embedding
-        self.smooth = smooth_type_embdding
+        self.smooth = smooth_type_embedding
         self.ntypes = ntypes
         self.att_n = attn
         self.attn_layer = attn_layer
diff --git a/deepmd/tf/descriptor/se_atten_v2.py b/deepmd/tf/descriptor/se_atten_v2.py
index 784e02d84d..01c4d93ad8 100644
--- a/deepmd/tf/descriptor/se_atten_v2.py
+++ b/deepmd/tf/descriptor/se_atten_v2.py
@@ -110,6 +110,6 @@ def __init__(
             attn_mask=attn_mask,
             multi_task=multi_task,
             stripped_type_embedding=True,
-            smooth_type_embdding=True,
+            smooth_type_embedding=True,
             **kwargs,
         )
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index 07add486c1..323ce44dfe 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -476,7 +476,7 @@ def descrpt_se_atten_common_args():
 @descrpt_args_plugin.register("se_atten", alias=["dpa1"])
 def descrpt_se_atten_args():
     doc_stripped_type_embedding = "Whether to strip the type embedding into a separated embedding network. Setting it to `False` will fall back to the previous version of `se_atten` which is non-compressible."
-    doc_smooth_type_embdding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True."
+    doc_smooth_type_embedding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True."
     doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"
     doc_tebd_dim = "The dimension of atom type embedding."
     doc_temperature = "The scaling factor of normalization in calculations of attention weights, which is used to scale the matmul(Q, K)."
@@ -503,11 +503,12 @@ def descrpt_se_atten_args():
             doc=doc_only_tf_supported + doc_stripped_type_embedding,
         ),
         Argument(
-            "smooth_type_embdding",
+            "smooth_type_embedding",
             bool,
             optional=True,
             default=False,
-            doc=doc_only_tf_supported + doc_smooth_type_embdding,
+            alias=["smooth_type_embdding"],
+            doc=doc_only_tf_supported + doc_smooth_type_embedding,
         ),
         Argument(
             "set_davg_zero", bool, optional=True, default=True, doc=doc_set_davg_zero
diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md
index 364d35805b..1511ac7fac 100644
--- a/doc/model/train-se-atten.md
+++ b/doc/model/train-se-atten.md
@@ -159,7 +159,7 @@ We highly recommend using the version 2.0 of the attention-based descriptor `"se
 
 ```json
       "stripped_type_embedding": true,
-      "smooth_type_embdding": true,
+      "smooth_type_embedding": true,
       "set_davg_zero": false
 ```
 
diff --git a/source/tests/tf/test_model_compression_se_atten.py b/source/tests/tf/test_model_compression_se_atten.py
index aa1f0afa38..03ddedad39 100644
--- a/source/tests/tf/test_model_compression_se_atten.py
+++ b/source/tests/tf/test_model_compression_se_atten.py
@@ -37,27 +37,27 @@ def _file_delete(file):
     {
         "se_atten precision": "float64",
         "type embedding precision": "float64",
-        "smooth_type_embdding": True,
+        "smooth_type_embedding": True,
     },
     {
         "se_atten precision": "float64",
         "type embedding precision": "float64",
-        "smooth_type_embdding": False,
+        "smooth_type_embedding": False,
     },
     {
         "se_atten precision": "float64",
         "type embedding precision": "float32",
-        "smooth_type_embdding": True,
+        "smooth_type_embedding": True,
     },
     {
         "se_atten precision": "float32",
         "type embedding precision": "float64",
-        "smooth_type_embdding": True,
+        "smooth_type_embedding": True,
     },
     {
         "se_atten precision": "float32",
         "type embedding precision": "float32",
-        "smooth_type_embdding": True,
+        "smooth_type_embedding": True,
     },
 ]
 
@@ -82,8 +82,8 @@ def _init_models():
     jdata["model"]["descriptor"]["stripped_type_embedding"] = True
     jdata["model"]["descriptor"]["sel"] = 120
     jdata["model"]["descriptor"]["attn_layer"] = 0
-    jdata["model"]["descriptor"]["smooth_type_embdding"] = tests[i][
-        "smooth_type_embdding"
+    jdata["model"]["descriptor"]["smooth_type_embedding"] = tests[i][
+        "smooth_type_embedding"
     ]
     jdata["model"]["type_embedding"] = {}
     jdata["model"]["type_embedding"]["precision"] = tests[i][
diff --git a/source/tests/tf/test_model_se_atten.py b/source/tests/tf/test_model_se_atten.py
index 36cf4887c0..d75dc0cfff 100644
--- a/source/tests/tf/test_model_se_atten.py
+++ b/source/tests/tf/test_model_se_atten.py
@@ -764,7 +764,7 @@ def test_smoothness_of_stripped_type_embedding_smooth_model(self):
         jdata["model"]["descriptor"].pop("type", None)
         jdata["model"]["descriptor"]["ntypes"] = 2
         jdata["model"]["descriptor"]["stripped_type_embedding"] = True
-        jdata["model"]["descriptor"]["smooth_type_embdding"] = True
+        jdata["model"]["descriptor"]["smooth_type_embedding"] = True
         jdata["model"]["descriptor"]["attn_layer"] = 1
         jdata["model"]["descriptor"]["rcut"] = 6.0
         jdata["model"]["descriptor"]["rcut_smth"] = 4.0
@@ -910,7 +910,7 @@ def test_smoothness_of_stripped_type_embedding_smooth_model_excluded_types(self)
         jdata["model"]["descriptor"].pop("type", None)
         jdata["model"]["descriptor"]["ntypes"] = 2
         jdata["model"]["descriptor"]["stripped_type_embedding"] = True
-        jdata["model"]["descriptor"]["smooth_type_embdding"] = True
+        jdata["model"]["descriptor"]["smooth_type_embedding"] = True
         jdata["model"]["descriptor"]["attn_layer"] = 1
         jdata["model"]["descriptor"]["rcut"] = 6.0
         jdata["model"]["descriptor"]["rcut_smth"] = 4.0