feat: atom_ener in energy fitting #3370

Merged · 3 commits · Mar 1, 2024
Changes from 1 commit
34 changes: 33 additions & 1 deletion deepmd/dpmodel/fitting/general_fitting.py
@@ -8,6 +8,7 @@
Dict,
List,
Optional,
Union,
)

import numpy as np
@@ -73,7 +74,10 @@
different fitting nets for different atom types.
exclude_types: List[int]
Atomic contributions of the excluded atom types are set zero.

remove_vaccum_contribution: bool or List[bool]
Remove the vacuum contribution before the bias is added. If it is a list and
mixed_types is False, the vacuum contribution is removed only for atom types
whose entry in the list is True.
"""

def __init__(
@@ -95,6 +99,7 @@
spin: Any = None,
mixed_types: bool = True,
exclude_types: List[int] = [],
remove_vaccum_contribution: Union[bool, List[bool]] = False,
):
self.var_name = var_name
self.ntypes = ntypes
@@ -119,6 +124,7 @@
self.exclude_types = exclude_types
if self.spin is not None:
raise NotImplementedError("spin is not supported")
self.remove_vaccum_contribution = remove_vaccum_contribution

self.emask = AtomExcludeMask(self.ntypes, self.exclude_types)

@@ -298,6 +304,14 @@
"which is not consistent with {self.dim_descrpt}."
)
xx = descriptor
if self.remove_vaccum_contribution is not False:

# TODO: Ideally, the input for vacuum should be computed;
# we consider it as always zero for convenience.
# Needs a compute_input_stats for vacuum passed from the
# descriptor.
xx_zeros = np.zeros_like(xx)

else:
xx_zeros = None

# check fparam dim, concatenate to input descriptor
if self.numb_fparam > 0:
assert fparam is not None, "fparam should not be None"
@@ -312,6 +326,11 @@
[xx, fparam],
axis=-1,
)
if xx_zeros is not None:
xx_zeros = np.concatenate(

[xx_zeros, fparam],
axis=-1,
)
# check aparam dim, concatenate to input descriptor
if self.numb_aparam > 0:
assert aparam is not None, "aparam should not be None"
@@ -326,6 +345,11 @@
[xx, aparam],
axis=-1,
)
if xx_zeros is not None:
xx_zeros = np.concatenate(

[xx_zeros, aparam],
axis=-1,
)

# calculate the prediction
if not self.mixed_types:
@@ -335,11 +359,19 @@
(atype == type_i).reshape([nf, nloc, 1]), [1, 1, net_dim_out]
)
atom_property = self.nets[(type_i,)](xx)
if xx_zeros is not None and not (

isinstance(self.remove_vaccum_contribution, list)
and len(self.remove_vaccum_contribution) > type_i
and not self.remove_vaccum_contribution[type_i]
):
atom_property -= self.nets[(type_i,)](xx_zeros)

atom_property = atom_property + self.bias_atom_e[type_i]
atom_property = atom_property * mask
outs = outs + atom_property # Shape is [nframes, natoms[0], 1]
else:
outs = self.nets[()](xx) + self.bias_atom_e[atype]
if xx_zeros is not None:
outs -= self.nets[()](xx_zeros)

# nf x nloc
exclude_mask = self.emask.build_type_exclude_mask(atype)
# nf x nloc x nod
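A minimal NumPy sketch of the `xx_zeros` logic above, assuming a toy stand-in fitting network (`toy_net`, the shapes, and the bias value are illustrative, not part of this PR): the net is also evaluated on an all-zero descriptor and that value is subtracted before the per-type bias, so an atom whose descriptor is the vacuum (zero) input reproduces the fixed energy from `atom_ener` exactly.

```python
import numpy as np

def toy_net(x):
    # hypothetical stand-in for a fitting network: a smooth map to one value per atom
    return np.tanh(x.sum(axis=-1, keepdims=True)) + 0.3

descriptor = np.random.rand(2, 4, 8)          # nf x nloc x dim_descrpt
bias = -12345.6                               # fixed atomic energy taken from atom_ener

vacuum = toy_net(np.zeros_like(descriptor))   # plays the role of self.nets[...](xx_zeros)
energy = toy_net(descriptor) - vacuum + bias  # vacuum term removed before the bias

# an isolated atom (zero descriptor) now yields exactly the requested bias
assert np.allclose(toy_net(np.zeros_like(descriptor)) - vacuum + bias, bias)
print(energy.shape)                           # (2, 4, 1)
```

Since the true vacuum descriptor is not available at this point, the PR approximates it with zeros, as the TODO comment notes.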
5 changes: 3 additions & 2 deletions deepmd/dpmodel/fitting/invar_fitting.py
@@ -136,8 +136,6 @@ def __init__(
raise NotImplementedError("use_aparam_as_mask is not implemented")
if layer_name is not None:
raise NotImplementedError("layer_name is not implemented")
if atom_ener is not None and atom_ener != []:
raise NotImplementedError("atom_ener is not implemented")

self.dim_out = dim_out
self.atom_ener = atom_ener
@@ -159,6 +157,9 @@
spin=spin,
mixed_types=mixed_types,
exclude_types=exclude_types,
remove_vaccum_contribution=False
if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0
else [x is not None for x in atom_ener],
)

def serialize(self) -> dict:
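The ternary expression above converts `atom_ener` into the `remove_vaccum_contribution` argument; a standalone sketch of that mapping (the helper name and the sample values are illustrative, not part of the PR):

```python
from typing import List, Optional, Union

def to_remove_vaccum_contribution(
    atom_ener: Optional[List[Optional[float]]],
) -> Union[bool, List[bool]]:
    # mirrors the expression passed to the parent __init__ above
    if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0:
        return False
    return [x is not None for x in atom_ener]

print(to_remove_vaccum_contribution(None))              # False
print(to_remove_vaccum_contribution([]))                # False
print(to_remove_vaccum_contribution([None, None]))      # False
print(to_remove_vaccum_contribution([-12345.6, None]))  # [True, False]
```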
3 changes: 3 additions & 0 deletions deepmd/pt/model/task/ener.py
@@ -124,6 +124,9 @@ def __init__(
rcond=rcond,
seed=seed,
exclude_types=exclude_types,
remove_vaccum_contribution=False
if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0
else [x is not None for x in atom_ener],
**kwargs,
)

35 changes: 34 additions & 1 deletion deepmd/pt/model/task/fitting.py
@@ -7,6 +7,7 @@
from typing import (
List,
Optional,
Union,
)

import numpy as np
@@ -246,7 +247,10 @@
Random seed.
exclude_types: List[int]
Atomic contributions of the excluded atom types are set zero.

remove_vaccum_contribution: bool or List[bool]
Remove the vacuum contribution before the bias is added. If it is a list and
mixed_types is False, the vacuum contribution is removed only for atom types
whose entry in the list is True.
"""

def __init__(
@@ -265,6 +269,7 @@
rcond: Optional[float] = None,
seed: Optional[int] = None,
exclude_types: List[int] = [],
remove_vaccum_contribution: Union[bool, List[bool]] = False,
**kwargs,
):
super().__init__()
@@ -282,6 +287,7 @@
self.rcond = rcond
# order matters, should be placed after the assignment of ntypes
self.reinit_exclude(exclude_types)
self.remove_vaccum_contribution = remove_vaccum_contribution


net_dim_out = self._net_out_dim()
# init constants
@@ -487,6 +493,14 @@
aparam: Optional[torch.Tensor] = None,
):
xx = descriptor
if self.remove_vaccum_contribution:

# TODO: Ideally, the input for vacuum should be computed;
# we consider it as always zero for convenience.
# Needs a compute_input_stats for vacuum passed from the
# descriptor.
xx_zeros = torch.zeros_like(xx)

else:
xx_zeros = None

nf, nloc, nd = xx.shape
net_dim_out = self._net_out_dim()

@@ -515,6 +529,11 @@
[xx, fparam],
dim=-1,
)
if xx_zeros is not None:
xx_zeros = torch.cat(

[xx_zeros, fparam],
dim=-1,
)
# check aparam dim, concatenate to input descriptor
if self.numb_aparam > 0:
assert aparam is not None, "aparam should not be None"
@@ -534,6 +553,11 @@
[xx, aparam],
dim=-1,
)
if xx_zeros is not None:
xx_zeros = torch.cat(

[xx_zeros, aparam],
dim=-1,
)

outs = torch.zeros(
(nf, nloc, net_dim_out),
@@ -542,6 +566,7 @@
) # jit assertion
if self.old_impl:
assert self.filter_layers_old is not None
assert xx_zeros is None

if self.mixed_types:
atom_property = self.filter_layers_old[0](xx) + self.bias_atom_e[atype]
outs = outs + atom_property # Shape is [nframes, natoms[0], 1]
@@ -557,6 +582,8 @@
atom_property = (
self.filter_layers.networks[0](xx) + self.bias_atom_e[atype]
)
if xx_zeros is not None:
atom_property -= self.filter_layers.networks[0](xx_zeros)

outs = (
outs + atom_property
) # Shape is [nframes, natoms[0], net_dim_out]
@@ -565,6 +592,12 @@
mask = (atype == type_i).unsqueeze(-1)
mask = torch.tile(mask, (1, 1, net_dim_out))
atom_property = ll(xx)
if xx_zeros is not None and not (

isinstance(self.remove_vaccum_contribution, list)
and len(self.remove_vaccum_contribution) > type_i
and not self.remove_vaccum_contribution[type_i]
):
atom_property -= ll(xx_zeros)

atom_property = atom_property + self.bias_atom_e[type_i]
atom_property = atom_property * mask
outs = (
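In the non-mixed-types branch above, the vacuum term is subtracted for a given `type_i` unless `remove_vaccum_contribution` is a list whose entry for that type is False (or the feature is disabled and `xx_zeros` is None). A small sketch of that guard as a standalone helper (the function name is ours, not part of the PR):

```python
from typing import List, Union

def should_subtract_vacuum(
    remove_vaccum_contribution: Union[bool, List[bool]],
    type_i: int,
    have_xx_zeros: bool,
) -> bool:
    # mirrors the condition guarding `atom_property -= ll(xx_zeros)`
    if not have_xx_zeros:
        return False
    return not (
        isinstance(remove_vaccum_contribution, list)
        and len(remove_vaccum_contribution) > type_i
        and not remove_vaccum_contribution[type_i]
    )

print(should_subtract_vacuum([True, False], 0, True))   # True: entry for type 0 is True
print(should_subtract_vacuum([True, False], 1, True))   # False: entry for type 1 is False
print(should_subtract_vacuum([True, False], 1, False))  # False: feature disabled
```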
2 changes: 1 addition & 1 deletion deepmd/tf/fit/ener.py
@@ -1003,7 +1003,7 @@ def serialize(self, suffix: str = "") -> dict:
"rcond": self.rcond,
"tot_ener_zero": self.tot_ener_zero,
"trainable": self.trainable,
"atom_ener": self.atom_ener,
"atom_ener": self.atom_ener_v,
"activation_function": self.activation_function_name,
"precision": self.fitting_precision.name,
"layer_name": self.layer_name,
11 changes: 11 additions & 0 deletions source/tests/consistent/fitting/test_ener.py
@@ -43,6 +43,7 @@
("float64", "float32"), # precision
(True, False), # mixed_types
(0, 1), # numb_fparam
([], [-12345.6, None]), # atom_ener
)
class TestEner(CommonTest, FittingTest, unittest.TestCase):
@property
@@ -52,13 +53,15 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
return {
"neuron": [5, 5, 5],
"resnet_dt": resnet_dt,
"precision": precision,
"numb_fparam": numb_fparam,
"seed": 20240217,
"atom_ener": atom_ener,
}

@property
@@ -68,6 +71,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
# TODO: mixed_types
return mixed_types or CommonTest.skip_pt
@@ -79,6 +83,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,

) = self.param
return CommonTest.skip_pt

@@ -105,6 +110,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
return {
"ntypes": self.ntypes,
@@ -118,6 +124,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
return self.build_tf_fitting(
obj,
@@ -134,6 +141,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
return (
pt_obj(
@@ -154,6 +162,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
return dp_obj(
self.inputs,
@@ -175,6 +184,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
if precision == "float64":
return 1e-10
@@ -191,6 +201,7 @@
precision,
mixed_types,
numb_fparam,
atom_ener,
) = self.param
if precision == "float64":
return 1e-10
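For reference, the new `atom_ener` entry simply extends the parameterized grid that every property above unpacks; a rough sketch of how the combinations enumerate, assuming the usual Cartesian-product behaviour of the `parameterized` decorator used by these consistency tests:

```python
import itertools

# mirrors the parameter grid declared above the test class
params = list(itertools.product(
    (True, False),            # resnet_dt
    ("float64", "float32"),   # precision
    (True, False),            # mixed_types
    (0, 1),                   # numb_fparam
    ([], [-12345.6, None]),   # atom_ener (new in this PR)
))

resnet_dt, precision, mixed_types, numb_fparam, atom_ener = params[0]
print(len(params))  # 32 combinations in total
print(atom_ener)    # [] for the first combination
```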