diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py
index cca46d3710..d39e236d07 100644
--- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py
+++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py
@@ -79,27 +79,6 @@ def mixed_types(self) -> bool:
         """
         return self.descriptor.mixed_types()
 
-    def set_out_bias(self, out_bias: np.ndarray, add=False) -> None:
-        """
-        Modify the output bias for the atomic model.
-
-        Parameters
-        ----------
-        out_bias : np.ndarray
-            The new bias to be applied.
-        add : bool, optional
-            Whether to add the new bias to the existing one.
-            If False, the output bias will be directly replaced by the new bias.
-            If True, the new bias will be added to the existing one.
-        """
-        self.fitting["bias_atom_e"] = (
-            out_bias + self.fitting["bias_atom_e"] if add else out_bias
-        )
-
-    def get_out_bias(self) -> np.ndarray:
-        """Return the output bias of the atomic model."""
-        return self.fitting["bias_atom_e"]
-
     def forward_atomic(
         self,
         extended_coord: np.ndarray,
diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py
index d01bd67826..e6296316a5 100644
--- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py
+++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py
@@ -175,15 +175,7 @@ def forward_atomic(
                 )["energy"]
             )
         self.weights = self._compute_weight(extended_coord, extended_atype, nlists_)
-        atype = extended_atype[:, :nloc]
-        bias_list = []
-        for idx, model in enumerate(self.models):
-            bias_atom_e = model.get_out_bias()
-            ener_list[idx] += bias_atom_e[atype]
-            bias_list.append(bias_atom_e[atype])
-
-        self.atomic_bias = np.sum(np.stack(bias_list) * np.stack(self.weights), axis=0)
 
         fit_ret = {
             "energy": np.sum(np.stack(ener_list) * np.stack(self.weights), axis=0),
         }  # (nframes, nloc, 1)
@@ -279,26 +271,6 @@ def get_sel_type(self) -> List[int]:
         # join all the selected types
         return list(set().union(*[model.get_sel_type() for model in self.models]))
 
-    def set_out_bias(self, out_bias: np.ndarray, add=False) -> None:
-        """
-        Modify the output bias for all the models in the linear atomic model.
-
-        Parameters
-        ----------
-        out_bias : torch.Tensor
-            The new bias to be applied.
-        add : bool, optional
-            Whether to add the new bias to the existing one.
-            If False, the output bias will be directly replaced by the new bias.
-            If True, the new bias will be added to the existing one.
-        """
-        for model in self.models:
-            model.set_out_bias(out_bias, add=add)
-
-    def get_out_bias(self) -> np.ndarray:
-        """Return the weighted output bias of the linear atomic model."""
-        return self.atomic_bias
-
     def is_aparam_nall(self) -> bool:
         """Check whether the shape of atomic parameters is (nframes, nall, ndim).
 
diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py
index 3e02a5d076..936c2b0943 100644
--- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py
+++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py
@@ -95,25 +95,6 @@ def get_sel_type(self) -> List[int]:
         If returning an empty list, all atom types are selected.
         """
 
-    @abstractmethod
-    def set_out_bias(self, out_bias: t_tensor, add=False) -> None:
-        """
-        Modify the output bias for the atomic model.
-
-        Parameters
-        ----------
-        out_bias : t_tensor
-            The new bias to be applied.
-        add : bool, optional
-            Whether to add the new bias to the existing one.
-            If False, the output bias will be directly replaced by the new bias.
-            If True, the new bias will be added to the existing one.
-        """
-
-    @abstractmethod
-    def get_out_bias(self) -> t_tensor:
-        """Return the output bias of the atomic model."""
-
     @abstractmethod
     def is_aparam_nall(self) -> bool:
         """Check whether the shape of atomic parameters is (nframes, nall, ndim).
diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
index 9c1f355f0d..51e46901e9 100644
--- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
+++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
@@ -131,25 +131,6 @@ def mixed_types(self) -> bool:
         # to match DPA1 and DPA2.
         return True
 
-    def set_out_bias(self, out_bias: np.ndarray, add=False) -> None:
-        """
-        Modify the output bias for the atomic model.
-
-        Parameters
-        ----------
-        out_bias : torch.Tensor
-            The new bias to be applied.
-        add : bool, optional
-            Whether to add the new bias to the existing one.
-            If False, the output bias will be directly replaced by the new bias.
-            If True, the new bias will be added to the existing one.
-        """
-        self.bias_atom_e = out_bias + self.bias_atom_e if add else out_bias
-
-    def get_out_bias(self) -> np.ndarray:
-        """Return the output bias of the atomic model."""
-        return self.bias_atom_e
-
     def serialize(self) -> dict:
         dd = BaseAtomicModel.serialize(self)
         dd.update(
diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py
index 57ca21a826..e750b6a54e 100644
--- a/deepmd/pt/model/atomic_model/base_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/base_atomic_model.py
@@ -366,7 +366,6 @@ def change_out_bias(
                 rcond=self.rcond,
                 preset_bias=self.preset_out_bias,
             )
-            # self.set_out_bias(delta_bias, add=True)
             self._store_out_stat(delta_bias, out_std, add=True)
         elif bias_adjust_mode == "set-by-statistic":
             bias_out, std_out = compute_output_stats(
@@ -377,7 +376,6 @@ def change_out_bias(
                 rcond=self.rcond,
                 preset_bias=self.preset_out_bias,
             )
-            # self.set_out_bias(bias_out)
             self._store_out_stat(bias_out, std_out)
         else:
             raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode)
diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py
index 82758672a1..14975732bc 100644
--- a/deepmd/pt/model/atomic_model/dp_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py
@@ -217,27 +217,6 @@ def wrapped_sampler():
         self.descriptor.compute_input_stats(wrapped_sampler, stat_file_path)
         self.compute_or_load_out_stat(wrapped_sampler, stat_file_path)
 
-    def set_out_bias(self, out_bias: torch.Tensor, add=False) -> None:
-        """
-        Modify the output bias for the atomic model.
-
-        Parameters
-        ----------
-        out_bias : torch.Tensor
-            The new bias to be applied.
-        add : bool, optional
-            Whether to add the new bias to the existing one.
-            If False, the output bias will be directly replaced by the new bias.
-            If True, the new bias will be added to the existing one.
- """ - self.fitting_net["bias_atom_e"] = ( - out_bias + self.fitting_net["bias_atom_e"] if add else out_bias - ) - - def get_out_bias(self) -> torch.Tensor: - """Return the output bias of the atomic model.""" - return self.fitting_net.bias_atom_e - def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" return self.fitting_net.get_dim_fparam() diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 709b59cb45..58d81a5037 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -192,8 +192,7 @@ def forward_atomic( for i, model in enumerate(self.models): mapping = self.mapping_list[i] - ener_list.append( - model.forward_atomic( + raw_ret = model.forward_atomic( extended_coord, mapping[extended_atype], nlists_[i], @@ -201,25 +200,37 @@ def forward_atomic( fparam, aparam, )["energy"] + # apply bias to each individual model + ener_list.append( + model.apply_out_stat( + raw_ret, mapping[extended_atype] + ) ) - weights = self._compute_weight(extended_coord, extended_atype, nlists_) - atype = extended_atype[:, :nloc] - bias_list = [] - for idx, model in enumerate(self.models): - bias_atom_e = model.get_out_bias() - - ener_list[idx] += bias_atom_e[atype] - bias_list.append(bias_atom_e[atype]) - - self.atomic_bias = torch.sum( - torch.stack(bias_list) * torch.stack(weights), dim=0 - ) fit_ret = { "energy": torch.sum(torch.stack(ener_list) * torch.stack(weights), dim=0), } # (nframes, nloc, 1) return fit_ret + + def apply_out_stat( + self, + ret: Dict[str, torch.Tensor], + atype: torch.Tensor, + ): + """Apply the stat to each atomic output. + The developer may override the method to define how the bias is applied + to the atomic output of the model. + + Parameters + ---------- + ret + The returned dict by the forward_atomic method + atype + The atom types. nf x nloc + + """ + return ret @staticmethod def remap_atype(ori_map: List[str], new_map: List[str]) -> torch.Tensor: @@ -292,26 +303,6 @@ def _compute_weight( for _ in range(nmodels) ] - def set_out_bias(self, out_bias: torch.Tensor, add=False) -> None: - """ - Modify the output bias for all the models in the linear atomic model. - - Parameters - ---------- - out_bias : torch.Tensor - The new bias to be applied. - add : bool, optional - Whether to add the new bias to the existing one. - If False, the output bias will be directly replaced by the new bias. - If True, the new bias will be added to the existing one. - """ - for model in self.models: - model.set_out_bias(out_bias, add=add) - - def get_out_bias(self) -> torch.Tensor: - """Return the weighted output bias of the linear atomic model.""" - return self.atomic_bias - def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" # tricky... diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 627dffd620..579e3efda4 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -226,25 +226,6 @@ def compute_or_load_stat( """ self.compute_or_load_out_stat(merged, stat_file_path) - def set_out_bias(self, out_bias: torch.Tensor, add=False) -> None: - """ - Modify the output bias for the atomic model. - - Parameters - ---------- - out_bias : torch.Tensor - The new bias to be applied. 
-        add : bool, optional
-            Whether to add the new bias to the existing one.
-            If False, the output bias will be directly replaced by the new bias.
-            If True, the new bias will be added to the existing one.
-        """
-        self.bias_atom_e = out_bias + self.bias_atom_e if add else out_bias
-
-    def get_out_bias(self) -> torch.Tensor:
-        """Return the output bias of the atomic model."""
-        return self.bias_atom_e
-
     def forward_atomic(
         self,
         extended_coord: torch.Tensor,
diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py
index 25a8ec9201..0c36b9783c 100644
--- a/deepmd/pt/model/model/make_model.py
+++ b/deepmd/pt/model/model/make_model.py
@@ -172,8 +172,6 @@ def forward_common(
         model_predict = self.output_type_cast(model_predict, input_prec)
         return model_predict
 
-    def get_out_bias(self) -> torch.Tensor:
-        return self.atomic_model.get_out_bias()
 
     def change_out_bias(
         self,
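For context, here is a minimal standalone sketch of the pattern this patch moves to: instead of writing the bias into the fitting network through `set_out_bias`/`get_out_bias`, the bias is kept as an output statistic (stored by `_store_out_stat` in `change_out_bias`) and added to the raw atomic output in an overridable `apply_out_stat` hook. The `ToyAtomicModel` class and its attributes below are illustrative only and do not reproduce the actual deepmd-kit base classes.

```python
# Illustrative sketch only: class and attribute names are hypothetical,
# not the real deepmd-kit implementation.
from typing import Dict

import numpy as np


class ToyAtomicModel:
    """Keeps the per-type output bias as a statistic instead of
    mutating the fitting net, mirroring the refactor in this patch."""

    def __init__(self, ntypes: int) -> None:
        # one bias value per atom type, stored alongside other output stats
        self.out_bias = np.zeros((ntypes, 1))

    def _store_out_stat(self, bias: np.ndarray, add: bool = False) -> None:
        # replaces the removed set_out_bias(..., add=...) mutation
        self.out_bias = self.out_bias + bias if add else bias

    def apply_out_stat(
        self, ret: Dict[str, np.ndarray], atype: np.ndarray
    ) -> Dict[str, np.ndarray]:
        # overridable hook: add the stored bias to the raw atomic energies
        ret["energy"] = ret["energy"] + self.out_bias[atype]
        return ret


model = ToyAtomicModel(ntypes=2)
model._store_out_stat(np.array([[1.0], [2.0]]), add=True)
raw = {"energy": np.zeros((1, 3, 1))}  # nframes x nloc x 1
atype = np.array([[0, 1, 1]])  # nframes x nloc
print(model.apply_out_stat(raw, atype)["energy"].squeeze(-1))  # [[1. 2. 2.]]
```

This also illustrates why the linear model's override in the diff simply returns `ret` unchanged: each sub-model has already applied its own bias inside the loop, so applying it again at the linear-combination level would double count it.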