From 1b233e737908925a46d941920cbe651f960d125b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 7 Apr 2024 12:30:33 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../model/atomic_model/linear_atomic_model.py |  9 +++++----
 .../pt/model/test_linear_atomic_model_stat.py | 22 +++++++++++-----------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py
index 9bf033953e..c5abc4575c 100644
--- a/deepmd/pt/model/atomic_model/linear_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py
@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import copy
 from typing import (
+    Callable,
     Dict,
     List,
     Optional,
     Tuple,
     Union,
-    Callable,
 )
 
 import torch
@@ -297,7 +297,8 @@ def _compute_weight(
         nmodels = len(self.models)
         nframes, nloc, _ = nlists_[0].shape
         return [
-            torch.ones((nframes, nloc, 1), dtype=torch.float64, device=env.DEVICE) / nmodels
+            torch.ones((nframes, nloc, 1), dtype=torch.float64, device=env.DEVICE)
+            / nmodels
             for _ in range(nmodels)
         ]
 
@@ -336,7 +337,7 @@ def is_aparam_nall(self) -> bool:
             If False, the shape is (nframes, nloc, ndim).
         """
         return False
-    
+
     def compute_or_load_out_stat(
         self,
         merged: Union[Callable[[], List[dict]], List[dict]],
@@ -360,7 +361,7 @@ def compute_or_load_out_stat(
         """
         for md in self.models:
             md.compute_or_load_out_stat(merged, stat_file_path)
-    
+
     def compute_or_load_stat(
         self,
         sampled_func,
diff --git a/source/tests/pt/model/test_linear_atomic_model_stat.py b/source/tests/pt/model/test_linear_atomic_model_stat.py
index ae1ca84419..d8137c17c0 100644
--- a/source/tests/pt/model/test_linear_atomic_model_stat.py
+++ b/source/tests/pt/model/test_linear_atomic_model_stat.py
@@ -17,7 +17,6 @@
     OutputVariableDef,
 )
 from deepmd.pt.model.atomic_model import (
-    BaseAtomicModel,
     DPAtomicModel,
     LinearEnergyAtomicModel,
 )
@@ -85,9 +84,10 @@ def forward(
             .to(env.GLOBAL_PT_FLOAT_PRECISION)
             .to(env.DEVICE)
         )
-        
+
         return ret
 
+
 class FooFittingB(torch.nn.Module, BaseFitting):
     def output_def(self):
         return FittingOutputDef(
@@ -128,9 +128,10 @@ def forward(
             .to(env.GLOBAL_PT_FLOAT_PRECISION)
             .to(env.DEVICE)
         )
-        
+
         return ret
 
+
 class TestAtomicModelStat(unittest.TestCase, TestCaseSingleFrameWithNlist):
     def tearDown(self):
         self.tempdir.cleanup()
@@ -153,7 +154,6 @@ def setUp(self):
                 ),
                 # bias of foo: 1, 3
                 "energy": to_torch_tensor(np.array([5.0, 7.0]).reshape(2, 1)),
-                
             }
         ]
         self.tempdir = tempfile.TemporaryDirectory()
@@ -183,9 +183,9 @@ def test_linear_atomic_model_stat_with_bias(self):
             ft_b,
             type_map=type_map,
         ).to(env.DEVICE)
-        linear_model = LinearEnergyAtomicModel(
-            [md0,md1],type_map=type_map
-        ).to(env.DEVICE)
+        linear_model = LinearEnergyAtomicModel([md0, md1], type_map=type_map).to(
+            env.DEVICE
+        )
         args = [
             to_torch_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist]
         ]
@@ -196,7 +196,7 @@ def test_linear_atomic_model_stat_with_bias(self):
         # 1. test run without bias
         # nf x na x odim
        ret0 = linear_model.forward_common_atomic(*args)
-        
+
         ret0 = to_numpy_array(ret0["energy"])
         ret_no_bias = []
         for md in linear_model.models:
@@ -207,7 +207,7 @@ def test_linear_atomic_model_stat_with_bias(self):
                 [7.0, 8.0, 9.0],
             ]
         ).reshape([nf, nloc] + linear_model.fitting_output_def()["energy"].shape)
-        
+
         np.testing.assert_almost_equal(ret0, expected_ret0)
 
         # 2. test bias is applied
@@ -222,8 +222,8 @@ def test_linear_atomic_model_stat_with_bias(self):
             ret = to_numpy_array(ret["energy"])
             linear_ret.append(ret_no_bias[idx] + ener_bias[at])
             np.testing.assert_almost_equal((ret_no_bias[idx] + ener_bias[at]), ret)
-        
+
         # linear model not adding bias again
         ret1 = linear_model.forward_common_atomic(*args)
         ret1 = to_numpy_array(ret1["energy"])
-        np.testing.assert_almost_equal(torch.mean(torch.stack(linear_ret),dim=0), ret1)
+        np.testing.assert_almost_equal(torch.mean(torch.stack(linear_ret), dim=0), ret1)