test: add LAMMPS MPI tests (#3572)
Fix #3509.

Note: the case of a processor holding 0 atoms is currently broken with the PyTorch
backend; I marked it with a TODO comment.

---------

Signed-off-by: Jinzhe Zeng <[email protected]>
njzjz authored Mar 21, 2024
1 parent 145f501 commit fb61efb
Showing 5 changed files with 279 additions and 2 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test_cc.yml
@@ -50,7 +50,7 @@ jobs:
    # TODO: remove ase version when ase has new release
    - run: |
        python -m pip install -U pip
-       python -m pip install -e .[cpu,test,lmp] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
+       python -m pip install -e .[cpu,test,lmp] mpi4py "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
      env:
        DP_BUILD_TESTING: 1
      if: ${{ !matrix.check_memleak }}
3 changes: 2 additions & 1 deletion .github/workflows/test_cuda.yml
@@ -48,7 +48,7 @@ jobs:
      if: false # skip as we use nvidia image
    - run: python -m pip install -U "pip>=21.3.1,!=23.0.0"
    - run: python -m pip install "tensorflow>=2.15.0rc0" "torch>=2.2.0"
-   - run: python -m pip install -v -e .[gpu,test,lmp,cu12,torch] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
+   - run: python -m pip install -v -e .[gpu,test,lmp,cu12,torch] mpi4py "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
      env:
        DP_VARIANT: cuda
        DP_ENABLE_NATIVE_OPTIMIZATION: 1
@@ -81,6 +81,7 @@ jobs:
        TF_INTRA_OP_PARALLELISM_THREADS: 1
        TF_INTER_OP_PARALLELISM_THREADS: 1
        LAMMPS_PLUGIN_PATH: ${{ github.workspace }}/dp_test/lib/deepmd_lmp
+       CUDA_VISIBLE_DEVICES: 0
  pass:
    name: Pass testing on CUDA
    needs: [test_cuda]
61 changes: 61 additions & 0 deletions source/lmp/tests/run_mpi_pair_deepmd.py
@@ -0,0 +1,61 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
"""Use mpi4py to run a LAMMPS pair_deepmd + model deviation (atomic, relative) task."""

import argparse

import numpy as np
from lammps import (
    PyLammps,
)
from mpi4py import (
    MPI,
)

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

parser = argparse.ArgumentParser()
parser.add_argument("DATAFILE", type=str)
parser.add_argument("PBFILE", type=str)
parser.add_argument("PBFILE2", type=str)
parser.add_argument("MD_FILE", type=str)
parser.add_argument("OUTPUT", type=str)
parser.add_argument("--balance", action="store_true")

args = parser.parse_args()
data_file = args.DATAFILE
pb_file = args.PBFILE
pb_file2 = args.PBFILE2
md_file = args.MD_FILE
output = args.OUTPUT
balance = args.balance

lammps = PyLammps()
if balance:
    # 4 and 2 atoms
    lammps.processors("2 1 1")
else:
    # 6 and 0 atoms
    lammps.processors("1 2 1")
lammps.units("metal")
lammps.boundary("p p p")
lammps.atom_style("atomic")
lammps.neighbor("2.0 bin")
lammps.neigh_modify("every 10 delay 0 check no")
lammps.read_data(data_file)
lammps.mass("1 16")
lammps.mass("2 2")
lammps.timestep(0.0005)
lammps.fix("1 all nve")

relative = 1.0
lammps.pair_style(
    f"deepmd {pb_file} {pb_file2} out_file {md_file} out_freq 1 atomic relative {relative}"
)
lammps.pair_coeff("* *")
lammps.run(0)
pe = lammps.eval("pe")
if rank == 0:
    arr = [pe]
    np.savetxt(output, np.array(arr))
MPI.Finalize()
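
For orientation: the `out_file` written by `pair_style deepmd` in the script above is what the test below parses by index. A minimal sketch of reading it, assuming the usual column layout (step, then max/min/avg virial deviation, then max/min/avg force deviation, then per-atom force deviations because `atomic` is requested); the file name here is hypothetical, not taken from this diff:

```python
# Sketch only: parse the model-deviation file produced by the script above.
# The column layout is inferred from how the test below indexes the array;
# treat it as an assumption, and "md.out" as a placeholder path.
import numpy as np

md = np.loadtxt("md.out")  # one row here, since out_freq is 1 and only run(0) is called
step = md[0]
max_devi_v, min_devi_v, avg_devi_v = md[1:4]  # virial model-deviation statistics
max_devi_f, min_devi_f, avg_devi_f = md[4:7]  # force model-deviation statistics
per_atom_devi_f = md[7:]                      # one entry per atom ("atomic" keyword)
print(step, max_devi_f, per_atom_devi_f.shape)
```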
52 changes: 52 additions & 0 deletions source/lmp/tests/test_lammps.py
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
+import importlib
import os
+import shutil
import subprocess as sp
+import sys
import tempfile
from pathlib import (
    Path,
)
@@ -671,3 +674,52 @@ def test_pair_deepmd_si(lammps_si):
            expected_f[lammps_si.atoms[ii].id - 1] * constants.force_metal2si
        )
    lammps_si.run(1)


@pytest.mark.skipif(
    shutil.which("mpirun") is None, reason="MPI is not installed on this system"
)
@pytest.mark.skipif(
    importlib.util.find_spec("mpi4py") is None, reason="mpi4py is not installed"
)
@pytest.mark.parametrize(
    ("balance_args",),
    [(["--balance"],), ([],)],
)
def test_pair_deepmd_mpi(balance_args: list):
    with tempfile.NamedTemporaryFile() as f:
        sp.check_call(
            [
                "mpirun",
                "-n",
                "2",
                sys.executable,
                Path(__file__).parent / "run_mpi_pair_deepmd.py",
                data_file,
                pb_file,
                pb_file2,
                md_file,
                f.name,
                *balance_args,
            ]
        )
        arr = np.loadtxt(f.name, ndmin=1)
    pe = arr[0]

    relative = 1.0
    assert pe == pytest.approx(expected_e)
    # load model devi
    md = np.loadtxt(md_file.resolve())
    norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1)
    expected_md_f = np.linalg.norm(np.std([expected_f, expected_f2], axis=0), axis=1)
    expected_md_f /= norm + relative
    assert md[7:] == pytest.approx(expected_md_f)
    assert md[4] == pytest.approx(np.max(expected_md_f))
    assert md[5] == pytest.approx(np.min(expected_md_f))
    assert md[6] == pytest.approx(np.mean(expected_md_f))
    expected_md_v = (
        np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) / 6
    )
    assert md[1] == pytest.approx(np.max(expected_md_v))
    assert md[2] == pytest.approx(np.min(expected_md_v))
    assert md[3] == pytest.approx(np.sqrt(np.mean(np.square(expected_md_v))))
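
Reading those assertions back as equations (my paraphrase of the numpy above, not text from the PR): with per-atom forces f_i^(1), f_i^(2) from the two models, level = `relative` = 1.0, and N = 6 atoms in this test box,

```latex
% Relative (atomic) force model deviation for atom i, with level = relative = 1.0
\varepsilon_{f,i} =
  \frac{\bigl\lVert \operatorname{std}\bigl(\mathbf{f}_i^{(1)}, \mathbf{f}_i^{(2)}\bigr) \bigr\rVert}
       {\bigl\lVert \operatorname{mean}\bigl(\mathbf{f}_i^{(1)}, \mathbf{f}_i^{(2)}\bigr) \bigr\rVert + \text{level}}
% Virial model deviation per component alpha, normalized by the N = 6 atoms of the test box
\varepsilon_{v,\alpha} =
  \frac{1}{N}\,\operatorname{std}\Bigl(\sum_i v_{i,\alpha}^{(1)},\; \sum_i v_{i,\alpha}^{(2)}\Bigr)
```

In this layout, md[4], md[5], and md[6] are the max, min, and mean of the force deviation over atoms, while md[1] and md[2] are the max and min of the virial deviation and md[3] its root mean square over components, which is exactly what the assertions check.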