Skip to content

Commit

Permalink
update .bashrc
Browse files Browse the repository at this point in the history
update bash script

add test files

fixed training and inference for same commit sha

remove setting

update model path
  • Loading branch information
lidanqing-intel committed May 16, 2022
1 parent d93b7e8 commit 62dd83f
Show file tree
Hide file tree
Showing 15 changed files with 152 additions and 71 deletions.
52 changes: 52 additions & 0 deletions .bashrc
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# .bashrc

# User specific aliases and functions

# Interactive safety: prompt before destructive file operations.
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'

# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Corporate proxy for outbound HTTP/HTTPS traffic.
export http_proxy=http://child-prc.intel.com:913
export https_proxy=http://child-prc.intel.com:913

# Presumably a cache dir for PaddlePaddle third-party downloads — TODO confirm.
export PADDLEPADDLE_TP_CACHE="/home/guest/tp_cache"
# NOTE(review): the commented exports below are stale paths from older
# deepmd_kit egg installs, kept for reference.
#export LD_LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev530+g8d8f289.d20220512-py3.8-linux-x86_64.egg/deepmd/op:$LD_LIBRARY_PATH
#export LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev530+g8d8f289.d20220512-py3.8-linux-x86_64.egg/deepmd/op:$LIBRARY_PATH
#export DEEP_MD_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev530+g8d8f289.d20220512-py3.8-linux-x86_64.egg/deepmd/op
#export LD_LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev491+g1a06aa4-py3.8-linux-x86_64.egg/deepmd/op:$LD_LIBRARY_PATH
#export LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev491+g1a06aa4-py3.8-linux-x86_64.egg/deepmd/op:$LIBRARY_PATH
#export DEEP_MD_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev491+g1a06aa4-py3.8-linux-x86_64.egg/deepmd/op
#
#1.2.3.dev530+g8d8f289.d20220512
# Install prefixes consumed by the deepmd build scripts (see compile_deepmd.sh).
export tensorflow_root=/home/tensorflowroot
export deepmd_root=/home/deepmdroot
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/root/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/root/anaconda3/etc/profile.d/conda.sh" ]; then
. "/root/anaconda3/etc/profile.d/conda.sh"
else
export PATH="/root/anaconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
# Put a newer standalone CMake ahead of the system one.
export PATH=/home/jessie/cmake-3.21.0-linux-x86_64/bin:$PATH

# LAMMPS binaries built in-tree by compile_lammps.sh.
export PATH=/home/lammps-stable_29Oct2020/src:$PATH
#export LD_LIBRARY_PATH=/home/paddle-deepmd/source/build/lib:$LD_LIBRARY_PATH
# Runtime library search path: deepmd-kit build outputs plus the Paddle
# inference libraries and their mkldnn/mklml third-party dependencies.
export LD_LIBRARY_PATH=/home/deepmd-kit/source/build/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/home/Paddle/build/paddle_inference_install_dir/paddle/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/home/Paddle/build/paddle_inference_install_dir/third_party/install/mkldnn/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/home/Paddle/build/paddle_inference_install_dir/third_party/install/mklml/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/home/Paddle/build/paddle/fluid/pybind/:$LD_LIBRARY_PATH
#export LD_LIBRARY_PATH=/home/paddle-deepmd/source/build:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/home/deepmd-kit/source/build:$LD_LIBRARY_PATH

3 changes: 1 addition & 2 deletions compile_deepmd.sh
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
# Build the deepmd C++ library against a prebuilt Paddle inference install.
# Recreate the deepmd install prefix from scratch.
rm -rf /home/deepmdroot/ && mkdir /home/deepmdroot && deepmd_root=/home/deepmdroot
# NOTE(review): this still targets the old paddle-deepmd checkout and creates
# a stray build dir there; the next line repeats the step in the deepmd-kit
# tree — confirm whether this first cd/mkdir is a leftover from the rename.
cd /home/paddle-deepmd/source && rm -rf build && mkdir build && cd build
#cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root -DPADDLE_ROOT=/home/Paddle/build/paddle_inference_install_dir -DUSE_CUDA_TOOLKIT=FALSE ..
# Configure in the deepmd-kit source tree, CPU-only, against Paddle inference.
cd /home/deepmd-kit/source && rm -rf build && mkdir build && cd build
cmake -DPADDLE_ROOT=/home/Paddle/build/paddle_inference_install_dir -DUSE_CUDA_TOOLKIT=FALSE ..
make -j 4 && make install
# Stage the USER-DEEPMD package that compile_lammps.sh copies into LAMMPS.
make lammps
Expand Down
6 changes: 4 additions & 2 deletions compile_lammps.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ cd /home
# Rebuild LAMMPS from a clean extraction of the stable_29Oct2020 tarball.
rm -rf lammps-stable_29Oct2020/
tar -xzvf stable_29Oct2020.tar.gz
cd lammps-stable_29Oct2020/src/
# NOTE(review): the two cp lines below copy USER-DEEPMD from both the old
# paddle-deepmd tree and the new deepmd-kit tree into the same destination;
# the first looks like a leftover (its commented-out twin appears just
# after) — confirm only the deepmd-kit copy is intended.
cp -r /home/paddle-deepmd/source/build/USER-DEEPMD .
cp -r /home/deepmd-kit/source/build/USER-DEEPMD .
#cp -r /home/paddle-deepmd/source/build/USER-DEEPMD .
# Enable KSPACE + USER-DEEPMD packages, then build serial and MPI binaries.
make yes-kspace yes-user-deepmd
make serial -j 20
#make serial -j 20
make mpi -j 20
4 changes: 2 additions & 2 deletions examples/water/lmp/in.lammps
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# bulk water

echo screen
units metal
boundary p p p
atom_style atomic
Expand All @@ -11,7 +11,7 @@ read_data water.lmp
mass 1 16
mass 2 2

pair_style deepmd frozen_model.pb
pair_style deepmd /home/deepmd-kit/examples/water/lmp/model.pdmodel /home/deepmd-kit/examples/water/lmp/model.pdiparams
pair_coeff

velocity all create 330.0 23456789
Expand Down
Binary file added examples/water/lmp/model.pdiparams
Binary file not shown.
Binary file added examples/water/lmp/model.pdiparams.info
Binary file not shown.
Binary file added examples/water/lmp/model.pdmodel
Binary file not shown.
128 changes: 66 additions & 62 deletions source/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,10 @@ set(CMAKE_LINK_WHAT_YOU_USE TRUE)
# build cpp or python interfaces
if (NOT DEFINED BUILD_CPP_IF)
set(BUILD_CPP_IF TRUE)
endif (NOT DEFINED BUILD_CPP_IF)
message(WARNING "WARNING! -DON_INFER is set")
add_definitions("-DON_INFER")
endif()

if (NOT DEFINED BUILD_PY_IF)
set(BUILD_PY_IF FALSE)
endif (NOT DEFINED BUILD_PY_IF)
Expand Down Expand Up @@ -72,72 +75,72 @@ if (USE_CUDA_TOOLKIT)
add_definitions("-D GOOGLE_CUDA")
endif()

# find paddle
if(BUILD_PY_IF)
find_package(tensorflow REQUIRED)
else()
find_package(Fluid REQUIRED)

# find tensorflow, I need tf abi info
if (BUILD_PY_IF)
find_package(tensorflow REQUIRED)
endif (BUILD_PY_IF)
endif()

# find threads
find_package(Threads)

# auto op_cxx_abi
#if (NOT DEFINED OP_CXX_ABI)
# if (BUILD_PY_IF)
# if (DEFINED TENSORFLOW_ROOT)
# set(FIND_ABI_CMD "import sys,os; sys.path.insert(0, os.path.join('${TENSORFLOW_ROOT}', '..')); import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')" )
# else()
# set(FIND_ABI_CMD "import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')")
# endif()
# execute_process(
# COMMAND ${PYTHON_EXECUTABLE} "-c" "${FIND_ABI_CMD}"
# WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
# OUTPUT_VARIABLE PY_CXX_ABI
# RESULT_VARIABLE PY_CXX_ABI_RESULT_VAR
# ERROR_VARIABLE PY_CXX_ABI_ERROR_VAR
# )
# if (NOT ${PY_CXX_ABI_RESULT_VAR} EQUAL 0)
# message(FATAL_ERROR "Cannot determine cxx abi, error message: ${PY_CXX_ABI_ERROR_VAR}")
# endif()
# set(OP_CXX_ABI ${PY_CXX_ABI})
# endif()
# if (BUILD_CPP_IF)
# try_run(
# CPP_CXX_ABI_RUN_RESULT_VAR CPP_CXX_ABI_COMPILE_RESULT_VAR
# ${CMAKE_CURRENT_BINARY_DIR}/tf_cxx_abi
# "${CMAKE_CURRENT_SOURCE_DIR}/cmake/tf_cxx_abi.cpp"
# LINK_LIBRARIES ${TensorFlowFramework_LIBRARY}
# CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${TensorFlow_INCLUDE_DIRS}"
# RUN_OUTPUT_VARIABLE CPP_CXX_ABI
# COMPILE_OUTPUT_VARIABLE CPP_CXX_ABI_COMPILE_OUTPUT_VAR
# )
# if (NOT ${CPP_CXX_ABI_COMPILE_RESULT_VAR})
# message(FATAL_ERROR "Failed to compile: \n ${CPP_CXX_ABI_COMPILE_OUTPUT_VAR}" )
# endif()
# if (NOT ${CPP_CXX_ABI_RUN_RESULT_VAR} EQUAL "0")
# message(FATAL_ERROR "Failed to run, return code: ${CPP_CXX_ABI}" )
# endif()
# if (DEFINED PY_CXX_ABI)
# if (NOT (${CPP_CXX_ABI} EQUAL ${PY_CXX_ABI}))
# message (WARNNING "NOT consistent CXX_ABIs: python interface of tf uses ${PY_CXX_ABI}, while c++ interface of tf uses ${CPP_CXX_ABI}, we follow c++ interface ")
# endif()
# endif()
# set(OP_CXX_ABI ${CPP_CXX_ABI})
# endif()
# message (STATUS "Automatically determined OP_CXX_ABI=${OP_CXX_ABI} ")
#else()
# message (STATUS "User set OP_CXX_ABI=${OP_CXX_ABI} ")
#endif()
# message (STATUS "No set OP_CXX_ABI=${OP_CXX_ABI} ")
# # message the cxx_abi used during compiling
# if (${OP_CXX_ABI} EQUAL 0)
# message (STATUS "Set GLIBCXX_USE_CXX_ABI=0 when compiling ops")
# else ()
# set (OP_CXX_ABI 1)
# message (STATUS "Set GLIBCXX_USE_CXX_ABI=1 when compiling ops")
# endif ()
# Determine OP_CXX_ABI (the _GLIBCXX_USE_CXX11_ABI value used when compiling
# the custom ops).  Only done for the Python-interface build; the inference
# build does not consult TensorFlow for its ABI.
if (BUILD_PY_IF)
  if (NOT DEFINED OP_CXX_ABI)
    # Ask the installed tensorflow for its CXX11 ABI flag via the Python
    # interpreter.  When TENSORFLOW_ROOT is set, import tensorflow from there.
    # (The original code re-tested BUILD_PY_IF here, but we are already inside
    # that guard, so the redundant nesting is dropped — behavior unchanged.)
    if (DEFINED TENSORFLOW_ROOT)
      set(FIND_ABI_CMD "import sys,os; sys.path.insert(0, os.path.join('${TENSORFLOW_ROOT}', '..')); import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')" )
    else()
      set(FIND_ABI_CMD "import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')")
    endif()
    execute_process(
      COMMAND ${PYTHON_EXECUTABLE} "-c" "${FIND_ABI_CMD}"
      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
      OUTPUT_VARIABLE PY_CXX_ABI
      RESULT_VARIABLE PY_CXX_ABI_RESULT_VAR
      ERROR_VARIABLE PY_CXX_ABI_ERROR_VAR
    )
    if (NOT ${PY_CXX_ABI_RESULT_VAR} EQUAL 0)
      message(FATAL_ERROR "Cannot determine cxx abi, error message: ${PY_CXX_ABI_ERROR_VAR}")
    endif()
    set(OP_CXX_ABI ${PY_CXX_ABI})
    # When the C++ interface is also built, the ABI reported by the TF C++
    # library takes precedence over the Python module's value.
    if (BUILD_CPP_IF)
      try_run(
        CPP_CXX_ABI_RUN_RESULT_VAR CPP_CXX_ABI_COMPILE_RESULT_VAR
        ${CMAKE_CURRENT_BINARY_DIR}/tf_cxx_abi
        "${CMAKE_CURRENT_SOURCE_DIR}/cmake/tf_cxx_abi.cpp"
        LINK_LIBRARIES ${TensorFlowFramework_LIBRARY}
        CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${TensorFlow_INCLUDE_DIRS}"
        RUN_OUTPUT_VARIABLE CPP_CXX_ABI
        COMPILE_OUTPUT_VARIABLE CPP_CXX_ABI_COMPILE_OUTPUT_VAR
      )
      if (NOT ${CPP_CXX_ABI_COMPILE_RESULT_VAR})
        message(FATAL_ERROR "Failed to compile: \n ${CPP_CXX_ABI_COMPILE_OUTPUT_VAR}" )
      endif()
      if (NOT ${CPP_CXX_ABI_RUN_RESULT_VAR} EQUAL "0")
        message(FATAL_ERROR "Failed to run, return code: ${CPP_CXX_ABI}" )
      endif()
      if (DEFINED PY_CXX_ABI)
        if (NOT (${CPP_CXX_ABI} EQUAL ${PY_CXX_ABI}))
          # Fix: "WARNNING" is not a message mode keyword, so CMake printed it
          # as part of the message text; WARNING makes this a real warning.
          message(WARNING "NOT consistent CXX_ABIs: python interface of tf uses ${PY_CXX_ABI}, while c++ interface of tf uses ${CPP_CXX_ABI}, we follow c++ interface ")
        endif()
      endif()
      set(OP_CXX_ABI ${CPP_CXX_ABI})
    endif()
    message(STATUS "Automatically determined OP_CXX_ABI=${OP_CXX_ABI} ")
  else()
    message(STATUS "User set OP_CXX_ABI=${OP_CXX_ABI} ")
  endif()
  # (Removed the unconditional "No set OP_CXX_ABI" STATUS line: it printed on
  # every configure and contradicted the two diagnostics above.)
  # Normalize the flag to 0/1 and report the ABI used when compiling ops.
  if (${OP_CXX_ABI} EQUAL 0)
    message(STATUS "Set GLIBCXX_USE_CXX_ABI=0 when compiling ops")
  else()
    set(OP_CXX_ABI 1)
    message(STATUS "Set GLIBCXX_USE_CXX_ABI=1 when compiling ops")
  endif()
endif()

# define USE_TTM
if (NOT DEFINED USE_TTM)
Expand Down Expand Up @@ -193,7 +196,7 @@ endif (BUILD_CPP_IF)

# include
include_directories(${DeePMD_INCLUDE_DIRS})
#include_directories(${TensorFlow_INCLUDE_DIRS})
include_directories(${TensorFlow_INCLUDE_DIRS})

# define names of libs
set (LIB_DEEPMD "deepmd")
Expand All @@ -216,6 +219,7 @@ endif (BUILD_CPP_IF)
# add_subdirectory (op/)
add_subdirectory (lib/)
if (BUILD_PY_IF)
add_subdirectory (op/)
add_subdirectory (config/)
# add_subdirectory (tests/)
endif (BUILD_PY_IF)
Expand Down
2 changes: 2 additions & 0 deletions source/cmake/FindFluid.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,7 @@ function(third_party_library TARGET_NAME TARGET_DIRNAME)
set(PADDLE_THIRD_PARTY_LIBRARIES ${PADDLE_THIRD_PARTY_LIBRARIES} ${local_third_party_libraries} PARENT_SCOPE)
endfunction()

if(NOT BUILD_PY_IF)
set(OP_DIR "${PROJECT_SOURCE_DIR}/op/paddle_ops/srcs")
if(USE_CUDA_TOOLKIT)
file(GLOB CUSTOM_OPERATOR_FILES ${OP_DIR}/*.cc)
Expand All @@ -135,6 +136,7 @@ else()
# set(CUSTOM_OPERATOR_FILES "${OP_DIR}/pd_prod_env_mat_multi_devices_cpu.cc;${OP_DIR}/pd_prod_force_se_a_multi_devices_cpu.cc;${OP_DIR}/pd_prod_virial_se_a_multi_devices_cpu.cc;")
endif()
add_library(pd_infer_custom_op SHARED ${CUSTOM_OPERATOR_FILES})
endif()

third_party_library(mklml ${THIRD_PARTY_ROOT}/install/mklml/lib libiomp5.so libmklml_intel.so)
third_party_library(mkldnn ${THIRD_PARTY_ROOT}/install/mkldnn/lib libmkldnn.so.0)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
//#include "paddle/extension.h"
#ifdef ON_INFER
#include "paddle/include/experimental/ext_all.h"
#else
#include "paddle/extension.h"
#endif
// #include "paddle/include/experimental/ext_all.h"
#include "utilities.h"
#include "coord.h"
#include "region.h"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
#define GOOGLE_CUDA 1
#ifdef ON_INFER
#include "paddle/include/experimental/ext_all.h"
#else
#include "paddle/extension.h"
#endif
#include "utilities.h"
#include "coord.h"
#include "region.h"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
#include <assert.h>
#include "prod_force.h"
#include "prod_force_grad.h"
//#include "paddle/extension.h"
#ifdef ON_INFER
#include "paddle/include/experimental/ext_all.h"
#else
#include "paddle/extension.h"
#endif

#define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
#define CHECK_INPUT_READY(x) PD_CHECK(x.is_initialized(), #x " must be initialized before usage.")
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
#define GOOGLE_CUDA 1
#include "prod_force.h"
#ifdef ON_INFER
#include "paddle/include/experimental/ext_all.h"
#else
#include "paddle/extension.h"
#endif
#include <assert.h>

#define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
#include <assert.h>
#include "prod_virial.h"
#include "prod_virial_grad.h"
//#include "paddle/extension.h"
#ifdef ON_INFER
#include "paddle/include/experimental/ext_all.h"
#else
#include "paddle/extension.h"
#endif


#define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
#define GOOGLE_CUDA 1
#include <assert.h>
#include "prod_virial.h"
#ifdef ON_INFER
#include "paddle/include/experimental/ext_all.h"
#else
#include "paddle/extension.h"
#endif

#define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")
#define CHECK_INPUT_DIM(x, value) PD_CHECK(x.shape().size() == value, #x "'s dim should be " #value ".")
Expand Down

0 comments on commit 62dd83f

Please sign in to comment.