CI test, do not review [fluid_ops] fleet_wrapper5 #68554

Open

wants to merge 4 commits into develop
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/auto_parallel/CMakeLists.txt
@@ -3,6 +3,6 @@ add_subdirectory(spmd_rules)
cc_library(
op_dist_attr
SRCS dist_attr.cc
DEPS phi common auto_parallel_proto proto_desc)
DEPS phi_core common auto_parallel_proto proto_desc)

cc_library(auto_parallel DEPS op_dist_attr dist_tensor_spec)
7 changes: 4 additions & 3 deletions paddle/fluid/distributed/ps/service/CMakeLists.txt
@@ -3,11 +3,12 @@ set_source_files_properties(${BRPC_SRCS})

if(WITH_HETERPS)

set(BRPC_DEPS ${EXTERNAL_BRPC_DEPS} phi common zlib device_context rocksdb)
set(BRPC_DEPS ${EXTERNAL_BRPC_DEPS} phi_core common zlib device_context
rocksdb)

else()

set(BRPC_DEPS ${EXTERNAL_BRPC_DEPS} phi common zlib device_context)
set(BRPC_DEPS ${EXTERNAL_BRPC_DEPS} phi_core common zlib device_context)

endif()

@@ -102,7 +103,7 @@ cc_library(
simple_threadpool
simple_rpc
scope
phi
phi_core
common
ps_gpu_wrapper
fleet
6 changes: 3 additions & 3 deletions paddle/fluid/distributed/ps/table/CMakeLists.txt
@@ -18,7 +18,7 @@ set_source_files_properties(
cc_library(
graph_node
SRCS ${graphDir}/graph_node.cc
DEPS WeightedSampler phi common)
DEPS WeightedSampler phi_core common)
set_source_files_properties(
memory_dense_table.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
set_source_files_properties(
@@ -46,7 +46,7 @@ cc_library(
string_helper
simple_threadpool
xxhash
phi
phi_core
common)

set_source_files_properties(
@@ -90,7 +90,7 @@ cc_library(
ps_framework_proto
string_helper
device_context
phi
phi_core
common
glog
framework_io
6 changes: 3 additions & 3 deletions paddle/fluid/eager/api/utils/CMakeLists.txt
@@ -1,16 +1,16 @@
cc_library(
global_utils
SRCS global_utils.cc
DEPS phi common tracer)
DEPS phi_core common tracer)

if(NOT (NOT WITH_PYTHON AND ON_INFER))
cc_library(
hook_utils
SRCS hook_utils.cc
DEPS phi common autograd_meta grad_node_info utils accumulation_node)
DEPS phi_core common autograd_meta grad_node_info utils accumulation_node)
else()
cc_library(
hook_utils
SRCS hook_utils.cc
DEPS phi common autograd_meta grad_node_info utils)
DEPS phi_core common autograd_meta grad_node_info utils)
endif()
54 changes: 26 additions & 28 deletions paddle/fluid/framework/CMakeLists.txt
@@ -113,7 +113,7 @@ proto_library(trainer_desc_proto SRCS trainer_desc.proto DEPS framework_proto
cc_library(
string_array
SRCS string_array.cc
DEPS utf8proc phi common)
DEPS utf8proc phi_core common)

cc_library(
data_type
@@ -123,27 +123,27 @@ cc_library(
cc_library(
tensor
SRCS tensor_util.cc
DEPS data_type device_context phi common)
DEPS data_type phi_core common)

cc_library(
lod_tensor
SRCS lod_tensor.cc
DEPS phi common tensor framework_proto version)
DEPS phi_core common tensor framework_proto version)

cc_library(
garbage_collector
SRCS garbage_collector.cc
DEPS device_context phi common glog)
DEPS phi_core common glog)

cc_library(
reader
SRCS reader.cc
DEPS lod_tensor phi common)
DEPS lod_tensor phi_core common)

cc_library(
var_type_traits
SRCS var_type_traits.cc
DEPS framework_proto scope phi common)
DEPS framework_proto scope phi_core common)

# every source file that includes "dnnl.h" must depends on onednn
# or, the first one should depends on onednn
@@ -169,7 +169,7 @@ endif()
cc_library(
scope
SRCS scope.cc
DEPS glog phi common xxhash var_type_traits)
DEPS glog phi_core common xxhash var_type_traits)
cc_library(
device_worker
SRCS device_worker.cc
@@ -220,7 +220,7 @@ endif()
cc_library(
data_layout_transform
SRCS data_layout_transform.cc
DEPS tensor phi common)
DEPS tensor phi_core common)

cc_library(
data_transform
@@ -231,13 +231,13 @@ cc_library(
data_device_transform
data_type_transform
data_layout_transform
phi
phi_core
common)

cc_library(
attribute
SRCS attribute.cc
DEPS framework_proto phi common)
DEPS framework_proto phi_core common)
cc_library(
op_version_proto
SRCS op_version_proto.cc
@@ -263,7 +263,7 @@ cc_library(
cc_library(
shape_inference
SRCS shape_inference.cc
DEPS phi common attribute selected_rows_utils)
DEPS phi_core common attribute selected_rows_utils)

# every source file that includes "dnnl.h" must depends on onednn
# or, the first one should depends on onednn
@@ -284,15 +284,15 @@ cc_library(
cc_library(
op_kernel_type
SRCS op_kernel_type.cc
DEPS device_context phi common)
DEPS phi_core common)

if(WITH_XPU)
cc_library(
phi_utils
SRCS phi_utils.cc
DEPS lod_tensor
selected_rows_utils
phi
phi_core
common
var_type_traits
op_info
@@ -301,7 +301,7 @@ else()
cc_library(
phi_utils
SRCS phi_utils.cc
DEPS lod_tensor selected_rows_utils phi common var_type_traits op_info)
DEPS lod_tensor selected_rows_utils phi_core common var_type_traits op_info)
endif()

if(WITH_XPU)
@@ -311,7 +311,6 @@ if(WITH_XPU)
DEPS xpu_op_list
op_info
proto_desc
device_context
tensor
scope
glog
@@ -327,7 +326,7 @@ if(WITH_XPU)
detail_op_handle
phi_utils
infershape_utils
phi
phi_core
common
op_compat_infos
type_info)
@@ -337,7 +336,6 @@ else()
SRCS operator.cc
DEPS op_info
proto_desc
device_context
tensor
scope
glog
@@ -353,7 +351,7 @@ else()
detail_op_handle
phi_utils
infershape_utils
phi
phi_core
common
op_compat_infos
type_info)
@@ -372,7 +370,7 @@ add_dependencies(
glog
version
xxhash
phi
phi_core
common)

cc_library(
@@ -387,7 +385,7 @@ cc_library(
version
xxhash
op_dist_attr
phi
phi_core
common
op_version_proto
op_version_registry)
@@ -400,7 +398,7 @@ cc_library(
cc_library(
op_call_stack
SRCS op_call_stack.cc
DEPS op_proto_maker phi common)
DEPS op_proto_maker phi_core common)

cc_library(
program_utils
@@ -507,7 +505,7 @@ cc_library(
cc_library(
variable_helper
SRCS variable_helper.cc
DEPS lod_tensor)
DEPS lod_tensor type_info)

set(NAIVE_EXECUTOR_DEPS
op_registry
@@ -672,7 +670,7 @@ if(WITH_DISTRIBUTE)
fleet
heter_server
${${EXTERNAL_BRPC_DEPS}}
phi
phi_core
common)
set(DISTRIBUTE_COMPILE_FLAGS "")
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
@@ -870,7 +868,7 @@ cc_library(
cc_library(
selected_rows_utils
SRCS selected_rows_utils.cc
DEPS phi common device_context)
DEPS phi_core common device_context)

cc_library(
dlpack_tensor
@@ -889,7 +887,7 @@ cc_library(
selected_rows_utils
attribute
var_type_traits
phi
phi_core
common
phi_utils
op_info
@@ -923,7 +921,7 @@ cc_library(
op_registry
operator
string_helper
phi
phi_core
common
imperative_flag
layer
@@ -940,13 +938,13 @@ if(WITH_ONEDNN)
endif()

set(FLUID_FRAMEWORK_MODULES
proto_desc
lod_tensor
executor
data_feed_proto
layer
phi
common
custom_operator)
custom_operator
phi_fleet_ops)

cc_library(paddle_framework DEPS ${FLUID_FRAMEWORK_MODULES})
3 changes: 1 addition & 2 deletions paddle/fluid/framework/details/CMakeLists.txt
@@ -4,11 +4,10 @@ set(op_handle_srcs nan_inf_utils_detail.cc var_handle.cc op_handle_base.cc
set(op_handle_deps
pass
operator
phi
phi_core
common
framework_proto
node
device_context
op_registry
lod_tensor
selected_rows_utils
6 changes: 3 additions & 3 deletions paddle/fluid/framework/fleet/CMakeLists.txt
@@ -20,7 +20,7 @@ else()
cc_library(
fleet_wrapper
SRCS fleet_wrapper.cc
DEPS framework_proto variable_helper scope)
DEPS framework_proto proto_desc variable_helper scope)
endif()

if(WITH_HETERPS)
@@ -88,7 +88,7 @@ if(WITH_GLOO)
cc_library(
gloo_wrapper
SRCS gloo_wrapper.cc
DEPS framework_proto variable_helper scope gloo)
DEPS framework_proto variable_helper scope gloo framework_io)
cc_library(
metrics
SRCS metrics.cc
@@ -97,7 +97,7 @@ else()
cc_library(
gloo_wrapper
SRCS gloo_wrapper.cc
DEPS framework_proto variable_helper scope)
DEPS framework_proto variable_helper scope framework_io)
cc_library(
metrics
SRCS metrics.cc
27 changes: 26 additions & 1 deletion paddle/fluid/framework/fleet/fleet_wrapper.cc
@@ -1246,6 +1246,32 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync(
const std::vector<std::string>& input_names,
std::vector<const phi::DenseTensor*>* inputs,
std::vector<const phi::DenseTensor*>* outputs) {
framework::Variable* var = scope.FindVar(click_name);
PushSparseFromTensorWithLabelAsyncByVar(var,
table_id,
fea_dim,
padding_id,
scale_sparse,
accessor,
click_name,
place,
input_names,
inputs,
outputs);
}

void FleetWrapper::PushSparseFromTensorWithLabelAsyncByVar(
framework::Variable* var,
const uint64_t table_id,
int fea_dim,
uint64_t padding_id,
bool scale_sparse,
const std::string& accessor,
const std::string& click_name,
phi::Place place,
const std::vector<std::string>& input_names,
std::vector<const phi::DenseTensor*>* inputs,
std::vector<const phi::DenseTensor*>* outputs) {
#ifdef PADDLE_WITH_PSLIB
int show_index = 0;
int click_index = 1;
@@ -1324,7 +1350,7 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync(

std::vector<float> fea_labels;
fea_labels.reserve(MAX_FEASIGN_NUM / 100);
framework::Variable* var = scope.FindVar(click_name);
size_t global_idx = 0;
if (click_name != "") {
PADDLE_ENFORCE_NOT_NULL(
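Note on the fleet_wrapper.cc hunk above: the change keeps the original scope-based entry point and adds a ByVar variant that takes an already-resolved framework::Variable*, with the old function now only resolving the variable and forwarding. The snippet below is a self-contained toy sketch of that refactoring pattern only; the simplified Scope/Variable types and function names are hypothetical and are not Paddle's real API.

// Toy sketch of the "add a ByVar overload" refactoring pattern
// (hypothetical simplified types, not Paddle's actual classes).
#include <iostream>
#include <string>
#include <unordered_map>

struct Variable { std::string data; };

struct Scope {
  std::unordered_map<std::string, Variable> vars;
  Variable* FindVar(const std::string& name) {
    auto it = vars.find(name);
    return it == vars.end() ? nullptr : &it->second;
  }
};

// New overload: operates directly on the already-resolved variable.
void PushSparseLabelsByVar(Variable* var, const std::string& click_name) {
  std::cout << "push labels for '" << click_name << "': "
            << (var ? var->data : "<null>") << "\n";
}

// Original entry point becomes a thin wrapper: resolve, then forward.
void PushSparseLabels(Scope& scope, const std::string& click_name) {
  Variable* var = scope.FindVar(click_name);
  PushSparseLabelsByVar(var, click_name);
}

int main() {
  Scope scope;
  scope.vars["click"] = Variable{"0,1,1,0"};
  PushSparseLabels(scope, "click");                        // existing call path
  PushSparseLabelsByVar(scope.FindVar("click"), "click");  // new direct path
  return 0;
}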