diff --git a/.clang-tidy b/.clang-tidy
index f4b350fdea7a64..c3b23735c1bab3 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -201,6 +201,7 @@ modernize-use-override,
 -performance-trivially-destructible,
 -performance-type-promotion-in-math-fn,
 -performance-unnecessary-copy-initialization,
+cppcoreguidelines-pro-type-cstyle-cast,
 readability-container-size-empty,
 '
 HeaderFilterRegex: '^(paddle/(?!cinn)).*$'
diff --git a/paddle/fluid/distributed/fleet_executor/test/interceptor_ping_pong_with_brpc_test.cc b/paddle/fluid/distributed/fleet_executor/test/interceptor_ping_pong_with_brpc_test.cc
index 62c23068d7d4a9..6ae3f123f242a9 100644
--- a/paddle/fluid/distributed/fleet_executor/test/interceptor_ping_pong_with_brpc_test.cc
+++ b/paddle/fluid/distributed/fleet_executor/test/interceptor_ping_pong_with_brpc_test.cc
@@ -81,7 +81,9 @@ TEST(InterceptorTest, PingPong) {
   address.sin_family = AF_INET;
   address.sin_addr.s_addr = INADDR_ANY;
   address.sin_port = htons(port0);
-  while (bind(server_fd, (struct sockaddr*)&address, sizeof(address)) == -1) {
+  while (bind(server_fd,
+              reinterpret_cast<struct sockaddr*>(&address),
+              sizeof(address)) == -1) {
     port0++;
     address.sin_port = htons(port0);
   }
@@ -93,7 +95,9 @@ TEST(InterceptorTest, PingPong) {
   setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
   port1 = port0 + 1;
   address.sin_port = htons(port1);
-  while (bind(server_fd, (struct sockaddr*)&address, sizeof(address)) == -1) {
+  while (bind(server_fd,
+              reinterpret_cast<struct sockaddr*>(&address),
+              sizeof(address)) == -1) {
     port1++;
     address.sin_port = htons(port1);
   }
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
index 6a12802c352a25..8925b9be379d85 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -1580,7 +1580,8 @@ std::future<int32_t> BrpcPsClient::PushSparse(size_t table_id,
     for (size_t kv_idx = 0; kv_idx < sorted_kv_size; ++kv_idx) {
       shard_kv_data.key_list[kv_idx] = sorted_kv_list[kv_idx].first;
       shard_kv_data.value_list[kv_idx].assign(
-          (const char *)sorted_kv_list[kv_idx].second, value_size);
+          reinterpret_cast<const char *>(sorted_kv_list[kv_idx].second),
+          value_size);
     }
     shard_kv_data.kv_num = sorted_kv_size;
   }
@@ -1790,8 +1791,8 @@ int BrpcPsClient::PushSparseAsyncShardMerge(
   } else if (sorted_kv_size == 1) {
     shard_kv_data.kv_num = 1;
     shard_kv_data.key_list[0] = sorted_kv_list[0].first;
-    shard_kv_data.value_list[0].assign((const char *)(sorted_kv_list[0].second),
-                                       value_size);
+    shard_kv_data.value_list[0].assign(
+        reinterpret_cast<const char *>(sorted_kv_list[0].second), value_size);
     return 0;
   }
@@ -1814,11 +1815,12 @@ int BrpcPsClient::PushSparseAsyncShardMerge(
     }
     if (last_merge_data != NULL) {
       shard_kv_data.value_list[merged_kv_count].assign(
-          (const char *)last_merge_data, value_size);
+          reinterpret_cast<const char *>(last_merge_data), value_size);
       last_merge_data = NULL;
     } else {
       shard_kv_data.value_list[merged_kv_count].assign(
-          (const char *)sorted_kv_list[kv_idx - 1].second, value_size);
+          reinterpret_cast<const char *>(sorted_kv_list[kv_idx - 1].second),
+          value_size);
     }
     shard_kv_data.key_list[merged_kv_count++] = last_key;
     if (kv_idx < sorted_kv_size) {
@@ -1827,7 +1829,7 @@ int BrpcPsClient::PushSparseAsyncShardMerge(
     }
     if (kv_idx == sorted_kv_size - 1) {
       shard_kv_data.value_list[merged_kv_count].assign(
-          (const char *)last_value_data, value_size);
+          reinterpret_cast<const char *>(last_value_data), value_size);
       shard_kv_data.key_list[merged_kv_count++] = last_key;
     }
   }
@@ -1918,7 +1920,7 @@ std::future<int32_t> BrpcPsClient::PushDense(const Region *regions,
     CHECK(pos + data_num <= data_size)
         << "invalid dense size, cur pos[" << pos << "]"
        << " data_num[" << data_num << "] size[" << data_size << "]";
-    const float *region_data = (const float *)(regions[i].data);
+    const float *region_data = reinterpret_cast<const float *>(regions[i].data);
     memcpy(data + pos, region_data, regions[i].size);
     pos += data_num;
   }
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
index 84784c32f3b2d9..66987f766142bb 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
@@ -311,7 +311,7 @@ int32_t BrpcPsService::PullDense(Table *table,
     return 0;
   }
   CostTimer timer("pserver_server_pull_dense");
-  uint32_t num = *(const uint32_t *)request.params(0).c_str();
+  uint32_t num = *reinterpret_cast<const uint32_t *>(request.params(0).c_str());
 
   auto res_data = butil::get_object<std::vector<float>>();
   res_data->resize(num * table->ValueAccesor()->GetAccessorInfo().select_size /
@@ -346,12 +346,14 @@ int32_t BrpcPsService::PushDenseParam(Table *table,
   }
   push_buffer.resize(0);
   push_buffer.reserve(req_buffer_size);
-  const char *data = (const char *)cntl->request_attachment().fetch(
-      const_cast<char *>(push_buffer.data()), req_buffer_size);
+  const char *data =
+      reinterpret_cast<const char *>(cntl->request_attachment().fetch(
+          const_cast<char *>(push_buffer.data()), req_buffer_size));
 
-  uint32_t num = *(const uint32_t *)data;
+  uint32_t num = *reinterpret_cast<const uint32_t *>(data);
 
-  const float *values = (const float *)(data + sizeof(uint32_t));
+  const float *values =
+      reinterpret_cast<const float *>(data + sizeof(uint32_t));
   TableContext table_context;
   table_context.value_type = Dense;
   table_context.push_context.values = values;
@@ -383,11 +385,11 @@ int32_t BrpcPsService::PushDense(Table *table,
   |--num--|---valuesData---|
   |--4B---|----------------|
   */
-  uint32_t num = *(const uint32_t *)(request.data().data());
+  uint32_t num = *reinterpret_cast<const uint32_t *>(request.data().data());
   TableContext table_context;
   table_context.value_type = Dense;
   table_context.push_context.values =
-      (const float *)(request.data().data() + sizeof(uint32_t));
+      reinterpret_cast<const float *>(request.data().data() + sizeof(uint32_t));
   table_context.num = num;
   //  const float *values = (const float *)(request.data().data() +
   //  sizeof(uint32_t));
@@ -446,9 +448,9 @@ int32_t BrpcPsService::PushSparseParam(Table *table,
   |---keysData---|---valuesData---|
   |---8*{num}B---|----------------|
   */
-  const uint64_t *keys = (const uint64_t *)push_data.data();
-  const float *values =
-      (const float *)(push_data.data() + sizeof(uint64_t) * num);
+  const uint64_t *keys = reinterpret_cast<const uint64_t *>(push_data.data());
+  const float *values = reinterpret_cast<const float *>(push_data.data() +
+                                                        sizeof(uint64_t) * num);
 
   TableContext table_context;
   table_context.value_type = Sparse;
@@ -578,9 +580,10 @@ int32_t BrpcPsService::PushSparse(Table *table,
   */
   TableContext table_context;
   table_context.value_type = Sparse;
-  table_context.push_context.keys = (const uint64_t *)push_data.data();
-  table_context.push_context.values =
-      (const float *)(push_data.data() + sizeof(uint64_t) * num);
+  table_context.push_context.keys =
+      reinterpret_cast<const uint64_t *>(push_data.data());
+  table_context.push_context.values = reinterpret_cast<const float *>(
+      push_data.data() + sizeof(uint64_t) * num);
   table_context.num = num;
   //  const uint64_t *keys = (const uint64_t *)push_data.data();
   //  const float *values = (const float *)(push_data.data() + sizeof(uint64_t) *
@@ -879,8 +882,8 @@ int32_t BrpcPsService::PushGlobalStep(Table *table,
     set_response_code(response, 0, "run_program data is empty");
     return 0;
   }
-  const int64_t *values =
-      (const int64_t *)(request.data().data() + sizeof(uint32_t));
+  const int64_t *values = reinterpret_cast<const int64_t *>(
+      request.data().data() + sizeof(uint32_t));
   auto trainer_id = request.client_id();
 
   TableContext context;
diff --git a/paddle/fluid/distributed/ps/service/brpc_utils.cc b/paddle/fluid/distributed/ps/service/brpc_utils.cc
index 715d1bbf954f07..9f2a14b7ff835e 100644
--- a/paddle/fluid/distributed/ps/service/brpc_utils.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_utils.cc
@@ -333,7 +333,7 @@ std::string GetIntTypeEndpoint(const std::string& ip, const uint32_t& port) {
   char* int_ip = NULL;
 
   while (hp->h_addr_list[i] != NULL) {
-    int_ip = inet_ntoa(*(struct in_addr*)hp->h_addr_list[i]);
+    int_ip = inet_ntoa(*reinterpret_cast<struct in_addr*>(hp->h_addr_list[i]));
     VLOG(3) << "Brpc Get host by name, host:" << ip << " -> ip: " << int_ip;
     break;
   }
diff --git a/paddle/fluid/distributed/ps/service/heter_client.cc b/paddle/fluid/distributed/ps/service/heter_client.cc
index 2ca9fef5c08765..b62800e46bbdb4 100644
--- a/paddle/fluid/distributed/ps/service/heter_client.cc
+++ b/paddle/fluid/distributed/ps/service/heter_client.cc
@@ -185,7 +185,9 @@ std::future<int32_t> HeterClient::SendCmd(
       new paddle::distributed::DownpourBrpcClosure(
           request_call_num, [request_call_num, cmd_id](void* done) {
             int ret = 0;
-            auto* closure = (paddle::distributed::DownpourBrpcClosure*)done;
+            auto* closure =
+                reinterpret_cast<paddle::distributed::DownpourBrpcClosure*>(
+                    done);
             for (size_t i = 0; i < request_call_num; ++i) {
               if (closure->check_response(i, cmd_id) != 0) {
                 ret = -1;
diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
index 7fe53febc5a9b8..f2eba687880dbc 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
@@ -155,7 +155,7 @@ def FindParsingFunctionFromAttributeType(atype):
 
 FUNCTION_NAME_TEMPLATE = "{}{}{}"
 
 
-PYTHON_C_FUNCTION_REG_TEMPLATE = "  {{\"{}{}\", (PyCFunction)(void(*)(void)) {}eager_api_{}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {} in dygraph.\"}},\n"
+PYTHON_C_FUNCTION_REG_TEMPLATE = "  {{\"{}{}\", reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>({}eager_api_{})), METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {} in dygraph.\"}},\n"
 
 
 PYTHON_C_WRAPPER_TEMPLATE = """
@@ -243,9 +243,9 @@ def FindParsingFunctionFromAttributeType(atype):
 
 
 CORE_OPS_INFO_REGISTRY = """
-  {\"get_core_ops_args_info\", (PyCFunction)(void(*)(void))eager_get_core_ops_args_info, METH_NOARGS, \"C++ interface function for eager_get_core_ops_args_info.\"},
-  {\"get_core_ops_args_type_info\", (PyCFunction)(void(*)(void))eager_get_core_ops_args_type_info, METH_NOARGS, \"C++ interface function for eager_get_core_ops_args_type_info.\"},
-  {\"get_core_ops_returns_info\", (PyCFunction)(void(*)(void))eager_get_core_ops_returns_info, METH_NOARGS, \"C++ interface function for eager_get_core_ops_returns_info.\"},
+  {\"get_core_ops_args_info\", reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>(eager_get_core_ops_args_info)), METH_NOARGS, \"C++ interface function for eager_get_core_ops_args_info.\"},
+  {\"get_core_ops_args_type_info\", reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>(eager_get_core_ops_args_type_info)), METH_NOARGS, \"C++ interface function for eager_get_core_ops_args_type_info.\"},
+  {\"get_core_ops_returns_info\", reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>(eager_get_core_ops_returns_info)), METH_NOARGS, \"C++ interface function for eager_get_core_ops_returns_info.\"},
 """
 
 NAMESPACE_WRAPPER_TEMPLATE = """namespace {} {{
diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc
index b102f3714eeabe..1147a1cb9fc4fa 100644
--- a/paddle/fluid/framework/data_feed.cc
+++ b/paddle/fluid/framework/data_feed.cc
@@ -28,6 +28,7 @@ limitations under the License. */
 #include "io/fs.h"
 #include "paddle/fluid/platform/monitor.h"
 #include "paddle/fluid/platform/timer.h"
+#include "paddle/utils/mman_patch.h"
 
 USE_INT_STAT(STAT_total_feasign_num_in_mem);
 PHI_DECLARE_bool(enable_ins_parser_file);
diff --git a/paddle/fluid/framework/fleet/gloo_wrapper.cc b/paddle/fluid/framework/fleet/gloo_wrapper.cc
index e6385a38252755..57517a98ef6c70 100644
--- a/paddle/fluid/framework/fleet/gloo_wrapper.cc
+++ b/paddle/fluid/framework/fleet/gloo_wrapper.cc
@@ -281,7 +281,8 @@ void ParallelConnectContext::connectFullMesh(
       }
       Impl impl_;
       memcpy(&impl_, addr.data(), sizeof(impl_));
-      struct sockaddr_in* sa = (struct sockaddr_in*)&(impl_.ss);
+      struct sockaddr_in* sa =
+          reinterpret_cast<struct sockaddr_in*>(&(impl_.ss));
       std::string ip = getCharIpAddr(sa->sin_addr.s_addr);
       VLOG(0) << "peer " << i << " ip addr: " << ip
               << ", port: " << sa->sin_port;
diff --git a/paddle/fluid/inference/api/mkldnn_quantizer.cc b/paddle/fluid/inference/api/mkldnn_quantizer.cc
index 134c0e2abb4249..26a9fa0360b831 100644
--- a/paddle/fluid/inference/api/mkldnn_quantizer.cc
+++ b/paddle/fluid/inference/api/mkldnn_quantizer.cc
@@ -584,7 +584,7 @@ AnalysisPredictor::MkldnnQuantizer::Histogram(
 void AnalysisPredictor::MkldnnQuantizer::ClearDeviceContext() const {
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
   phi::OneDNNContext* dev_ctx =
-      (phi::OneDNNContext*)pool.Get(predictor_.place_);
+      dynamic_cast<phi::OneDNNContext*>(pool.Get(predictor_.place_));
   dev_ctx->ResetBlobMap(phi::OneDNNContext::tls().get_curr_exec());
 }
diff --git a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc
index 176738ce6295e0..8049572475f54f 100644
--- a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc
+++ b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc
@@ -148,7 +148,7 @@ const void* TRTInt8Calibrator::readCalibrationCache(size_t& length)
 void TRTInt8Calibrator::writeCalibrationCache(const void* ptr,
                                               std::size_t length) TRT_NOEXCEPT {
-  calibration_table_ = std::string((const char*)ptr, length);
+  calibration_table_ = std::string(reinterpret_cast<const char*>(ptr), length);
   VLOG(4) << "Got calibration data for " << engine_name_ << " " << ptr
           << " length=" << length;
 }
diff --git a/paddle/fluid/memory/allocation/mmap_allocator.cc b/paddle/fluid/memory/allocation/mmap_allocator.cc
index ceda4ee578cee8..273ae1b7d4bb53 100644
--- a/paddle/fluid/memory/allocation/mmap_allocator.cc
+++ b/paddle/fluid/memory/allocation/mmap_allocator.cc
@@ -27,6 +27,7 @@
 #include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/core/flags.h"
+#include "paddle/utils/mman_patch.h"
 
 PHI_DECLARE_bool(use_shm_cache);
diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc
index a0ad7e3939a026..77382464d6ca8c 100644
--- a/paddle/fluid/operators/reader/buffered_reader.cc
+++ b/paddle/fluid/operators/reader/buffered_reader.cc
@@ -51,10 +51,9 @@ BufferedReader::BufferedReader(
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   if (platform::is_gpu_place(place_) && !pin_memory) {
     int dev_idx = place_.device;
-    compute_stream_ =
-        ((phi::GPUContext *)(platform::DeviceContextPool::Instance().Get(
-            place_)))
-            ->stream();
+    compute_stream_ = (dynamic_cast<phi::GPUContext *>(
+                           platform::DeviceContextPool::Instance().Get(place_)))
+                          ->stream();
     events_.resize(buffer_size);
     for (auto &event : events_) {
       event = platform::CudaEventResourcePool::Instance().New(dev_idx);
@@ -80,8 +79,8 @@ BufferedReader::BufferedReader(
 
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
   if (platform::is_custom_place(place_)) {
-    auto stream = ((platform::CustomDeviceContext
-                        *)(platform::DeviceContextPool::Instance().Get(place_)))
+    auto stream = (dynamic_cast<platform::CustomDeviceContext *>(
+                       platform::DeviceContextPool::Instance().Get(place_)))
                       ->stream();
     custom_device_compute_stream_ =
         std::make_shared<phi::stream::Stream>(place_, stream);
diff --git a/paddle/fluid/platform/gen_comm_id_helper.cc b/paddle/fluid/platform/gen_comm_id_helper.cc
index 365c44fc9abebe..1fab2e63ca891e 100644
--- a/paddle/fluid/platform/gen_comm_id_helper.cc
+++ b/paddle/fluid/platform/gen_comm_id_helper.cc
@@ -195,10 +195,11 @@ int CreateListenSocket(const std::string& ep) {
   int total_time = 0;
   while (true) {
     int ret_val = -1;
-    RETRY_SYS_CALL_VAL(
-        bind(server_fd, (struct sockaddr*)&address, sizeof(address)),
-        "bind",
-        ret_val);
+    RETRY_SYS_CALL_VAL(bind(server_fd,
+                            reinterpret_cast<struct sockaddr*>(&address),
+                            sizeof(address)),
+                       "bind",
+                       ret_val);
 
     if (ret_val == -1) {
       BindOrConnectFailed(timeout, &try_times, &total_time, "bind", ep);
@@ -277,7 +278,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
 
   int i = 0;
   while (hp->h_addr_list[i] != NULL) {
-    ip = inet_ntoa(*(struct in_addr*)hp->h_addr_list[i]);
+    ip = inet_ntoa(*reinterpret_cast<struct in_addr*>(hp->h_addr_list[i]));
     VLOG(3) << "gethostbyname host:" << host << " ->ip: " << ip;
     break;
   }
@@ -301,10 +302,11 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
   CHECK_SYS_CALL_VAL(socket(AF_INET, SOCK_STREAM, 0), "socket", sock);
   while (true) {
     int ret_val = -1;
-    RETRY_SYS_CALL_VAL(
-        connect(sock, (struct sockaddr*)&server_addr, sizeof(server_addr)),
-        "connect",
-        ret_val);
+    RETRY_SYS_CALL_VAL(connect(sock,
+                               reinterpret_cast<struct sockaddr*>(&server_addr),
+                               sizeof(server_addr)),
+                       "connect",
+                       ret_val);
 
     if (ret_val == -1) {
       BindOrConnectFailed(timeout, &try_times, &total_time, "connect", ep);
diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
index 316a19728c19b8..f93d25b184080a 100644
--- a/paddle/fluid/pybind/eager.cc
+++ b/paddle/fluid/pybind/eager.cc
@@ -1328,7 +1328,7 @@ void BindEager(pybind11::module* module) {
   auto type = &heap_type->ht_type;
   type->tp_name = "Tensor";
   type->tp_basicsize = sizeof(TensorObject);
-  type->tp_dealloc = (destructor)TensorDealloc;
+  type->tp_dealloc = reinterpret_cast<destructor>(TensorDealloc);
   type->tp_as_number = &number_methods;
   type->tp_as_sequence = &sequence_methods;
   type->tp_as_mapping = &mapping_methods;
@@ -1337,7 +1337,7 @@ void BindEager(pybind11::module* module) {
   type->tp_init = TensorInit;
   type->tp_new = TensorNew;
   type->tp_weaklistoffset = offsetof(TensorObject, weakrefs);
-  Py_INCREF(&PyBaseObject_Type);
+  Py_INCREF(&PyBaseObject_Type);  // NOLINT
   type->tp_base = reinterpret_cast<PyTypeObject*>(&PyBaseObject_Type);
   type->tp_flags |=
       Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
@@ -1352,10 +1352,10 @@ void BindEager(pybind11::module* module) {
     return;
   }
 
-  Py_INCREF(type);
+  Py_INCREF(type);  // NOLINT
   if (PyModule_AddObject(m.ptr(), "Tensor", reinterpret_cast<PyObject*>(type)) <
       0) {
-    Py_DECREF(type);
+    Py_DECREF(type);  // NOLINT
     Py_DECREF(m.ptr());
     PADDLE_THROW(platform::errors::Fatal(
         "Init Paddle error in BindEager(PyModule_AddObject)."));
@@ -1377,7 +1377,7 @@ void BindEagerStringTensor(pybind11::module* module) {
   auto type = &heap_type->ht_type;
   type->tp_name = "StringTensor";
   type->tp_basicsize = sizeof(TensorObject);
-  type->tp_dealloc = (destructor)TensorDealloc;
+  type->tp_dealloc = reinterpret_cast<destructor>(TensorDealloc);
   type->tp_as_number = &number_methods;
   type->tp_as_sequence = &sequence_methods;
   type->tp_as_mapping = &mapping_methods;
@@ -1385,7 +1385,7 @@ void BindEagerStringTensor(pybind11::module* module) {
   type->tp_getset = string_tensor_variable_properties;
   type->tp_init = StringTensorInit;
   type->tp_new = TensorNew;
-  Py_INCREF(&PyBaseObject_Type);
+  Py_INCREF(&PyBaseObject_Type);  // NOLINT
   type->tp_base = reinterpret_cast<PyTypeObject*>(&PyBaseObject_Type);
   type->tp_flags |=
       Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
@@ -1400,10 +1400,10 @@ void BindEagerStringTensor(pybind11::module* module) {
     return;
   }
 
-  Py_INCREF(type);
+  Py_INCREF(type);  // NOLINT
   if (PyModule_AddObject(
           m.ptr(), "StringTensor", reinterpret_cast<PyObject*>(type)) < 0) {
-    Py_DECREF(type);
+    Py_DECREF(type);  // NOLINT
     Py_DECREF(m.ptr());
     PADDLE_THROW(platform::errors::Fatal(
         "Init Paddle error in BindEagerStringTensor(PyModule_AddObject)."));
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 59a94a31c448d1..503418b59c14e3 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -1276,88 +1276,107 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
 PyMethodDef variable_functions[] = {
     // TODO(jiabin): Remove scale when we have final state tests
     {"scale",
-     (PyCFunction)(void (*)(void))eager_api_scale,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_scale)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_add_backward_final_hook",
-     (PyCFunction)(void (*)(void))eager_api__add_backward_final_hook,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api__add_backward_final_hook)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"run_backward",
-     (PyCFunction)(void (*)(void))eager_api_run_backward,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_run_backward)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"run_partial_grad",
-     (PyCFunction)(void (*)(void))eager_api_run_partial_grad,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_run_partial_grad)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_get_custom_operator_inplace_map",
-     (PyCFunction)(void (*)(
-         void))eager_api__get_custom_operator_inplace_reverse_idx,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         eager_api__get_custom_operator_inplace_reverse_idx)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
    {"_run_custom_op",
-     (PyCFunction)(void (*)(void))eager_api_run_custom_op,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_run_custom_op)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"tensor_copy",
-     (PyCFunction)(void (*)(void))eager_api_tensor_copy,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_tensor_copy)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"get_all_grads",
-     (PyCFunction)(void (*)(void))eager_api_get_all_grads,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_get_all_grads)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"get_grads_lists",
-     (PyCFunction)(void (*)(void))eager_api_get_grads_lists,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_get_grads_lists)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"get_grads_types",
-     (PyCFunction)(void (*)(void))eager_api_get_grads_types,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_get_grads_types)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"read_next_tensor_list",
-     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_read_next_tensor_list)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"jit_function_call",
-     (PyCFunction)(void (*)(void))eager_api_jit_function_call,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_jit_function_call)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /**sparse functions**/
     {"sparse_coo_tensor",
-     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_sparse_coo_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"sparse_csr_tensor",
-     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_sparse_csr_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"register_saved_tensors_hooks",
-     (PyCFunction)(void (*)(void))eager_api_register_saved_tensors_hooks,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         eager_api_register_saved_tensors_hooks)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"reset_saved_tensors_hooks",
-     (PyCFunction)(void (*)(void))eager_api_reset_saved_tensors_hooks,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_reset_saved_tensors_hooks)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /**amp functions**/
     {"set_master_grads",
-     (PyCFunction)(void (*)(void))eager_api_set_master_grads,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_set_master_grads)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
 /**sparse functions**/
 #if defined(PADDLE_WITH_CUDA)
     {"async_read",
-     (PyCFunction)(void (*)(void))eager_api_async_read,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_async_read)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"async_write",
-     (PyCFunction)(void (*)(void))eager_api_async_write,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_async_write)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"to_uva_tensor",
-     (PyCFunction)(void (*)(void))eager_api_to_uva_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(eager_api_to_uva_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
 #endif
diff --git a/paddle/fluid/pybind/eager_legacy_op_function_generator.cc b/paddle/fluid/pybind/eager_legacy_op_function_generator.cc
index ece79bd33b7190..f7193647ecdcb7 100644
--- a/paddle/fluid/pybind/eager_legacy_op_function_generator.cc
+++ b/paddle/fluid/pybind/eager_legacy_op_function_generator.cc
@@ -129,7 +129,7 @@ static PyObject * %s(PyObject *self, PyObject *args, PyObject *kwargs)
   }
 })";
 
-const char* PYBIND_ITEM_TEMPLATE = R"(  {"%s", (PyCFunction)(void(*)(void))%s, METH_VARARGS | METH_KEYWORDS, "C++ interface function for %s in dygraph."},)";
+const char* PYBIND_ITEM_TEMPLATE = R"(  {"%s", reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>(%s)), METH_VARARGS | METH_KEYWORDS, "C++ interface function for %s in dygraph."},)";
 
 // These operators will skip automatical code generation and
 // need to be handwritten in CUSTOM_HANDWRITE_OP_FUNC_FILE
@@ -512,14 +512,17 @@ int main(int argc, char* argv[]) {
   auto core_ops_infos = GenerateCoreOpsInfoMap();
   std::string core_ops_infos_registry =
       "  {\"get_core_ops_args_info\", "
-      "(PyCFunction)(void(*)(void))eager_get_core_ops_args_info, METH_NOARGS, "
+      "reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>("
+      "eager_get_core_ops_args_info)), METH_NOARGS, "
      "\"C++ interface function for eager_get_core_ops_args_info.\"},\n"
       "  {\"get_core_ops_args_type_info\", "
-      "(PyCFunction)(void(*)(void))eager_get_core_ops_args_type_info, "
+      "reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>("
+      "eager_get_core_ops_args_type_info)), "
       "METH_NOARGS, "
       "\"C++ interface function for eager_get_core_ops_args_type_info.\"},\n"
       "  {\"get_core_ops_returns_info\", "
-      "(PyCFunction)(void(*)(void))eager_get_core_ops_returns_info, "
+      "reinterpret_cast<PyCFunction>(reinterpret_cast<void(*)(void)>("
+      "eager_get_core_ops_returns_info)), "
       "METH_NOARGS, \"C++ interface function for "
       "eager_get_core_ops_returns_info.\"},\n";
diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc
index 69d0465bf7cddf..d6d5b1863587e2 100644
--- a/paddle/fluid/pybind/eager_math_op_patch.cc
+++ b/paddle/fluid/pybind/eager_math_op_patch.cc
@@ -1835,87 +1835,108 @@ static PyObject* tensor__eq__method(TensorObject* self,
 PyMethodDef math_op_patch_methods[] = {
     {"__add__",
-     (PyCFunction)(void (*)(void))tensor__add__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__add__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__radd__",
-     (PyCFunction)(void (*)(void))tensor__add__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__add__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__sub__",
-     (PyCFunction)(void (*)(void))tensor__sub__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__sub__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__rsub__",
-     (PyCFunction)(void (*)(void))tensor__rsub__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__rsub__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__mul__",
-     (PyCFunction)(void (*)(void))tensor__mul__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__mul__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__rmul__",
-     (PyCFunction)(void (*)(void))tensor__mul__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__mul__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__div__",
-     (PyCFunction)(void (*)(void))tensor__div__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__div__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__truediv__",
-     (PyCFunction)(void (*)(void))tensor__div__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__div__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__rdiv__",
-     (PyCFunction)(void (*)(void))tensor__rdiv__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__rdiv__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__rtruediv__",
-     (PyCFunction)(void (*)(void))tensor__rdiv__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__rdiv__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__floordiv__",
-     (PyCFunction)(void (*)(void))tensor__floordiv__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__floordiv__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__pow__",
-     (PyCFunction)(void (*)(void))tensor__pow__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__pow__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__rpow__",
-     (PyCFunction)(void (*)(void))tensor__rpow__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__rpow__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__mod__",
-     (PyCFunction)(void (*)(void))tensor__mod__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__mod__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__matmul__",
-     (PyCFunction)(void (*)(void))tensor__matmul__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__matmul__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__gt__",
-     (PyCFunction)(void (*)(void))tensor__gt__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__gt__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__ge__",
-     (PyCFunction)(void (*)(void))tensor__ge__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__ge__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__lt__",
-     (PyCFunction)(void (*)(void))tensor__lt__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__lt__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__le__",
-     (PyCFunction)(void (*)(void))tensor__le__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__le__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__eq__",
-     (PyCFunction)(void (*)(void))tensor__eq__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__eq__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__ne__",
-     (PyCFunction)(void (*)(void))tensor__ne__method,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__ne__method)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {NULL, NULL, 0, NULL}};
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 6a3f7e09c202a8..0767e3a68c8c3c 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -2034,240 +2034,297 @@ static PyObject* tensor_method__is_string_tensor_hold_allocation(
 PyMethodDef variable_methods[] = {
     {"numpy",
-     (PyCFunction)(void (*)(void))tensor_method_numpy,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_numpy)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_is_initialized",
-     (PyCFunction)(void (*)(void))tensor_method__is_initialized,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method__is_initialized)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_is_dense_tensor_hold_allocation",
-     (PyCFunction)(void (*)(
-         void))tensor_method__is_dense_tensor_hold_allocation,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         tensor_method__is_dense_tensor_hold_allocation)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_copy_to",
-     (PyCFunction)(void (*)(void))tensor_method__copy_to,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method__copy_to)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"copy_",
-     (PyCFunction)(void (*)(void))tensor_method_copy_,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_copy_)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"clone",
-     (PyCFunction)(void (*)(void))tensor_method_clone,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_clone)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"reconstruct_from_",
-     (PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_reconstruct_from_)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"retain_grads",
-     (PyCFunction)(void (*)(void))tensor_retain_grads,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_retain_grads)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"clear_gradient",
-     (PyCFunction)(void (*)(void))tensor_clear_gradient,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_clear_gradient)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_dense",
-     (PyCFunction)(void (*)(void))tensor_method_is_dense,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_dense)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_dist",
-     (PyCFunction)(void (*)(void))tensor_method_is_dist,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_dist)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_zero_grads",
-     (PyCFunction)(void (*)(void))tensor__zero_grads,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__zero_grads)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_share_buffer_to",
-     (PyCFunction)(void (*)(void))tensor__share_buffer_to,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__share_buffer_to)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_is_shared_buffer_with",
-     (PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__is_shared_buffer_with)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_share_underline_tensor_to",
-     (PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__share_underline_tensor_to)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_is_shared_underline_tensor_with",
-     (PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         tensor__is_shared_underline_tensor_with)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"detach",
-     (PyCFunction)(void (*)(void))tensor_method_detach,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_detach)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"get_tensor",
-     (PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_underline_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"get_selected_rows",
-     (PyCFunction)(void (*)(void))tensor_method_get_underline_selected_rows,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         tensor_method_get_underline_selected_rows)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_get_tensor_from_selected_rows",
-     (PyCFunction)(void (*)(void))tensor_method__get_tensor_from_selected_rows,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         tensor_method__get_tensor_from_selected_rows)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_getitem_index_not_tensor",
-     (PyCFunction)(void (*)(void))tensor__getitem_index_not_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__getitem_index_not_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_getitem_from_offset",
-     (PyCFunction)(void (*)(void))tensor__getitem_from_offset,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__getitem_from_offset)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"__setitem_eager_tensor__",
-     (PyCFunction)(void (*)(void))tensor_method__setitem_eager_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method__setitem_eager_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_register_grad_hook",
-     (PyCFunction)(void (*)(void))tensor_register_grad_hook,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_register_grad_hook)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_remove_grad_hook",
-     (PyCFunction)(void (*)(void))tensor_remove_grad_hook,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_remove_grad_hook)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_register_backward_hook",
-     (PyCFunction)(void (*)(void))tensor_register_reduce_hook,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_register_reduce_hook)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_set_grad_type",
-     (PyCFunction)(void (*)(void))tensor__set_grad_type,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__set_grad_type)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_clear",
-     (PyCFunction)(void (*)(void))tensor__clear,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__clear)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_clear_dataptr",
-     (PyCFunction)(void (*)(void))tensor__clear_dataptr,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__clear_dataptr)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_copy_gradient_from",
-     (PyCFunction)(void (*)(void))tensor__copy_gradient_from,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__copy_gradient_from)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_tensor_use_gpudnn",
-     (PyCFunction)(void (*)(void))tensor__use_gpudnn,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__use_gpudnn)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /** the methods to adapt old dygraph, will be removed in the future **/
     {"set_string_list",
-     (PyCFunction)(void (*)(void))tensor_method_set_string_list,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_set_string_list)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"set_vocab",
-     (PyCFunction)(void (*)(void))tensor_method_set_vocab,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_set_vocab)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"get_map_tensor",
-     (PyCFunction)(void (*)(void))tensor_method_get_map_tensor,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_map_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /***the method of sparse tensor****/
     {"nnz",
-     (PyCFunction)(void (*)(void))tensor_method_get_non_zero_nums,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_non_zero_nums)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"indices",
-     (PyCFunction)(void (*)(void))tensor_method_get_non_zero_indices,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_non_zero_indices)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"values",
-     (PyCFunction)(void (*)(void))tensor_method_get_non_zero_elements,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_non_zero_elements)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"crows",
-     (PyCFunction)(void (*)(void))tensor_method_get_non_zero_crows,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_non_zero_crows)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"cols",
-     (PyCFunction)(void (*)(void))tensor_method_get_non_zero_cols,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_non_zero_cols)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_sparse",
-     (PyCFunction)(void (*)(void))tensor_method_is_sparse,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_sparse)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_sparse_coo",
-     (PyCFunction)(void (*)(void))tensor_method_is_sparse_coo,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_sparse_coo)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_sparse_csr",
-     (PyCFunction)(void (*)(void))tensor_method_is_sparse_csr,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_sparse_csr)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_same_shape",
-     (PyCFunction)(void (*)(void))tensor_method_is_same_shape,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_same_shape)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"to_sparse_csr",
-     (PyCFunction)(void (*)(void))tensor_method_to_sparse_csr,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_to_sparse_csr)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"element_size",
-     (PyCFunction)(void (*)(void))tensor_method_element_size,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_element_size)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /***the method of sparse tensor****/
     {"_inplace_version",
-     (PyCFunction)(void (*)(void))tensor__inplace_version,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__inplace_version)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_bump_inplace_version",
-     (PyCFunction)(void (*)(void))tensor__bump_inplace_version,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__bump_inplace_version)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"is_selected_rows",
-     (PyCFunction)(void (*)(void))tensor_method_is_selected_rows,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_is_selected_rows)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"rows",
-     (PyCFunction)(void (*)(void))tensor_method_get_rows,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method_get_rows)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_reset_grad_inplace_version",
-     (PyCFunction)(void (*)(void))tensor__reset_grad_inplace_version,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__reset_grad_inplace_version)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_share_memory",
-     (PyCFunction)(void (*)(void))tensor_method__share_memory,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method__share_memory)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_offset",
-     (PyCFunction)(void (*)(void))tensor__offset,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__offset)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_grad_name",
-     (PyCFunction)(void (*)(void))tensor__grad_name,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__grad_name)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_grad_value",
-     (PyCFunction)(void (*)(void))tensor__grad_value,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__grad_value)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_unset_fake_empty",
-     (PyCFunction)(void (*)(void))tensor__unset_fake_empty,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__unset_fake_empty)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"data_ptr",
-     (PyCFunction)(void (*)(void))tensor_data_ptr,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_data_ptr)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_grad_ivar",
-     (PyCFunction)(void (*)(void))tensor__grad_ivar,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor__grad_ivar)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
 #if defined(PADDLE_WITH_CUDA)
     {"_tensor_uva",
-     (PyCFunction)(void (*)(void))tensor_method__uva,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method__uva)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
 #endif
@@ -2276,16 +2333,18 @@ PyMethodDef variable_methods[] = {
 // variable_methods for core.eager.StringTensor
 PyMethodDef string_tensor_variable_methods[] = {
     {"numpy",
-     (PyCFunction)(void (*)(void))tensor_method_numpy_for_string_tensor,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         tensor_method_numpy_for_string_tensor)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_is_initialized",
-     (PyCFunction)(void (*)(void))tensor_method__is_initialized,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(tensor_method__is_initialized)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"_is_string_tensor_hold_allocation",
-     (PyCFunction)(void (*)(
-         void))tensor_method__is_string_tensor_hold_allocation,
+     reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)(void)>(
+         tensor_method__is_string_tensor_hold_allocation)),
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     // TODO(zhoushunjie): Need to add _copy_to, copy_ for StringTensor.
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 42c5b97067b0eb..6b3e6b85b4dcfe 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -340,46 +340,70 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
 struct PyGetSetDef variable_properties[] = {
     {"grad",
-     (getter)tensor_properties_get_grad,
-     (setter)tensor_properties_set_grad,
+     reinterpret_cast<getter>(tensor_properties_get_grad),
+     reinterpret_cast<setter>(tensor_properties_set_grad),
      nullptr,
      nullptr},
     {"name",
-     (getter)tensor_properties_get_name,
-     (setter)tensor_properties_set_name,
+     reinterpret_cast<getter>(tensor_properties_get_name),
+     reinterpret_cast<setter>(tensor_properties_set_name),
      nullptr,
      nullptr},
     {"stop_gradient",
-     (getter)tensor_properties_get_stop_gradient,
-     (setter)tensor_properties_set_stop_gradient,
+     reinterpret_cast<getter>(tensor_properties_get_stop_gradient),
+     reinterpret_cast<setter>(tensor_properties_set_stop_gradient),
      nullptr,
      nullptr},
     {"persistable",
-     (getter)tensor_properties_get_persistable,
-     (setter)tensor_properties_set_persistable,
+     reinterpret_cast<getter>(tensor_properties_get_persistable),
+     reinterpret_cast<setter>(tensor_properties_set_persistable),
      nullptr,
      nullptr},
-    {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
-    {"layout", (getter)tensor_properties_get_layout, nullptr, nullptr, nullptr},
-    // {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr,
-    // nullptr,
+    {"shape",
+     reinterpret_cast<getter>(tensor_properties_get_shape),
+     nullptr,
+     nullptr,
+     nullptr},
+    {"layout",
+     reinterpret_cast<getter>(tensor_properties_get_layout),
+     nullptr,
+     nullptr,
+     nullptr},
+    // {"is_leaf", reinterpret_cast<getter>(tensor_properties_get_is_leaf),
+    // nullptr, nullptr,
     // nullptr},
-    {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
+    {"place",
+     reinterpret_cast<getter>(tensor_properties_get_place),
+     nullptr,
+     nullptr,
+     nullptr},
     {"dist_attr",
-     (getter)tensor_properties_get_dist_attr,
+     reinterpret_cast<getter>(tensor_properties_get_dist_attr),
      nullptr,
      nullptr,
      nullptr},
     {"_place_str",
-     (getter)tensor_properties_get_place_str,
+     reinterpret_cast<getter>(tensor_properties_get_place_str),
+     nullptr,
+     nullptr,
+     nullptr},
+    {"dtype",
+     reinterpret_cast<getter>(tensor_properties_get_dtype),
+     nullptr,
+     nullptr,
+     nullptr},
+    {"type",
+     reinterpret_cast<getter>(tensor_properties_get_type),
+     nullptr,
+     nullptr,
+     nullptr},
+    {"is_leaf",
+     reinterpret_cast<getter>(tensor_properties_is_leaf),
      nullptr,
      nullptr,
      nullptr},
-    {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
-    {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
-    {"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr},
     {"grad_fn",
-     (getter)tensor_properties_get_grad_fn,
+     reinterpret_cast<getter>(tensor_properties_get_grad_fn),
      nullptr,
      nullptr,
      nullptr},
@@ -388,15 +412,27 @@ struct PyGetSetDef variable_properties[] = {
 // variable_properties for core.eager.StringTensor
 struct PyGetSetDef string_tensor_variable_properties[] = {
     {"name",
-     (getter)tensor_properties_get_name,
-     (setter)tensor_properties_set_name,
+     reinterpret_cast<getter>(tensor_properties_get_name),
+     reinterpret_cast<setter>(tensor_properties_set_name),
+     nullptr,
+     nullptr},
+    {"shape",
+     reinterpret_cast<getter>(tensor_properties_get_shape),
+     nullptr,
+     nullptr,
+     nullptr},
+    {"layout",
+     reinterpret_cast<getter>(tensor_properties_get_layout),
+     nullptr,
+     nullptr,
+     nullptr},
+    {"place",
+     reinterpret_cast<getter>(tensor_properties_get_place),
+     nullptr,
      nullptr,
      nullptr},
-    {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
-    {"layout", (getter)tensor_properties_get_layout, nullptr, nullptr, nullptr},
-    {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
     {"_place_str",
-     (getter)tensor_properties_get_place_str,
+     reinterpret_cast<getter>(tensor_properties_get_place_str),
      nullptr,
      nullptr,
      nullptr},
diff --git a/paddle/fluid/pybind/eager_py_layer.cc b/paddle/fluid/pybind/eager_py_layer.cc
index cf899aa0ab19a3..7b62b814be548c 100644
--- a/paddle/fluid/pybind/eager_py_layer.cc
+++ b/paddle/fluid/pybind/eager_py_layer.cc
@@ -164,7 +164,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
   }
   inputs_size = kwargs_size + args_size;
   forward_args = PyTuple_New(args_size + 1);
-  Py_INCREF(ctx);
+  Py_INCREF(ctx);  // NOLINT
   PyTuple_SET_ITEM(forward_args, 0, reinterpret_cast<PyObject*>(ctx));
 
   std::vector<std::vector<egr::AutogradMeta*>> inputs_autograd_meta;
@@ -458,7 +458,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
   Py_XDECREF(kwargs_value_list);
   Py_XDECREF(backward_function);
   Py_XDECREF(forward_fn);
-  Py_XDECREF(ctx);
+  Py_XDECREF(ctx);  // NOLINT
 
   return outputs;
   EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -665,34 +665,36 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self,
 PyMethodDef pylayer_methods[] = {
     {"name",
-     (PyCFunction)(void (*)(void))pylayer_method_name,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(pylayer_method_name)),
      METH_NOARGS,
      NULL},
     {"apply",
-     (PyCFunction)(void (*)(void))pylayer_method_apply,
+     reinterpret_cast<PyCFunction>(
+         reinterpret_cast<void (*)(void)>(pylayer_method_apply)),
      METH_CLASS | METH_VARARGS | METH_KEYWORDS,
      NULL},
     {NULL, NULL, 0, NULL}};
 
 struct PyGetSetDef pylayer_properties[] {
   {"container",
-   (getter)tensor_properties_get_container,
-   (setter)tensor_properties_set_container,
+   reinterpret_cast<getter>(tensor_properties_get_container),
+   reinterpret_cast<setter>(tensor_properties_set_container),
    nullptr,
    nullptr},
   {"non_differentiable",
-   (getter)tensor_properties_get_non_differentiable,
-   (setter)tensor_properties_set_non_differentiable,
+   reinterpret_cast<getter>(tensor_properties_get_non_differentiable),
+   reinterpret_cast<setter>(tensor_properties_set_non_differentiable),
    nullptr,
    nullptr},
   {"not_inplace_tensors",
-   (getter)tensor_properties_get_not_inplace_tensors,
-   (setter)tensor_properties_set_not_inplace_tensors,
+   reinterpret_cast<getter>(tensor_properties_get_not_inplace_tensors),
+   reinterpret_cast<setter>(tensor_properties_set_not_inplace_tensors),
    nullptr,
    nullptr},
   {"materialize_grads",
    nullptr,
-   (setter)tensor_properties_set_materialize_grads,
+   reinterpret_cast<setter>(tensor_properties_set_materialize_grads),
    nullptr,
    nullptr},
   {
@@ -708,11 +710,11 @@ void BindEagerPyLayer(PyObject* module) {
   auto type = &heap_type->ht_type;
   type->tp_name = "PyLayer";
   type->tp_basicsize = sizeof(PyLayerObject);
-  type->tp_dealloc = (destructor)PyLayerDealloc;
+  type->tp_dealloc = reinterpret_cast<destructor>(PyLayerDealloc);
   type->tp_methods = pylayer_methods;
   type->tp_getset = pylayer_properties;
   type->tp_new = (newfunc)PyLayerNew;
-  Py_INCREF(&PyBaseObject_Type);
+  Py_INCREF(&PyBaseObject_Type);  // NOLINT
   type->tp_base = reinterpret_cast<PyTypeObject*>(&PyBaseObject_Type);
   type->tp_flags |=
       Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
@@ -727,10 +729,10 @@ void BindEagerPyLayer(PyObject* module) {
     return;
   }
 
-  Py_INCREF(type);
+  Py_INCREF(type);  // NOLINT
   if (PyModule_AddObject(module, "PyLayer", reinterpret_cast<PyObject*>(type)) <
       0) {
-    Py_DECREF(type);
+    Py_DECREF(type);  // NOLINT
     Py_DECREF(module);
     PADDLE_THROW(platform::errors::Fatal(
         "Init Paddle error in BindEager(PyModule_AddObject)."));
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 208d2f25e7d21f..6a99ca5272562d 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -36,6 +36,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/selected_rows.h"
 #include "paddle/utils/pybind.h"
+#include "paddle/utils/python_patch.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
 #ifdef PADDLE_WITH_DISTRIBUTE
diff --git a/paddle/fluid/pybind/jit.cc b/paddle/fluid/pybind/jit.cc
index 31ec0774a231a9..80201b673be0c6 100644
--- a/paddle/fluid/pybind/jit.cc
+++ b/paddle/fluid/pybind/jit.cc
@@ -37,6 +37,7 @@ limitations under the License. */
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/jit/serializer.h"
 #include "paddle/utils/pybind.h"
+#include "paddle/utils/python_patch.h"
 
 namespace py = pybind11;
 
@@ -128,7 +129,7 @@ inline static PyObject *eval_custom_code(PyThreadState *tstate,
 #else
   PyObject *result = eval_frame_default(tstate, shadow, throw_flag);
 #endif
-  Py_DECREF(shadow);
+  Py_DECREF(shadow);  // NOLINT: for cppcoreguidelines-pro-type-cstyle-cast
   return result;
 }
diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index b4b96c02bb2271..26f5212e66b94a 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -933,10 +933,12 @@ std::shared_ptr<imperative::VarBase> GetVarBaseFromArgs(
     ssize_t arg_idx,
     bool dispensable) {
   ::pybind11::detail::instance* inst =
-      (::pybind11::detail::instance*)PyTuple_GET_ITEM(args, arg_idx);
+      reinterpret_cast<::pybind11::detail::instance*>(
+          PyTuple_GET_ITEM(args, arg_idx));
 
   if (PyTuple_Check((PyObject*)inst)) {  // NOLINT
-    inst = (::pybind11::detail::instance*)PyTuple_GET_ITEM(inst, 0);
+    inst = reinterpret_cast<::pybind11::detail::instance*>(
+        PyTuple_GET_ITEM(inst, 0));
   }
 
   if (inst == nullptr || (PyObject*)inst == Py_None) {  // NOLINT
@@ -999,7 +1001,8 @@ std::vector<std::shared_ptr<imperative::VarBase>> GetVarBaseListFromArgs(
     }
     ::pybind11::detail::instance* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
-      item = (::pybind11::detail::instance*)PyList_GetItem(list, i);
+      item = reinterpret_cast<::pybind11::detail::instance*>(
+          PyList_GetItem(list, i));
       if (!PyObject_TypeCheck((PyObject*)item, g_varbase_pytype)) {  // NOLINT
         PADDLE_THROW(platform::errors::InvalidArgument(
             "%s(): argument '%s' (position %d) must be list of Tensors, but "
diff --git a/paddle/ir/core/value.cc b/paddle/ir/core/value.cc
index 666be5481c4182..1dfa9f23c3196e 100644
--- a/paddle/ir/core/value.cc
+++ b/paddle/ir/core/value.cc
@@ -224,7 +224,7 @@ ir::Operation *OpResultImpl::owner() const {
   // For outline result, pointer offset outline_index to obtain the address of
   // maximum inline result.
   const OpOutlineResultImpl *outline_result =
-      (const OpOutlineResultImpl *)(this);
+      reinterpret_cast<const OpOutlineResultImpl *>(this);
   outline_result +=
       (outline_result->outline_index_ - GetMaxInlineResultIndex());
   // The offset of the maximum inline result distance op is
diff --git a/paddle/phi/api/profiler/device_tracer.cc b/paddle/phi/api/profiler/device_tracer.cc
index da7d8050110811..77d3829d80c11d 100644
--- a/paddle/phi/api/profiler/device_tracer.cc
+++ b/paddle/phi/api/profiler/device_tracer.cc
@@ -73,9 +73,10 @@ uint64_t kAlignSize = 8;
 std::unordered_map<int, std::string> runtime_cbid_str, driver_cbid_str;
 
-#define ALIGN_BUFFER(buffer, align)                                 \
-  (((uintptr_t)(buffer) & ((align)-1))                              \
-       ? ((buffer) + (align) - ((uintptr_t)(buffer) & ((align)-1))) \
-       : (buffer))
+#define ALIGN_BUFFER(buffer, align)                             \
+  ((reinterpret_cast<uintptr_t>(buffer) & ((align)-1))          \
+       ? ((buffer) + (align) -                                  \
+          (reinterpret_cast<uintptr_t>(buffer) & ((align)-1)))  \
+       : (buffer))
 
 #define CUPTI_CALL(call) \
diff --git a/paddle/phi/core/distributed/store/socket.cpp b/paddle/phi/core/distributed/store/socket.cpp
index 122ab124dae82a..68a192caaecd8c 100644
--- a/paddle/phi/core/distributed/store/socket.cpp
+++ b/paddle/phi/core/distributed/store/socket.cpp
@@ -51,7 +51,7 @@ static int _get_sockname(int sock, char *out, int out_len) {
     port = ntohs(s->sin_port);
     ::inet_ntop(AF_INET, &s->sin_addr, ip, sizeof(ip));
   } else {  // AF_INET6
-    struct sockaddr_in6 *s = (struct sockaddr_in6 *)&addr;
+    struct sockaddr_in6 *s = reinterpret_cast<struct sockaddr_in6 *>(&addr);
     port = ntohs(s->sin6_port);
     ::inet_ntop(AF_INET6, &s->sin6_addr, ip, sizeof(ip));
   }
diff --git a/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc b/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
index c0728874ebe90d..909a84a7879ee0 100644
--- a/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
+++ b/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
@@ -63,11 +63,11 @@ void Array2Poly(const T* box,
   (*poly).num_contours = 1;
   (*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
   (*poly).hole[0] = 0;
-  (*poly).contour =
-      (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
+  (*poly).contour = reinterpret_cast<phi::funcs::gpc_vertex_list*>(
+      malloc(sizeof(phi::funcs::gpc_vertex_list)));
   (*poly).contour->num_vertices = pts_num;
-  (*poly).contour->vertex =
-      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
+  (*poly).contour->vertex = reinterpret_cast<phi::funcs::gpc_vertex*>(
+      malloc(sizeof(phi::funcs::gpc_vertex) * pts_num));
   for (size_t i = 0; i < pts_num; ++i) {
     (*poly).contour->vertex[i].x = box[2 * i];
     (*poly).contour->vertex[i].y = box[2 * i + 1];
@@ -81,11 +81,11 @@ void PointVec2Poly(const std::vector<Point_<T>>& vec,
   (*poly).num_contours = 1;
   (*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
   (*poly).hole[0] = 0;
-  (*poly).contour =
-      (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
+  (*poly).contour = reinterpret_cast<phi::funcs::gpc_vertex_list*>(
+      malloc(sizeof(phi::funcs::gpc_vertex_list)));
   (*poly).contour->num_vertices = pts_num;
-  (*poly).contour->vertex =
-      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
+  (*poly).contour->vertex = reinterpret_cast<phi::funcs::gpc_vertex*>(
+      malloc(sizeof(phi::funcs::gpc_vertex) * pts_num));
   for (size_t i = 0; i < pts_num; ++i) {
     (*poly).contour->vertex[i].x = vec[i].x;
     (*poly).contour->vertex[i].y = vec[i].y;
diff --git a/paddle/phi/kernels/funcs/sequence_padding.cc b/paddle/phi/kernels/funcs/sequence_padding.cc
index ffd8622b094081..df9e021f0d7128 100644
--- a/paddle/phi/kernels/funcs/sequence_padding.cc
+++ b/paddle/phi/kernels/funcs/sequence_padding.cc
@@ -89,7 +89,7 @@ static void fast_mem_init(void* dest,
   while (dest_size > num_bytes) {
     size_t remaining = dest_size - num_bytes;
     size_t count = (remaining > num_bytes) ? num_bytes : remaining;
-    memcpy((unsigned char*)dest + num_bytes, dest, count);
+    memcpy(reinterpret_cast<unsigned char*>(dest) + num_bytes, dest, count);
     num_bytes += count;
   }
 }
diff --git a/paddle/utils/mman_patch.h b/paddle/utils/mman_patch.h
new file mode 100644
index 00000000000000..e95a57d174104e
--- /dev/null
+++ b/paddle/utils/mman_patch.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#pragma once
+
+// Define sys/mman.h Macros instead of <sys/mman.h> to avoid
+// cppcoreguidelines-pro-type-cstyle-cast
+
+/* Return value of `mmap' in case of an error.  */
+#define MAP_FAILED_s (reinterpret_cast<void *>(-1))
+
+#ifdef MAP_FAILED
+#undef MAP_FAILED
+#define MAP_FAILED MAP_FAILED_s
+#endif
diff --git a/paddle/utils/python_patch.h b/paddle/utils/python_patch.h
new file mode 100644
index 00000000000000..a6796601c71d96
--- /dev/null
+++ b/paddle/utils/python_patch.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#pragma once
+
+// Define Python Macros instead of <Python.h> to avoid
+// cppcoreguidelines-pro-type-cstyle-cast
+
+#ifndef PYTHON_CLANGTIDY_PATCH_FLAG
+#define PYTHON_CLANGTIDY_PATCH_FLAG
+
+#define _PyObject_CAST_s(op) (reinterpret_cast<PyObject *>(op))
+
+#ifdef _PyObject_CAST
+#undef _PyObject_CAST
+#define _PyObject_CAST(op) _PyObject_CAST_s(op)
+#endif
+
+/* Cast argument to PyVarObject* type. */
+#define _PyVarObject_CAST_s(op) (reinterpret_cast<PyVarObject *>(op))
+
+#define Py_SIZE_s(ob) (_PyVarObject_CAST_s(ob)->ob_size)
+#define Py_TYPE_s(ob) ((reinterpret_cast<PyObject *>(ob))->ob_type)
+
+#ifdef Py_TYPE
+#undef Py_TYPE
+#define Py_TYPE(ob) Py_TYPE_s(ob)
+#endif
+
+// *********************
+// *      PyList       *
+// *********************
+#define PyList_Check_s(op) \
+  PyType_FastSubclass(Py_TYPE_s(op), Py_TPFLAGS_LIST_SUBCLASS)
+
+#define PyList_GET_ITEM_s(op, i) \
+  ((reinterpret_cast<PyListObject *>(op))->ob_item[i])
+#define PyList_SET_ITEM_s(op, i, v) \
+  ((reinterpret_cast<PyListObject *>(op))->ob_item[i] = (v))
+#define PyList_GET_SIZE_s(op) (assert(PyList_Check_s(op)), Py_SIZE_s(op))
+#define _PyList_ITEMS_s(op) ((reinterpret_cast<PyListObject *>(op))->ob_item)
+
+#ifdef PyList_GET_ITEM
+#undef PyList_GET_ITEM
+#define PyList_GET_ITEM(op, i) PyList_GET_ITEM_s(op, i)
+#endif
+
+#ifdef PyList_SET_ITEM
+#undef PyList_SET_ITEM
+#define PyList_SET_ITEM(op, i, v) PyList_SET_ITEM_s(op, i, v)
+#endif
+
+#ifdef PyList_GET_SIZE
+#undef PyList_GET_SIZE
+#define PyList_GET_SIZE(op) PyList_GET_SIZE_s(op)
+#endif
+
+#ifdef _PyList_ITEMS
+#undef _PyList_ITEMS
+#define _PyList_ITEMS(op) _PyList_ITEMS_s(op)
+#endif
+
+// *********************
+// *      PyTuple      *
+// *********************
+
+/* Cast argument to PyTupleObject* type. */
+#define _PyTuple_CAST_s(op) \
+  (assert(PyTuple_Check(op)), reinterpret_cast<PyTupleObject *>(op))
+#define PyTuple_GET_SIZE_s(op) Py_SIZE_s(_PyTuple_CAST_s(op))
+#define PyTuple_GET_ITEM_s(op, i) (_PyTuple_CAST_s(op)->ob_item[i])
+/* Macro, *only* to be used to fill in brand new tuples */
+#define PyTuple_SET_ITEM_s(op, i, v) (_PyTuple_CAST_s(op)->ob_item[i] = v)
+
+#ifdef PyTuple_GET_SIZE
+#undef PyTuple_GET_SIZE
+#define PyTuple_GET_SIZE(op) PyTuple_GET_SIZE_s(op)
+#endif
+
+#ifdef PyTuple_GET_ITEM
+#undef PyTuple_GET_ITEM
+#define PyTuple_GET_ITEM(op, i) PyTuple_GET_ITEM_s(op, i)
+#endif
+
+#ifdef PyTuple_SET_ITEM
+#undef PyTuple_SET_ITEM
+#define PyTuple_SET_ITEM(op, i, v) PyTuple_SET_ITEM_s(op, i, v)
+#endif
+
+// *********************
+// *      Py_Bool      *
+// *********************
+
+#define Py_False_s (reinterpret_cast<PyObject *>(&_Py_FalseStruct))
+#define Py_True_s (reinterpret_cast<PyObject *>(&_Py_TrueStruct))
+
+#ifdef Py_False
+#undef Py_False
+#define Py_False Py_False_s
+#endif
+
+#ifdef Py_True
+#undef Py_True
+#define Py_True Py_True_s
+#endif
+
+#endif
diff --git a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
index 5f43927e93361f..209c9c574793d6 100644
--- a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
+++ b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
@@ -40,7 +40,8 @@ typedef struct RunParameter {
 } RunParameter;
 
 void* run(void* thread_param) {
-  struct RunParameter* param = (struct RunParameter*)thread_param;
+  struct RunParameter* param =
+      reinterpret_cast<struct RunParameter*>(thread_param);
   LOG(INFO) << "Thread " << param->thread_index << " start run!";
   PD_OneDimArrayCstr* input_names = PD_PredictorGetInputNames(param->predictor);
   PD_Tensor* tensor =