Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[clang-tidy] NO.25 enable modernize-use-transparent-functors #61689

Merged
merged 10 commits into from
Feb 26, 2024
2 changes: 1 addition & 1 deletion paddle/common/ddim.cc
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ DDim DDim::reshape(std::vector<int>& shape) const {
if (it != shape.end()) {
int index = static_cast<int>(std::distance(shape.begin(), it));
int reshape_out_product =
std::accumulate(shape.begin(), shape.end(), -1, std::multiplies<int>());
std::accumulate(shape.begin(), shape.end(), -1, std::multiplies<>());
shape[index] = static_cast<int>(product(in_dims)) / reshape_out_product;
}

Expand Down
6 changes: 3 additions & 3 deletions paddle/phi/infermeta/spmd_rules/reshape.cc
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ std::vector<int64_t> InferTargetShape(const std::vector<int64_t>& shape,
}
}

int64_t product = std::accumulate(
shape.begin(), shape.end(), 1, std::multiplies<int64_t>());
int64_t product =
std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
if (product > 0) {
PADDLE_ENFORCE_EQ(
product,
Expand All @@ -72,7 +72,7 @@ std::vector<std::shared_ptr<DimTrans>> MakeReshapeDimTrans(
const std::vector<int64_t>& tgt_shape) {
std::vector<std::shared_ptr<DimTrans>> ret;
int64_t total_elem_num_src = std::accumulate(
src_shape.begin(), src_shape.end(), 1, std::multiplies<int64_t>());
src_shape.begin(), src_shape.end(), 1, std::multiplies<>());
std::vector<int64_t> inferred_tgt_shape =
InferTargetShape(tgt_shape, total_elem_num_src);

Expand Down
16 changes: 8 additions & 8 deletions paddle/pir/src/core/ir_printer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ void BasicIrPrinter::PrintType(Type type) {
} else if (type.isa<VectorType>()) {
os << "vec[";
auto inner_types = type.dyn_cast<VectorType>().data();
detail::PrintInterleave(
pir::detail::PrintInterleave(
inner_types.begin(),
inner_types.end(),
[this](Type v) { this->PrintType(v); },
Expand Down Expand Up @@ -132,7 +132,7 @@ void BasicIrPrinter::PrintAttribute(Attribute attr) {
} else if (auto arr = attr.dyn_cast<ArrayAttribute>()) {
const auto& vec = arr.AsVector();
os << "[";
detail::PrintInterleave(
pir::detail::PrintInterleave(
vec.begin(),
vec.end(),
[this](Attribute v) { this->PrintAttribute(v); },
Expand Down Expand Up @@ -256,7 +256,7 @@ void IrPrinter::PrintOpResult(Operation* op) {
for (size_t idx = 0; idx < num_op_result; idx++) {
op_results.push_back(op->result(idx));
}
detail::PrintInterleave(
pir::detail::PrintInterleave(
op_results.begin(),
op_results.end(),
[this](Value v) { this->PrintValue(v); },
Expand All @@ -266,11 +266,11 @@ void IrPrinter::PrintOpResult(Operation* op) {

void IrPrinter::PrintAttributeMap(Operation* op) {
AttributeMap attributes = op->attributes();
std::map<std::string, Attribute, std::less<std::string>> order_attributes(
std::map<std::string, Attribute, std::less<>> order_attributes(
attributes.begin(), attributes.end());
os << " {";

detail::PrintInterleave(
pir::detail::PrintInterleave(
order_attributes.begin(),
order_attributes.end(),
[this](std::pair<std::string, Attribute> it) {
Expand All @@ -291,7 +291,7 @@ void IrPrinter::PrintOpOperands(Operation* op) {
for (size_t idx = 0; idx < num_op_operands; idx++) {
op_operands.push_back(op->operand_source(idx));
}
detail::PrintInterleave(
pir::detail::PrintInterleave(
op_operands.begin(),
op_operands.end(),
[this](Value v) { this->PrintValue(v); },
Expand All @@ -312,7 +312,7 @@ void IrPrinter::PrintOperandsType(Operation* op) {
}
}
os << " (";
detail::PrintInterleave(
pir::detail::PrintInterleave(
op_operand_types.begin(),
op_operand_types.end(),
[this](Type t) { this->PrintType(t); },
Expand All @@ -332,7 +332,7 @@ void IrPrinter::PrintOpReturnType(Operation* op) {
op_result_types.emplace_back(nullptr);
}
}
detail::PrintInterleave(
pir::detail::PrintInterleave(
op_result_types.begin(),
op_result_types.end(),
[this](Type t) { this->PrintType(t); },
Expand Down
2 changes: 1 addition & 1 deletion test/cpp/fluid/memory/thread_local_allocator_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ TEST(ThreadLocalAllocator, cross_scope_release) {
for (auto &addresses : allocator_addresses) {
std::sort(addresses.begin(), addresses.end());
ASSERT_EQ(std::adjacent_find(
addresses.begin(), addresses.end(), std::equal_to<void *>()),
addresses.begin(), addresses.end(), std::equal_to<>()),
addresses.end());
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ void PD_run() {
int32_t out_num = std::accumulate(output_shape->data,
output_shape->data + output_shape->size,
1,
std::multiplies<int32_t>());
std::multiplies<>());
out_data.resize(out_num);
PD_TensorCopyToCpuFloat(output_tensor, out_data.data());
LOG(INFO) << "Output tensor name is: " << PD_TensorGetName(output_tensor);
Expand Down