Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【paddle_test No.40】replace of cc_test with paddle_test #61945

Merged
merged 8 commits into from
Feb 26, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 0 additions & 11 deletions paddle/fluid/framework/ir/memory_optimize_pass/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -41,11 +41,6 @@ if(WITH_CINN)
share_varinfo_into_cinn_pass
SRCS share_varinfo_into_cinn_pass.cc
DEPS pass enforce common graph_helper)
cc_test(
share_varinfo_into_cinn_pass_test
SRCS share_varinfo_into_cinn_pass_test.cc
DEPS share_varinfo_into_cinn_pass parallel_executor elementwise_add_op
mul_op cinn_launch_op)
list(APPEND EAGER_DELETETION_PASS_DEPS share_varinfo_into_cinn_pass)
endif()

Expand All @@ -72,9 +67,3 @@ cc_library(
inplace_addto_op_pass
SRCS inplace_addto_op_pass.cc
DEPS memory_reuse_pass)

cc_test(
test_reference_count_pass_last_lived_ops
SRCS test_reference_count_pass_last_lived_ops.cc
DEPS parallel_executor elementwise_mul_op elementwise_add_op generated_op phi
common)
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,6 @@
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/kernel_registry.h"

USE_OP_ITSELF(scale);
USE_OP_ITSELF(elementwise_mul);
USE_OP_ITSELF(elementwise_add);
USE_OP_ITSELF(elementwise_add_grad);

PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);

COMMON_DECLARE_double(eager_delete_tensor_gb);

namespace paddle {
Expand Down
20 changes: 10 additions & 10 deletions paddle/fluid/framework/parallel_executor.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,14 +52,14 @@ class ParallelExecutor {
DISABLE_COPY_AND_ASSIGN(ParallelExecutor);

public:
explicit ParallelExecutor(const std::vector<platform::Place> &places,
const std::vector<std::string> &bcast_vars,
const std::string &loss_var_name,
Scope *scope,
const std::vector<Scope *> &local_scopes,
const ExecutionStrategy &exec_strategy,
const BuildStrategy &build_strategy,
ir::Graph *graph);
TEST_API explicit ParallelExecutor(const std::vector<platform::Place> &places,
const std::vector<std::string> &bcast_vars,
const std::string &loss_var_name,
Scope *scope,
const std::vector<Scope *> &local_scopes,
const ExecutionStrategy &exec_strategy,
const BuildStrategy &build_strategy,
ir::Graph *graph);

// NOTE(Aurelius84): Construct a PE running on single device for @to_static
explicit ParallelExecutor(const platform::Place &place,
Expand All @@ -68,7 +68,7 @@ class ParallelExecutor {
const BuildStrategy &build_strategy,
ir::Graph *graph);

~ParallelExecutor();
TEST_API ~ParallelExecutor();

size_t DeviceCount() const;

Expand Down Expand Up @@ -98,7 +98,7 @@ class ParallelExecutor {
void ResetOpHandleScopeMapOfGraphs(
const std::unordered_map<Scope *, Scope *> &scope_map);

const ir::Graph &Graph() const;
TEST_API const ir::Graph &Graph() const;
void PrepareVariables(Scope *scope);

void SkipMemoryReuse(size_t scope_idx,
Expand Down
2 changes: 2 additions & 0 deletions test/cpp/fluid/framework/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -346,3 +346,5 @@ cc_test(
workqueue_test
SRCS new_executor/workqueue_test.cc
DEPS standalone_executor)

add_subdirectory(ir)
1 change: 1 addition & 0 deletions test/cpp/fluid/framework/ir/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Descend into the memory_optimize_pass test directory (tests migrated from
# paddle/fluid/framework/ir/memory_optimize_pass in this change).
add_subdirectory(memory_optimize_pass)
14 changes: 14 additions & 0 deletions test/cpp/fluid/framework/ir/memory_optimize_pass/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Tests for the memory-optimize IR passes, built with paddle_test (links
# against the shared paddle lib instead of per-target DEPS).
if(WITH_CINN)
  paddle_test(share_varinfo_into_cinn_pass_test SRCS
              share_varinfo_into_cinn_pass_test.cc)
  # NOTE(review): this append mirrors the line removed from the old
  # paddle/fluid/.../memory_optimize_pass/CMakeLists.txt, but list(APPEND)
  # only mutates the variable in the current directory scope — confirm it
  # still has any effect now that the test lives under test/cpp.
  list(APPEND EAGER_DELETETION_PASS_DEPS share_varinfo_into_cinn_pass)
endif()

paddle_test(test_reference_count_pass_last_lived_ops SRCS
            test_reference_count_pass_last_lived_ops.cc DEPS common)

if(WITH_ONNXRUNTIME AND WIN32)
  # Copy onnxruntime for some c++ test in Windows, since the test will
  # be build only in CI, so suppose the generator in Windows is Ninja.
  copy_onnx(test_reference_count_pass_last_lived_ops)
endif()
Original file line number Diff line number Diff line change
@@ -0,0 +1,154 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/eager_deletion_op_handle.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/paddle2cinn/build_cinn_pass.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiler.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/program_desc.h"

USE_OP_ITSELF(mul);
USE_OP_ITSELF(elementwise_add);

USE_OP_ITSELF(cinn_launch);
PD_DECLARE_KERNEL(cinn_launch, CPU, ALL_LAYOUT);
#ifdef PADDLE_WITH_CUDA
PD_DECLARE_KERNEL(cinn_launch, GPU, ALL_LAYOUT);
#endif

namespace paddle::framework {

using Name2VarInfoMap =
std::unordered_map<std::string, std::shared_ptr<ir::MemOptVarInfo>>;

// Builds the program that conceptually lives *inside* a cinn_launch op:
//   var3 = elementwise_add(var1, var2)
//   var5 = mul(var3, var4)
static ProgramDesc BuildProgramInsideCinnLaunchOp() {
  ProgramDesc program;
  auto* main_block = program.MutableBlock(0);
  // Declare every variable the two ops touch.
  for (const std::string& var_name :
       {"var1", "var2", "var3", "var4", "var5"}) {
    main_block->Var(var_name);
  }

  main_block->AppendAllocatedOp(
      std::unique_ptr<OpDesc>(new OpDesc("elementwise_add",
                                         {{"X", {"var1"}}, {"Y", {"var2"}}},
                                         {{"Out", {"var3"}}},
                                         {})));
  main_block->AppendAllocatedOp(
      std::unique_ptr<OpDesc>(new OpDesc("mul",
                                         {{"X", {"var3"}}, {"Y", {"var4"}}},
                                         {{"Out", {"var5"}}},
                                         {})));
  return program;
}

// Builds a one-op program containing a single cinn_launch op whose subgraph
// is looked up via `compilation_key`. Only the op's external inputs
// (var1/var2/var4) and output (var5) are declared here; var3 is not visible
// at this level.
static ProgramDesc BuildProgramWithCinnLaunchOp(int64_t compilation_key) {
  ProgramDesc program;
  auto* main_block = program.MutableBlock(0);
  for (const std::string& var_name : {"var1", "var2", "var4", "var5"}) {
    main_block->Var(var_name);
  }

  main_block->AppendAllocatedOp(std::unique_ptr<OpDesc>(
      new OpDesc("cinn_launch",
                 {{"X", {"var1", "var2", "var4"}}},
                 {{"Out", {"var5"}}},
                 {{"compilation_key", compilation_key}})));
  return program;
}

struct TestPassContext {
explicit TestPassContext(const ProgramDesc& program) {
graph = std::make_unique<ir::Graph>(program);
details::BuildStrategy build_strategy;
details::ExecutionStrategy exec_strategy;
exec_strategy.use_device_ = paddle::platform::kCUDA;
executor.reset(new ParallelExecutor(platform::CUDAPlace(0),
&scope,
exec_strategy,
build_strategy,
graph.get()));
}

Scope scope;
std::unique_ptr<ir::Graph> graph;
std::unique_ptr<ParallelExecutor> executor;
};

// Checks that building a ParallelExecutor over a program containing a
// cinn_launch op shares the MemOptVarInfo of the op's in/out variables into
// the subgraph previously registered with CinnCompiler.
TEST(ShareMemInfoToSubGraphPassTest, test_main_graph_share_varinfo) {
  // add a subgraph to CinnCompiler
  auto subgraph = std::make_unique<ir::Graph>(BuildProgramInsideCinnLaunchOp());
  subgraph->GetOrInit<Name2VarInfoMap>(
      paddle2cinn::kMemOptVarInfoFromMainGraph);
  auto compilation_key =
      paddle2cinn::CinnCompiler::GetInstance()->AddGraph(std::move(subgraph));

  // build test data and apply pass
  auto context = std::make_unique<TestPassContext>(
      BuildProgramWithCinnLaunchOp(compilation_key));

  // check result
  const ir::Graph& result_subgraph =
      paddle2cinn::CinnCompiler::GetInstance()->FindGraph(compilation_key);
  const auto& dst_varinfo_map = result_subgraph.Get<Name2VarInfoMap>(
      paddle2cinn::kMemOptVarInfoFromMainGraph);
  // The cinn_launch op reads var1/var2/var4 and writes var5 -> 4 entries.
  ASSERT_EQ(dst_varinfo_map.size(), 4);
  EXPECT_EQ(dst_varinfo_map.count("var1"), 1);
  EXPECT_EQ(dst_varinfo_map.count("var5"), 1);
  // use_count == 2: presumably one reference from the main graph's info map
  // and one from the subgraph's shared map — TODO confirm against the pass.
  EXPECT_EQ(dst_varinfo_map.at("var1").use_count(), 2);
  EXPECT_EQ(dst_varinfo_map.at("var5").use_count(), 2);
}

// Checks the opposite direction: applying share_varinfo_into_cinn_pass to a
// graph that already carries main-graph var infos links the pass-local
// MemOptVarInfo entries (var1/var2) to those parents, while untouched vars
// (var3/var4/var5) keep a null parent holder.
TEST(ShareMemInfoToSubGraphPassTest, test_subgraph_take_varinfo) {
  // build test data and apply pass
  auto context =
      std::make_unique<TestPassContext>(BuildProgramInsideCinnLaunchOp());
  auto& varinfo_map_shared = context->graph->GetOrInit<Name2VarInfoMap>(
      paddle2cinn::kMemOptVarInfoFromMainGraph);
  // Simulate infos handed down from the main graph for var1 and var2 only.
  varinfo_map_shared = {
      {"var1", std::make_shared<ir::MemOptVarInfo>("var1", 1)},
      {"var2", std::make_shared<ir::MemOptVarInfo>("var2", 2)},
  };

  // One scope's worth of local var infos covering all five variables.
  ir::MemOptVarInfoMapList varinfo_maps(1);
  auto& dst_varinfo_map = varinfo_maps.front();
  dst_varinfo_map = {{"var1", std::make_shared<ir::MemOptVarInfo>("var1", 1)},
                     {"var2", std::make_shared<ir::MemOptVarInfo>("var2", 1)},
                     {"var3", std::make_shared<ir::MemOptVarInfo>("var3", 1)},
                     {"var4", std::make_shared<ir::MemOptVarInfo>("var4", 1)},
                     {"var5", std::make_shared<ir::MemOptVarInfo>("var5", 1)}};
  auto share_pass =
      ir::PassRegistry::Instance().Get("share_varinfo_into_cinn_pass");
  share_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &varinfo_maps);
  share_pass->Apply(context->graph.get());

  // check result
  // var1/var2 were present in the shared map -> parent holder is set.
  ASSERT_NE(dst_varinfo_map.at("var1")->ParentHolder(), nullptr);
  ASSERT_NE(dst_varinfo_map.at("var2")->ParentHolder(), nullptr);
  // var3/var4/var5 had no shared counterpart -> no parent holder.
  ASSERT_EQ(dst_varinfo_map.at("var3")->ParentHolder(), nullptr);
  ASSERT_EQ(dst_varinfo_map.at("var4")->ParentHolder(), nullptr);
  ASSERT_EQ(dst_varinfo_map.at("var5")->ParentHolder(), nullptr);
}

} // namespace paddle::framework
Loading