Skip to content

Commit 156f9fe

Browse files
authored
Migrate thread_parallel.cpp to ET_CHECK_OR_RETURN_FALSE (#8977)
It's convenient for thread_parallel not to depend on tensor_util, and that dependency was deprecated anyway.
1 parent f23971a commit 156f9fe

File tree

2 files changed

+25
-25
lines changed

2 files changed

+25
-25
lines changed

extension/parallel/targets.bzl

+17 -21
Original file line numberDiff line numberDiff line change
@@ -7,24 +7,20 @@ def define_common_targets():
77
TARGETS and BUCK files that call this function.
88
"""
99

10-
for aten_mode in get_aten_mode_options():
11-
aten_suffix = ("_aten" if aten_mode else "")
12-
13-
runtime.cxx_library(
14-
name = "thread_parallel" + aten_suffix,
15-
srcs = [
16-
"thread_parallel.cpp",
17-
],
18-
exported_headers = [
19-
"thread_parallel.h",
20-
],
21-
visibility = [
22-
"//executorch/...",
23-
"@EXECUTORCH_CLIENTS",
24-
],
25-
deps = [
26-
"//executorch/extension/threadpool:threadpool",
27-
"//executorch/runtime/core:core",
28-
"//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
29-
],
30-
)
10+
runtime.cxx_library(
11+
name = "thread_parallel",
12+
srcs = [
13+
"thread_parallel.cpp",
14+
],
15+
exported_headers = [
16+
"thread_parallel.h",
17+
],
18+
visibility = [
19+
"//executorch/...",
20+
"@EXECUTORCH_CLIENTS",
21+
],
22+
deps = [
23+
"//executorch/extension/threadpool:threadpool",
24+
"//executorch/runtime/core:core",
25+
],
26+
)

extension/parallel/thread_parallel.cpp

+8 -4
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,12 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <cinttypes>
910
#include <tuple>
1011

1112
#include <executorch/extension/parallel/thread_parallel.h>
1213
#include <executorch/extension/threadpool/threadpool.h>
13-
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
14+
#include <executorch/runtime/core/error.h>
1415
#include <executorch/runtime/platform/assert.h>
1516

1617
namespace executorch {
@@ -53,9 +54,12 @@ bool parallel_for(
5354
const int64_t end,
5455
const int64_t grain_size,
5556
const std::function<void(int64_t, int64_t)>& f) {
56-
ET_LOG_AND_RETURN_IF_FALSE(begin >= 0 && end >= 0);
57-
ET_LOG_AND_RETURN_IF_FALSE(end >= begin);
58-
ET_LOG_AND_RETURN_IF_FALSE(grain_size > 0);
57+
ET_CHECK_OR_RETURN_FALSE(
58+
begin >= 0 && end >= 0 && end >= begin,
59+
"begin = %" PRId64 ", end = %" PRId64,
60+
begin,
61+
end);
62+
ET_CHECK_OR_RETURN_FALSE(grain_size > 0, "grain_size = %" PRId64, grain_size);
5963
int64_t num_tasks = 0, chunk_size = 0;
6064
std::tie(num_tasks, chunk_size) =
6165
calc_num_tasks_and_chunk_size(begin, end, grain_size);

0 commit comments

Comments (0)