Skip to content

Commit b3b2136

Browse files
Alvant authored and WoosukKwon committed
[TPU] Use Ray for default distributed backend (vllm-project#8389)
Signed-off-by: Alvant <alvasian@yandex.ru>
1 parent 56eef8b commit b3b2136

File tree

1 file changed

+7
-0
lines changed

1 file changed

+7
-0
lines changed

vllm/config.py

+7
Original file line number | Diff line number | Diff line change
@@ -869,6 +869,13 @@ def __init__(
869869
f"distributed executor backend "
870870
f"'{self.distributed_executor_backend}'.")
871871

872+
if current_platform.is_tpu() and self.world_size > 1:
873+
if self.distributed_executor_backend is None:
874+
self.distributed_executor_backend = "ray"
875+
if self.distributed_executor_backend != "ray":
876+
raise ValueError(
877+
"TPU backend only supports Ray for distributed inference.")
878+
872879
if self.distributed_executor_backend is None and self.world_size > 1:
873880
# We use multiprocessing by default if world_size fits on the
874881
# current node and we aren't in a ray placement group.

0 commit comments

Comments (0)