Skip to content

Commit b71c956

Browse files
authored
[TPU] Use Ray for default distributed backend (#8389)
1 parent f842a7a commit b71c956

File tree

1 file changed

+7
-0
lines changed

1 file changed

+7
-0
lines changed

vllm/config.py

+7
Original file line numberDiff line numberDiff line change
@@ -869,6 +869,13 @@ def __init__(
             f"distributed executor backend "
             f"'{self.distributed_executor_backend}'.")

+        if current_platform.is_tpu() and self.world_size > 1:
+            if self.distributed_executor_backend is None:
+                self.distributed_executor_backend = "ray"
+            if self.distributed_executor_backend != "ray":
+                raise ValueError(
+                    "TPU backend only supports Ray for distributed inference.")
+
         if self.distributed_executor_backend is None and self.world_size > 1:
             # We use multiprocessing by default if world_size fits on the
             # current node and we aren't in a ray placement group.

0 commit comments

Comments (0)