
Commit 92b11e2

Merge pull request vllm-project#9 from TTNTech/fenglui-patch-throughput
uvloop fix for throughput.py
2 parents: d9fec81 + 9fceb60

File tree

1 file changed: +11 −2 lines


vllm/benchmarks/throughput.py

Lines changed: 11 additions & 2 deletions
@@ -10,7 +10,16 @@
 from typing import Any, Optional, Union
 
 import torch
-import uvloop
+import platform
+if platform.system() == "Windows":
+    import winloop as uvloop_impl
+    # Windows does not support fork
+    os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
+
+    # Disable libuv on Windows by default
+    os.environ["USE_LIBUV"] = os.environ.get("USE_LIBUV", "0")
+else:
+    import uvloop as uvloop_impl
 from tqdm import tqdm
 from transformers import (AutoModelForCausalLM, AutoTokenizer,
                           PreTrainedTokenizerBase)
@@ -537,7 +546,7 @@ def main(args: argparse.Namespace):
     request_outputs: Optional[list[RequestOutput]] = None
     if args.backend == "vllm":
         if args.async_engine:
-            elapsed_time = uvloop.run(
+            elapsed_time = uvloop_impl.run(
                 run_vllm_async(
                     requests,
                     args.n,
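
For reference, a minimal standalone sketch of the pattern the patch applies: pick winloop on Windows and uvloop elsewhere under a common uvloop_impl alias, set the Windows-specific environment defaults up front, and keep the call site identical on every platform. Only the import/alias logic and the two environment variables come from the diff above; the timed_noop coroutine is a hypothetical stand-in for run_vllm_async.

import asyncio
import os
import platform

if platform.system() == "Windows":
    # winloop provides a uvloop-compatible event loop on Windows.
    import winloop as uvloop_impl

    # Windows does not support fork; use spawn for worker processes (as in the patch).
    os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
    # Disable libuv on Windows by default, unless the user has already set it.
    os.environ["USE_LIBUV"] = os.environ.get("USE_LIBUV", "0")
else:
    import uvloop as uvloop_impl


async def timed_noop() -> float:
    # Hypothetical stand-in for run_vllm_async(): times a trivial await.
    loop = asyncio.get_running_loop()
    start = loop.time()
    await asyncio.sleep(0)
    return loop.time() - start


if __name__ == "__main__":
    # Both libraries expose the run() entry point the benchmark relies on,
    # so the caller never needs to know which loop implementation was imported.
    elapsed_time = uvloop_impl.run(timed_noop())
    print(f"elapsed: {elapsed_time:.6f}s")

Because the alias hides the platform check, the benchmark body changes in exactly one place (uvloop.run → uvloop_impl.run), which is what the second hunk of the diff does.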
