diff --git a/benchmarks/README.md b/benchmarks/README.md
index 1d715a193ea1..69d32e222819 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -194,6 +194,7 @@ vllm serve Qwen/Qwen2-VL-7B-Instruct
 ```bash
 vllm bench serve \
   --backend openai-chat \
+  --endpoint-type openai-chat \
   --model Qwen/Qwen2-VL-7B-Instruct \
   --endpoint /v1/chat/completions \
   --dataset-name hf \
@@ -230,6 +231,7 @@ vllm serve Qwen/Qwen2-VL-7B-Instruct
 ```bash
 vllm bench serve \
   --backend openai-chat \
+  --endpoint-type openai-chat \
   --model Qwen/Qwen2-VL-7B-Instruct \
   --endpoint /v1/chat/completions \
   --dataset-name hf \
@@ -244,6 +246,7 @@ vllm bench serve \
 ```bash
 vllm bench serve \
   --backend openai-chat \
+  --endpoint-type openai-chat \
   --model Qwen/Qwen2-VL-7B-Instruct \
   --endpoint /v1/chat/completions \
   --dataset-name hf \
diff --git a/vllm/benchmarks/datasets.py b/vllm/benchmarks/datasets.py
index 72d7ce49b8e1..29e6e166841d 100644
--- a/vllm/benchmarks/datasets.py
+++ b/vllm/benchmarks/datasets.py
@@ -716,10 +716,11 @@ def get_samples(args, tokenizer) -> list[SampleRequest]:
             "openai-chat",
             "openai-audio",
     ]:
-        # multi-modal benchmark is only available on OpenAI Chat backend.
+        # multi-modal benchmark is only available on OpenAI Chat
+        # endpoint-type.
         raise ValueError(
             "Multi-modal content is only supported on 'openai-chat' and "
-            "'openai-audio' backend.")
+            "'openai-audio' endpoint-type.")
     input_requests = dataset_class(
         dataset_path=args.dataset_path,
         dataset_subset=args.hf_subset,