Merged
.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh (1 change: 1 addition, 0 deletions)

@@ -19,6 +19,7 @@ docker run --privileged --net host --shm-size=16G -it \
 vllm-tpu /bin/bash -c "python3 -m pip install git+https://github.com/thuml/depyf.git \
 && python3 -m pip install pytest pytest-asyncio tpu-info \
 && python3 -m pip install lm_eval[api]==0.4.4 \
+&& export VLLM_XLA_CACHE_PATH= \
 && export VLLM_USE_V1=1 \
 && export VLLM_XLA_CHECK_RECOMPILATION=1 \
 && echo HARDWARE \
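Note: the exports work together. Clearing VLLM_XLA_CACHE_PATH appears to disable the persistent XLA compilation cache so each CI run compiles from a cold start, which makes the VLLM_XLA_CHECK_RECOMPILATION=1 guard meaningful. A minimal sketch of the same environment expressed as a pytest fixture; the fixture itself is hypothetical and not part of this PR:

```python
# Hypothetical autouse fixture mirroring the shell exports above.
import pytest


@pytest.fixture(autouse=True)
def tpu_ci_env(monkeypatch: pytest.MonkeyPatch):
    # Assumption: an empty VLLM_XLA_CACHE_PATH disables the persistent
    # XLA compilation cache, forcing a cold compile on every run.
    monkeypatch.setenv("VLLM_XLA_CACHE_PATH", "")
    # Run the V1 engine.
    monkeypatch.setenv("VLLM_USE_V1", "1")
    # Fail loudly if XLA recompiles after the warmup phase.
    monkeypatch.setenv("VLLM_XLA_CHECK_RECOMPILATION", "1")
```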
tests/v1/entrypoints/llm/test_struct_output_generate.py (6 changes: 5 additions, 1 deletion)

@@ -13,6 +13,7 @@
 
 from vllm.entrypoints.llm import LLM
 from vllm.outputs import RequestOutput
+from vllm.platforms import current_platform
 from vllm.sampling_params import GuidedDecodingParams, SamplingParams
 
 PARAMS_MODELS_BACKENDS_TOKENIZER_MODE = [
@@ -63,10 +64,13 @@ def test_structured_output(
 ):
     monkeypatch.setenv("VLLM_USE_V1", "1")
 
+    # Don't use eager execution on TPUs because we want to test for no
+    # recompilation at runtime
+    enforce_eager = bool(not current_platform.is_tpu())
     # Use a single LLM instance for several scenarios to
     # speed up the test suite.
     llm = LLM(model=model_name,
-              enforce_eager=True,
+              enforce_eager=enforce_eager,
               max_model_len=1024,
               guided_decoding_backend=guided_decoding_backend,
               tokenizer_mode=tokenizer_mode)
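The platform-dependent toggle generalizes to any test that should stay compiled on TPU (so the recompilation guard can fire) but run eagerly elsewhere for fast start-up. A self-contained sketch of the pattern; the model name is a placeholder, not one of the models parametrized in this test:

```python
# Sketch of the platform-dependent eager toggle introduced above.
# The model name is a placeholder assumption, not from this PR.
from vllm.entrypoints.llm import LLM
from vllm.platforms import current_platform

# On TPU, keep compiled execution so VLLM_XLA_CHECK_RECOMPILATION=1
# can catch runtime recompilation; elsewhere, eager mode keeps the
# test start-up cheap.
enforce_eager = not current_platform.is_tpu()

llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct",
          enforce_eager=enforce_eager,
          max_model_len=1024)
```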
tests/v1/tpu/test_sampler.py (2 changes: 1 addition, 1 deletion)

@@ -23,7 +23,7 @@ def test_sampler_different(model_name: str):
     different results.
     """
     llm = LLM(model_name,
-              enforce_eager=True,
+              enforce_eager=False,
               max_num_seqs=1,
               max_model_len=512,
               max_num_batched_tokens=512)
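With enforce_eager=False this sampler test now exercises the compiled TPU path. For reference, a hedged sketch of the property the test asserts, using only the public LLM and SamplingParams APIs; the model and prompt are placeholder assumptions:

```python
# Sketch: greedy vs. temperature sampling should disagree, which is
# the property test_sampler_different checks. Model and prompt are
# placeholder assumptions, not taken from the test.
from vllm import LLM, SamplingParams

llm = LLM("Qwen/Qwen2.5-1.5B-Instruct",
          enforce_eager=False,
          max_num_seqs=1,
          max_model_len=512,
          max_num_batched_tokens=512)

prompt = "Write a short story about a robot."
greedy = llm.generate([prompt], SamplingParams(temperature=0.0))[0]
varied = llm.generate([prompt], SamplingParams(temperature=0.8))[0]
assert greedy.outputs[0].text != varied.outputs[0].text
```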