vllm/engine/llm_engine.py (7 changes: 4 additions & 3 deletions)

@@ -387,7 +387,7 @@ def _process_sequence_group_samples(
             child_seqs.append((parent, parent))

         for seq, _ in child_seqs:
-            self._decode_sequence(seq)
+            self._decode_sequence(seq, seq_group.sampling_params)
             self._check_stop(seq, seq_group.sampling_params)

         # Non-beam search case

@@ -621,7 +621,8 @@ def _log_system_stats(
                 f"CPU KV cache usage: {cpu_cache_usage * 100:.1f}%")
         self.last_logging_time = now

-    def _decode_sequence(self, seq: Sequence) -> None:
+    def _decode_sequence(self, seq: Sequence,
+                         sampling_params: SamplingParams) -> None:
         """Decodes the new token for a sequence."""
         (new_tokens, new_output_text, prefix_offset,
          read_offset) = detokenize_incrementally(

@@ -630,7 +631,7 @@ def _decode_sequence(self, seq: Sequence) -> None:
             prev_tokens=seq.tokens,
             prefix_offset=seq.prefix_offset,
             read_offset=seq.read_offset,
-            skip_special_tokens=True,
+            skip_special_tokens=sampling_params.skip_special_tokens,
         )
         if seq.tokens is None:
             seq.tokens = new_tokens
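The engine change threads the per-request flag through to the detokenizer instead of hard-coding skip_special_tokens=True. A minimal sketch of the behavior this exposes, using a HuggingFace tokenizer directly (vLLM's detokenize_incrementally wraps the same underlying tokenizer behavior; the model name is just an example):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Token ids that end with a special token (gpt2's EOS, <|endoftext|>).
ids = tokenizer.encode("Hello world") + [tokenizer.eos_token_id]

# skip_special_tokens=True (the old hard-coded behavior) drops the EOS marker.
print(tokenizer.decode(ids, skip_special_tokens=True))   # "Hello world"

# skip_special_tokens=False keeps it, which matters for clients that parse
# special tokens themselves (e.g. chat or tool-use markup).
print(tokenizer.decode(ids, skip_special_tokens=False))  # "Hello world<|endoftext|>"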
vllm/entrypoints/openai/api_server.py (2 changes: 2 additions & 0 deletions)

@@ -225,6 +225,7 @@ async def create_chat_completion(request: ChatCompletionRequest,
             top_k=request.top_k,
             ignore_eos=request.ignore_eos,
             use_beam_search=request.use_beam_search,
+            skip_special_tokens=request.skip_special_tokens,
         )
     except ValueError as e:
         return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

@@ -426,6 +427,7 @@ async def create_completion(request: CompletionRequest, raw_request: Request):
             max_tokens=request.max_tokens,
             logprobs=request.logprobs,
             use_beam_search=request.use_beam_search,
+            skip_special_tokens=request.skip_special_tokens,
         )
     except ValueError as e:
         return create_error_response(HTTPStatus.BAD_REQUEST, str(e))
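With the field plumbed through both endpoints, an OpenAI-compatible client can opt out per request. A hypothetical usage sketch, assuming a server started with python -m vllm.entrypoints.openai.api_server on the default port; the model name and prompt are illustrative:

import requests

# skip_special_tokens is optional; omitting it keeps the old behavior (True).
resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={
        "model": "facebook/opt-125m",   # example model name
        "prompt": "Hello",
        "max_tokens": 16,
        "skip_special_tokens": False,   # keep special tokens in the output text
    },
)
print(resp.json()["choices"][0]["text"])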
vllm/entrypoints/openai/protocol.py (2 changes: 2 additions & 0 deletions)

@@ -71,6 +71,7 @@ class ChatCompletionRequest(BaseModel):
     ignore_eos: Optional[bool] = False
     use_beam_search: Optional[bool] = False
     stop_token_ids: Optional[List[int]] = Field(default_factory=list)
+    skip_special_tokens: Optional[bool] = True


 class CompletionRequest(BaseModel):

@@ -96,6 +97,7 @@ class CompletionRequest(BaseModel):
     ignore_eos: Optional[bool] = False
     use_beam_search: Optional[bool] = False
     stop_token_ids: Optional[List[int]] = Field(default_factory=list)
+    skip_special_tokens: Optional[bool] = True


 class LogProbs(BaseModel):
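Because both request models default the new field to True, existing clients that omit it see no behavior change. A small sketch of the resulting Pydantic behavior, assuming the request models validate with just model and prompt set (field values are illustrative):

from vllm.entrypoints.openai.protocol import CompletionRequest

req = CompletionRequest(model="facebook/opt-125m", prompt="Hello")
assert req.skip_special_tokens is True   # default preserves old behavior

req = CompletionRequest(model="facebook/opt-125m", prompt="Hello",
                        skip_special_tokens=False)
assert req.skip_special_tokens is False  # explicit per-request opt-out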
vllm/sampling_params.py (7 changes: 6 additions & 1 deletion)

@@ -60,6 +60,8 @@ class SamplingParams:
             tokens after the EOS token is generated.
         max_tokens: Maximum number of tokens to generate per output sequence.
         logprobs: Number of log probabilities to return per output token.
+        skip_special_tokens: Whether to skip special tokens in the output.
+            Defaults to true.
     """

@@ -79,6 +81,7 @@ def __init__(
         ignore_eos: bool = False,
         max_tokens: int = 16,
         logprobs: Optional[int] = None,
+        skip_special_tokens: bool = True,
     ) -> None:
         self.n = n
         self.best_of = best_of if best_of is not None else n

@@ -103,6 +106,7 @@ def __init__(
         self.ignore_eos = ignore_eos
         self.max_tokens = max_tokens
         self.logprobs = logprobs
+        self.skip_special_tokens = skip_special_tokens

         self._verify_args()
         if self.use_beam_search:

@@ -196,4 +200,5 @@ def __repr__(self) -> str:
                 f"stop={self.stop}, "
                 f"ignore_eos={self.ignore_eos}, "
                 f"max_tokens={self.max_tokens}, "
-                f"logprobs={self.logprobs})")
+                f"logprobs={self.logprobs}, "
+                f"skip_special_tokens={self.skip_special_tokens})")