
Commit 9059886

Add CUDA block size check
Signed-off-by: Konrad Zawora <[email protected]>
Parent commit: 17aeab2

File tree: 2 files changed (+8, -1 lines)


vllm/config.py (5 additions, 0 deletions)

@@ -917,6 +917,11 @@ def _verify_args(self) -> None:
             raise ValueError(
                 "GPU memory utilization must be less than 1.0. Got "
                 f"{self.gpu_memory_utilization}.")
+        if (current_platform.is_cuda_alike()
+                and self.block_size not in [8, 16, 32]):
+            raise ValueError(
+                "CUDA Paged Attention kernel only supports "
+                f"block sizes [8, 16, 32]. Got {self.block_size}.")
 
     def _verify_cache_dtype(self) -> None:
         if self.cache_dtype == "auto":
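For context, here is a minimal standalone sketch of the behavior this hunk adds. The names SUPPORTED_CUDA_BLOCK_SIZES, is_cuda_alike (as a stub), and verify_block_size below are hypothetical stand-ins for illustration; in vLLM the check runs inside _verify_args and uses current_platform.is_cuda_alike(), which reports True on CUDA and CUDA-like platforms.

# Standalone sketch of the new validation; runnable outside vLLM.
SUPPORTED_CUDA_BLOCK_SIZES = [8, 16, 32]

def is_cuda_alike() -> bool:
    # Hypothetical stand-in for vLLM's current_platform.is_cuda_alike().
    return True

def verify_block_size(block_size: int) -> None:
    # Mirror of the added check: the CUDA Paged Attention kernel only
    # handles block sizes 8, 16, and 32, so the larger CLI choices
    # (64, 128) must be rejected up front on CUDA-like devices.
    if is_cuda_alike() and block_size not in SUPPORTED_CUDA_BLOCK_SIZES:
        raise ValueError(
            "CUDA Paged Attention kernel only supports "
            f"block sizes {SUPPORTED_CUDA_BLOCK_SIZES}. Got {block_size}.")

verify_block_size(16)  # passes silently
try:
    verify_block_size(64)
except ValueError as err:
    print(err)  # ... only supports block sizes [8, 16, 32]. Got 64.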

vllm/engine/arg_utils.py (3 additions, 1 deletion)

@@ -426,7 +426,9 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
                             choices=[8, 16, 32, 64, 128],
                             help='Token block size for contiguous chunks of '
                             'tokens. This is ignored on neuron devices and '
-                            'set to max-model-len')
+                            'set to max-model-len. On CUDA devices, '
+                            'only block sizes up to 32 are supported. '
+                            'On HPU devices, block size defaults to 128.')
 
         parser.add_argument(
             "--enable-prefix-caching",
