Skip to content

Commit 4f5b059

Browse files
authored
Clean up unused padding_idx variables across many model definitions (#13240)
Signed-off-by: Tyler Michael Smith <[email protected]>
1 parent 288ca11 commit 4f5b059

30 files changed

+1
-35
lines changed

vllm/model_executor/models/arctic.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -370,7 +370,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
370370
cache_config = vllm_config.cache_config
371371
quant_config = vllm_config.quant_config
372372

373-
self.padding_idx = config.pad_token_id
374373
self.vocab_size = config.vocab_size
375374
self.embed_tokens = VocabParallelEmbedding(
376375
self.vocab_size,

vllm/model_executor/models/baichuan.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -267,7 +267,6 @@ def __init__(
267267
quant_config = vllm_config.quant_config
268268

269269
self.config = config
270-
self.padding_idx = config.pad_token_id
271270
self.vocab_size = config.vocab_size
272271

273272
self.embed_tokens = VocabParallelEmbedding(

vllm/model_executor/models/bart.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -725,7 +725,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
725725

726726
self.config = config
727727

728-
self.padding_idx = config.pad_token_id
729728
lora_vocab = (lora_config.lora_extra_vocab_size *
730729
(lora_config.max_loras or 1)) if lora_config else 0
731730
self.vocab_size = config.vocab_size + lora_vocab

vllm/model_executor/models/chameleon.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -851,7 +851,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
851851
quant_config = vllm_config.quant_config
852852

853853
self.config = config
854-
self.padding_idx = config.pad_token_id
855854
self.vocab_size = config.vocab_size
856855
self.embed_tokens = VocabParallelEmbedding(
857856
self.vocab_size,

vllm/model_executor/models/deepseek.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
339339
cache_config = vllm_config.cache_config
340340
quant_config = vllm_config.quant_config
341341

342-
self.padding_idx = config.pad_token_id
343342
self.vocab_size = config.vocab_size
344343

345344
self.embed_tokens = VocabParallelEmbedding(

vllm/model_executor/models/deepseek_v2.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -570,7 +570,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
570570
cache_config = vllm_config.cache_config
571571
quant_config = vllm_config.quant_config
572572

573-
self.padding_idx = config.pad_token_id
574573
self.vocab_size = config.vocab_size
575574

576575
if get_pp_group().is_first_rank:

vllm/model_executor/models/exaone.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -313,7 +313,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
313313
lora_config = vllm_config.lora_config
314314

315315
self.config = config
316-
self.padding_idx = config.pad_token_id
317316
lora_vocab = ((lora_config.lora_extra_vocab_size *
318317
(lora_config.max_loras or 1)) if lora_config else 0)
319318
self.vocab_size = config.vocab_size + lora_vocab

vllm/model_executor/models/florence2.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -592,7 +592,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
592592

593593
self.config = config
594594

595-
self.padding_idx = config.pad_token_id
596595
self.vocab_size = config.vocab_size
597596

598597
self.shared = BartScaledWordEmbedding(self.vocab_size, config.d_model)

vllm/model_executor/models/fuyu.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
255255
self.config = config
256256
self.multimodal_config = multimodal_config
257257

258-
self.padding_idx = config.pad_token_id
259258
self.vocab_size = config.text_config.vocab_size
260259
self.image_token_id = _IMAGE_TOKEN_ID
261260
self.image_feature_size = config.patch_size**2 * config.num_channels

vllm/model_executor/models/granite.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
260260
lora_config = vllm_config.lora_config
261261

262262
self.config = config
263-
self.padding_idx = config.pad_token_id
264263
lora_vocab = (lora_config.lora_extra_vocab_size *
265264
(lora_config.max_loras or 1)) if lora_config else 0
266265
self.vocab_size = config.vocab_size + lora_vocab

0 commit comments

Comments (0)