diff --git a/src/transformers/cache_utils.py b/src/transformers/cache_utils.py
index aea93f0e770c..1b1e0bd5cf40 100644
--- a/src/transformers/cache_utils.py
+++ b/src/transformers/cache_utils.py
@@ -1313,7 +1313,7 @@ def check_dynamic_cache(self, method: str):
             isinstance(self.self_attention_cache, DynamicCache)
             and isinstance(self.cross_attention_cache, DynamicCache)
         ):
-            raise ValueError(
+            raise TypeError(
                 f"`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self "
                 f"attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache."
             )
diff --git a/src/transformers/commands/serving.py b/src/transformers/commands/serving.py
index da822647c09a..3fccadcc6253 100644
--- a/src/transformers/commands/serving.py
+++ b/src/transformers/commands/serving.py
@@ -1186,7 +1186,7 @@ def generate_response(self, req: dict) -> Generator[str, None, None]:
             inputs = [{"role": "system", "content": req["instructions"]}] if "instructions" in req else []
             inputs.append(req["input"])
         else:
-            raise ValueError("inputs should be a list, dict, or str")
+            raise TypeError("inputs should be a list, dict, or str")
 
         inputs = processor.apply_chat_template(inputs, add_generation_prompt=True, return_tensors="pt")
         inputs = inputs.to(model.device)
diff --git a/src/transformers/models/evolla/processing_evolla.py b/src/transformers/models/evolla/processing_evolla.py
index a42128767611..3be0e07364a6 100644
--- a/src/transformers/models/evolla/processing_evolla.py
+++ b/src/transformers/models/evolla/processing_evolla.py
@@ -165,7 +165,7 @@ def __call__(
         if isinstance(messages_list, (list, tuple)):
             for messages in messages_list:
                 if not isinstance(messages, (list, tuple)):
-                    raise ValueError(f"Each messages in messages_list should be a list instead of {type(messages)}.")
+                    raise TypeError(f"Each messages in messages_list should be a list instead of {type(messages)}.")
                 if not all(isinstance(m, dict) for m in messages):
                     raise ValueError(
                         "Each message in messages_list should be a list of dictionaries, but not all elements are dictionaries."
diff --git a/src/transformers/models/gemma3n/processing_gemma3n.py b/src/transformers/models/gemma3n/processing_gemma3n.py
index 105b1983b7c7..913336b8d3f5 100644
--- a/src/transformers/models/gemma3n/processing_gemma3n.py
+++ b/src/transformers/models/gemma3n/processing_gemma3n.py
@@ -107,7 +107,7 @@ def __call__(
         if isinstance(text, str):
             text = [text]
         elif not isinstance(text, list) and not isinstance(text[0], str):
-            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
 
         if audio is not None:
             audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
diff --git a/src/transformers/models/lfm2_vl/processing_lfm2_vl.py b/src/transformers/models/lfm2_vl/processing_lfm2_vl.py
index 12f289c266a1..037e3cbc25b2 100755
--- a/src/transformers/models/lfm2_vl/processing_lfm2_vl.py
+++ b/src/transformers/models/lfm2_vl/processing_lfm2_vl.py
@@ -135,7 +135,7 @@ def __call__(
         if isinstance(text, str):
             text = [text]
         elif not isinstance(text, list) and not isinstance(text[0], str):
-            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
 
         n_images_in_text = [sample.count(self.image_token) for sample in text]
         if sum(n_images_in_text) > 0 and images is None:
diff --git a/src/transformers/models/ovis2/processing_ovis2.py b/src/transformers/models/ovis2/processing_ovis2.py
index efb79409da2b..82cd686682a9 100644
--- a/src/transformers/models/ovis2/processing_ovis2.py
+++ b/src/transformers/models/ovis2/processing_ovis2.py
@@ -118,7 +118,7 @@ def __call__(
         if isinstance(text, str):
             text = [text]
         elif not isinstance(text, list) and not isinstance(text[0], str):
-            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
 
         image_inputs = {}
 
diff --git a/src/transformers/models/perception_lm/processing_perception_lm.py b/src/transformers/models/perception_lm/processing_perception_lm.py
index e57418ef92f7..41016765c1fd 100644
--- a/src/transformers/models/perception_lm/processing_perception_lm.py
+++ b/src/transformers/models/perception_lm/processing_perception_lm.py
@@ -144,7 +144,7 @@ def __call__(
         if isinstance(text, str):
            text = [text]
         elif not isinstance(text, list) and not isinstance(text[0], str):
-            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
 
         # try to expand inputs in processing if we have the necessary parts
         prompt_strings = []
diff --git a/src/transformers/models/sam/image_processing_sam_fast.py b/src/transformers/models/sam/image_processing_sam_fast.py
index e77b69ee1e2b..54dbcf52c17f 100644
--- a/src/transformers/models/sam/image_processing_sam_fast.py
+++ b/src/transformers/models/sam/image_processing_sam_fast.py
@@ -390,7 +390,7 @@ def post_process_masks(
             if isinstance(masks[i], np.ndarray):
                 masks[i] = torch.from_numpy(masks[i])
             elif not isinstance(masks[i], torch.Tensor):
-                raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
+                raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
             interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
             interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
             interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
diff --git a/src/transformers/models/sam2/image_processing_sam2_fast.py b/src/transformers/models/sam2/image_processing_sam2_fast.py
index 5ae472f53638..c468f6400d54 100644
--- a/src/transformers/models/sam2/image_processing_sam2_fast.py
+++ b/src/transformers/models/sam2/image_processing_sam2_fast.py
@@ -669,7 +669,7 @@ def post_process_masks(
             if isinstance(masks[i], np.ndarray):
                 masks[i] = torch.from_numpy(masks[i])
             elif not isinstance(masks[i], torch.Tensor):
-                raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
+                raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
             interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
             if apply_non_overlapping_constraints:
                 interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
diff --git a/src/transformers/models/sam2/modular_sam2.py b/src/transformers/models/sam2/modular_sam2.py
index e6058db272fe..8fcfe36a759e 100644
--- a/src/transformers/models/sam2/modular_sam2.py
+++ b/src/transformers/models/sam2/modular_sam2.py
@@ -287,7 +287,7 @@ def post_process_masks(
             if isinstance(masks[i], np.ndarray):
                 masks[i] = torch.from_numpy(masks[i])
             elif not isinstance(masks[i], torch.Tensor):
-                raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
+                raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
             interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
             if apply_non_overlapping_constraints:
                 interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
diff --git a/src/transformers/models/sam2/processing_sam2.py b/src/transformers/models/sam2/processing_sam2.py
index 5f147aab8dfa..a2d90581ec70 100644
--- a/src/transformers/models/sam2/processing_sam2.py
+++ b/src/transformers/models/sam2/processing_sam2.py
@@ -258,7 +258,7 @@ def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
         elif isinstance(data, (int, float)):
             return data
         else:
-            raise ValueError(f"Unsupported data type: {type(data)}")
+            raise TypeError(f"Unsupported data type: {type(data)}")
 
     def _get_nested_dimensions(self, nested_list, max_dims=None):
         """
diff --git a/src/transformers/models/sam2_video/processing_sam2_video.py b/src/transformers/models/sam2_video/processing_sam2_video.py
index d5a3c94d7f87..0c0df9490152 100644
--- a/src/transformers/models/sam2_video/processing_sam2_video.py
+++ b/src/transformers/models/sam2_video/processing_sam2_video.py
@@ -262,7 +262,7 @@ def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
         elif isinstance(data, (int, float)):
             return data
         else:
-            raise ValueError(f"Unsupported data type: {type(data)}")
+            raise TypeError(f"Unsupported data type: {type(data)}")
 
     def _get_nested_dimensions(self, nested_list, max_dims=None):
         """
diff --git a/src/transformers/models/sam2_video/video_processing_sam2_video.py b/src/transformers/models/sam2_video/video_processing_sam2_video.py
index b0280828cb66..873bf2c378ab 100644
--- a/src/transformers/models/sam2_video/video_processing_sam2_video.py
+++ b/src/transformers/models/sam2_video/video_processing_sam2_video.py
@@ -92,7 +92,7 @@ def post_process_masks(
             if isinstance(masks[i], np.ndarray):
                 masks[i] = torch.from_numpy(masks[i])
             elif not isinstance(masks[i], torch.Tensor):
-                raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
+                raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
             interpolated_mask = F_t.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
             interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
             interpolated_mask = F_t.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
diff --git a/src/transformers/quantizers/auto.py b/src/transformers/quantizers/auto.py
index 6b5bdd3e9c3c..e21423a8fe12 100644
--- a/src/transformers/quantizers/auto.py
+++ b/src/transformers/quantizers/auto.py
@@ -287,7 +287,7 @@ def register_quantizer_fn(cls):
            raise ValueError(f"Quantizer '{name}' already registered")
 
         if not issubclass(cls, HfQuantizer):
-            raise ValueError("Quantizer must extend HfQuantizer")
+            raise TypeError("Quantizer must extend HfQuantizer")
 
         AUTO_QUANTIZER_MAPPING[name] = cls
         return cls
diff --git a/src/transformers/tokenization_mistral_common.py b/src/transformers/tokenization_mistral_common.py
index f388f46cb9d8..027a9deccafd 100644
--- a/src/transformers/tokenization_mistral_common.py
+++ b/src/transformers/tokenization_mistral_common.py
@@ -1433,7 +1433,7 @@ def apply_chat_template(
                 f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonTokenizer.apply_chat_template`."
             )
         if not isinstance(truncation, bool):
-            raise ValueError("`truncation` must be a boolean for `apply_chat_template` method.")
+            raise TypeError("`truncation` must be a boolean for `apply_chat_template` method.")
 
         if isinstance(conversation, (list, tuple)) and (
             isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages")
diff --git a/tests/models/sam2/test_processor_sam2.py b/tests/models/sam2/test_processor_sam2.py
index 1c388e210836..db1b27738bf7 100644
--- a/tests/models/sam2/test_processor_sam2.py
+++ b/tests/models/sam2/test_processor_sam2.py
@@ -140,5 +140,5 @@ def test_post_process_masks(self):
         self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
 
         dummy_masks = [[1, 0], [0, 1]]
-        with self.assertRaises(ValueError):
+        with self.assertRaises(TypeError):
             masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
diff --git a/tests/models/sam2_video/test_processor_sam2_video.py b/tests/models/sam2_video/test_processor_sam2_video.py
index 6e071158be11..5e4d07bed1be 100644
--- a/tests/models/sam2_video/test_processor_sam2_video.py
+++ b/tests/models/sam2_video/test_processor_sam2_video.py
@@ -149,5 +149,5 @@ def test_post_process_masks(self):
         self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
 
         dummy_masks = [[1, 0], [0, 1]]
-        with self.assertRaises(ValueError):
+        with self.assertRaises(TypeError):
             masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
diff --git a/tests/test_tokenization_mistral_common.py b/tests/test_tokenization_mistral_common.py
index e34083177587..9b574f85fb1c 100644
--- a/tests/test_tokenization_mistral_common.py
+++ b/tests/test_tokenization_mistral_common.py
@@ -945,7 +945,7 @@ def test_appsly_chat_template_with_truncation(self):
 
         # Test 3:
         # assert truncation is boolean
-        with self.assertRaises(ValueError):
+        with self.assertRaises(TypeError):
             self.tokenizer.apply_chat_template(
                 conversation, tokenize=True, truncation=TruncationStrategy.LONGEST_FIRST, max_length=20
             )
@@ -1189,7 +1189,7 @@ def test_batch_apply_chat_template_with_truncation(
 
         # Test 3:
         # assert truncation is boolean
-        with self.assertRaises(ValueError):
+        with self.assertRaises(TypeError):
             self.tokenizer.apply_chat_template(
                 self.fixture_conversations, tokenize=True, truncation=TruncationStrategy.LONGEST_FIRST, max_length=20
             )
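
Downstream code that wrapped these call sites in `except ValueError` will no longer catch the errors raised here. Below is a minimal sketch of a transition-friendly handler; the `validate_text` helper is a hypothetical stand-in that mirrors the type check used by the processors touched in this diff, not a library function:

    def validate_text(text):
        # Mirror the processors' check: accept a str or a list of str.
        if isinstance(text, str):
            return [text]
        if not isinstance(text, list) or not all(isinstance(t, str) for t in text):
            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
        return text


    try:
        validate_text(123)
    except (TypeError, ValueError) as err:
        # Catching both keeps the handler working before and after this change.
        print(f"Rejected invalid input: {err}")

Catching both exception types is only needed during the transition; once callers target a release that includes this change, `except TypeError` alone is sufficient.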