From b251b381df3645da3fb933f184054e5a054ec7b0 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Fri, 27 Oct 2023 10:52:54 +0200 Subject: [PATCH] Revert "add exllamav2 arg (#26437)" This reverts commit 8214d6e7b1d6ac25859ad745ccebdf73434e166d. --- docs/source/en/main_classes/quantization.md | 12 +--- src/transformers/modeling_utils.py | 2 +- src/transformers/utils/quantization_config.py | 24 +------- tests/quantization/gptq/test_gptq.py | 60 +------------------ 4 files changed, 5 insertions(+), 93 deletions(-) diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md index 106f820a82ce..36ef2eefa896 100644 --- a/docs/source/en/main_classes/quantization.md +++ b/docs/source/en/main_classes/quantization.md @@ -128,22 +128,12 @@ For 4-bit model, you can use the exllama kernels in order to a faster inference ```py import torch -gptq_config = GPTQConfig(bits=4) -model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config) -``` - -With the release of the exllamav2 kernels, you can get faster inference speed compared to the exllama kernels. You just need to -pass `use_exllama_v2=True` in [`GPTQConfig`] and disable exllama kernels: - -```py -import torch -gptq_config = GPTQConfig(bits=4, use_exllama_v2=True) +gptq_config = GPTQConfig(bits=4, disable_exllama=False) model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config) ``` Note that only 4-bit models are supported for now. Furthermore, it is recommended to deactivate the exllama kernels if you are finetuning a quantized model with peft. -You can find the benchmark of these kernels [here](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark) #### Fine-tune a quantized model With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been quantized with GPTQ. diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 8179d5d6f045..59397ec20aff 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -2759,7 +2759,7 @@ def from_pretrained( logger.warning( "You passed `quantization_config` to `from_pretrained` but the model you're loading already has a " "`quantization_config` attribute and has already quantized weights. However, loading attributes" - " (e.g. disable_exllama, use_cuda_fp16, max_input_length, use_exllama_v2) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." + " (e.g. disable_exllama, use_cuda_fp16, max_input_length) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." ) if ( quantization_method_from_args == QuantizationMethod.GPTQ diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 3526df21afe9..13f81a5a2cfa 100644 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -349,8 +349,6 @@ class GPTQConfig(QuantizationConfigMixin): max_input_length (`int`, *optional*): The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input length. It is specific to the exllama backend with act-order. - use_exllama_v2 (`bool`, *optional*, defaults to `False`): - Whether to use exllamav2 backend. Only works with `bits` = 4. 
""" def __init__( @@ -371,7 +369,6 @@ def __init__( pad_token_id: Optional[int] = None, disable_exllama: bool = False, max_input_length: Optional[int] = None, - use_exllama_v2: bool = False, **kwargs, ): self.quant_method = QuantizationMethod.GPTQ @@ -391,14 +388,11 @@ def __init__( self.pad_token_id = pad_token_id self.disable_exllama = disable_exllama self.max_input_length = max_input_length - self.use_exllama_v2 = use_exllama_v2 - # needed for compatibility with optimum gptq config - self.disable_exllamav2 = not use_exllama_v2 self.post_init() def get_loading_attributes(self): attibutes_dict = copy.deepcopy(self.__dict__) - loading_attibutes = ["disable_exllama", "use_exllama_v2", "use_cuda_fp16", "max_input_length"] + loading_attibutes = ["disable_exllama", "use_cuda_fp16", "max_input_length"] loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} return loading_attibutes_dict @@ -424,19 +418,3 @@ def post_init(self): f"""dataset needs to be either a list of string or a value in ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" ) - if self.bits == 4: - if self.use_exllama_v2: - optimum_version = version.parse(importlib.metadata.version("optimum")) - autogptq_version = version.parse(importlib.metadata.version("auto_gptq")) - if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"): - raise ValueError( - f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . Make sure to have that version installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}" - ) - self.disable_exllama = True - logger.warning("You have activated exllamav2 kernels. Exllama kernels will be disabled.") - if not self.disable_exllama: - logger.warning( - """You have activated exllama backend. Note that you can get better inference - speed using exllamav2 kernel by setting `use_exllama_v2=True`.`disable_exllama` will be deprecated - in future version.""" - ) diff --git a/tests/quantization/gptq/test_gptq.py b/tests/quantization/gptq/test_gptq.py index 93c6d094e19b..4c7587f063a5 100644 --- a/tests/quantization/gptq/test_gptq.py +++ b/tests/quantization/gptq/test_gptq.py @@ -178,7 +178,6 @@ def test_quantized_layers_class(self): group_size=self.group_size, bits=self.bits, disable_exllama=self.disable_exllama, - disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) @@ -282,7 +281,8 @@ def setUpClass(cls): """ Setup quantized model """ - cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) + + cls.quantization_config = GPTQConfig(bits=4, disable_exllama=False, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, @@ -334,62 +334,6 @@ def test_max_input_length(self): self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) -@slow -@require_optimum -@require_auto_gptq -@require_torch_gpu -@require_accelerate -class GPTQTestExllamaV2(unittest.TestCase): - """ - Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). 
- More information on those arguments here: - https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig - """ - - EXPECTED_OUTPUTS = set() - EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") - model_name = "hf-internal-testing/Llama-2-7B-GPTQ" - revision = "gptq-4bit-128g-actorder_True" - input_text = "Hello my name is" - - @classmethod - def setUpClass(cls): - """ - Setup quantized model - """ - cls.quantization_config = GPTQConfig(bits=4, use_exllama_v2=True) - cls.quantized_model = AutoModelForCausalLM.from_pretrained( - cls.model_name, - revision=cls.revision, - torch_dtype=torch.float16, - device_map={"": 0}, - quantization_config=cls.quantization_config, - ) - cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) - - def check_inference_correctness(self, model): - """ - Test the generation quality of the quantized model and see that we are matching the expected output. - Given that we are operating on small numbers + the testing model is relatively small, we might not get - the same output across GPUs. So we'll generate few tokens (5-10) and check their output. - """ - - # Check that inference pass works on the model - encoded_input = self.tokenizer(self.input_text, return_tensors="pt") - - # Check the exactness of the results - output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) - - # Get the generation - self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) - - def test_generate_quality(self): - """ - Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens - """ - self.check_inference_correctness(self.quantized_model) - - # fail when run all together @pytest.mark.skip @require_accelerate
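
After this revert, the exllama backend is toggled only through `disable_exllama` on [`GPTQConfig`]; the `use_exllama_v2` argument and the exllamav2 code path are removed. The sketch below restates the post-revert usage shown in the docs hunk above. It is illustrative only: the checkpoint id is the docs' `{your_username}/opt-125m-gptq` placeholder, the prompt and generation call are not part of the patch, and it assumes a CUDA GPU with `optimum` and `auto-gptq` installed.

```py
# Post-revert usage sketch: the exllama kernels (4-bit only) are controlled
# solely by `disable_exllama`; there is no `use_exllama_v2` argument anymore.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

# Placeholder from the docs snippet -- substitute a real GPTQ-quantized checkpoint.
model_id = "{your_username}/opt-125m-gptq"

# `max_input_length` is only relevant for the exllama backend with act-order
# (desc_act=True); the test above passes max_input_length=4028 for that case.
gptq_config = GPTQConfig(bits=4, disable_exllama=False)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16,
    quantization_config=gptq_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

As the docs hunk notes, only 4-bit models are supported, and the exllama kernels should be deactivated (`disable_exllama=True`) when fine-tuning a quantized model with PEFT.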