diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md
index 36ef2eefa896..e346d3e0ecbe 100644
--- a/docs/source/en/main_classes/quantization.md
+++ b/docs/source/en/main_classes/quantization.md
@@ -124,7 +124,7 @@ model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", de
 
 ### Exllama kernels for faster inference
 
-For 4-bit model, you can use the exllama kernels in order to a faster inference speed. It is activated by default. You can change that behavior by passing `disable_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on gpus if you want to use exllama kernels.
+For 4-bit models, you can use the exllama kernels for faster inference. They are activated by default. You can change that behavior by passing `disable_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on GPUs if you want to use the exllama kernels. Also, you can perform CPU inference with Auto-GPTQ versions > 0.4.2 by passing `device_map="cpu"`. For CPU inference, you have to pass `disable_exllama=True` in the [`GPTQConfig`].
 
 ```py
 import torch
diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index d567b9438a0d..03500d5c673b 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -2718,7 +2718,8 @@ def from_pretrained(
             quantization_method_from_args == QuantizationMethod.GPTQ
             or quantization_method_from_config == QuantizationMethod.GPTQ
         ):
-            if not torch.cuda.is_available():
+            gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
+            if not gptq_supports_cpu and not torch.cuda.is_available():
                 raise RuntimeError("GPU is required to quantize or run quantize model.")
             elif not (is_optimum_available() and is_auto_gptq_available()):
                 raise ImportError(
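
The docs hunk describes the new CPU path only in prose. A minimal sketch of what that usage looks like, assuming auto-gptq > 0.4.2 is installed and reusing the placeholder `{your_username}/opt-125m-gptq` checkpoint name from the surrounding docs:

```py
from transformers import AutoModelForCausalLM, GPTQConfig

# The exllama kernels are CUDA-only, so CPU inference requires disabling them;
# it also only works with auto-gptq > 0.4.2.
quantization_config = GPTQConfig(bits=4, disable_exllama=True)

model = AutoModelForCausalLM.from_pretrained(
    "{your_username}/opt-125m-gptq",  # placeholder repo name from the docs
    device_map="cpu",
    quantization_config=quantization_config,
)
```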
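
The `modeling_utils.py` hunk gates the GPU requirement on the installed auto-gptq version. A self-contained sketch of the same check, assuming `version` resolves to `packaging.version` as elsewhere in `modeling_utils.py`:

```py
import importlib.metadata

from packaging import version

# CPU execution of GPTQ models is only possible with auto-gptq > 0.4.2;
# older releases still require a CUDA device.
gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
print(f"auto-gptq supports CPU inference: {gptq_supports_cpu}")
```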