From edf85051aa15161007da3ec415eaefc2bd552723 Mon Sep 17 00:00:00 2001 From: "Liu, Kaixuan" Date: Mon, 18 Aug 2025 07:02:12 -0400 Subject: [PATCH 1/2] for intel xpu case, use MatMul8bitFp even when not using ipex Signed-off-by: Liu, Kaixuan --- bitsandbytes/autograd/_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py index 80fc86861..5a58ec6a2 100644 --- a/bitsandbytes/autograd/_functions.py +++ b/bitsandbytes/autograd/_functions.py @@ -426,7 +426,7 @@ def matmul( state.threshold = threshold # MatMul8bitLt is slower because no fast kernel for quant/dequant 8bit in CPU/XPU if state.is_training: - if (A.device.type == "cpu" and ipex_cpu) or (A.device.type == "xpu" and ipex_xpu): + if (A.device.type == "cpu" and ipex_cpu) or (A.device.type == "xpu"): return MatMul8bitFp.apply(A, B, out, bias, state) return MatMul8bitLt.apply(A, B, out, bias, state) From 194b4e34adaea9bb45dba2123be831cc7c40b2b4 Mon Sep 17 00:00:00 2001 From: "Liu, Kaixuan" Date: Wed, 3 Sep 2025 06:15:37 -0400 Subject: [PATCH 2/2] fix lint issue Signed-off-by: Liu, Kaixuan --- bitsandbytes/autograd/_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py index 5a58ec6a2..3dba26fcf 100644 --- a/bitsandbytes/autograd/_functions.py +++ b/bitsandbytes/autograd/_functions.py @@ -8,7 +8,7 @@ from typing_extensions import deprecated import bitsandbytes.functional as F -from bitsandbytes.functional import ipex_cpu, ipex_xpu +from bitsandbytes.functional import ipex_cpu # The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov: # https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py