We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent d981396 · commit bca55b5 — Copy full SHA for bca55b5
vllm/model_executor/layers/quantization/ipex_quant.py
@@ -181,8 +181,6 @@ def apply(self,
181
bias: Optional[torch.Tensor] = None) -> torch.Tensor:
182
reshaped_x = x.reshape(-1, x.shape[-1])
183
out = layer.ipex_qlinear(reshaped_x)
184
- if bias is not None:
185
- out.add_(bias)
186
return out.reshape(x.shape[:-1] + (layer.ipex_output_size, ))
187
188
0 commit comments