We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent bc9cb55 · commit a8704d2 — Copy full SHA for a8704d2
src/transformers/quantizers/quantizer_hqq.py
@@ -112,7 +112,11 @@ def check_quantized_param(
112
module, tensor_name = get_module_from_name(model, param_name)
113
114
if self.pre_quantized:
115
- return (isinstance(module, torch.nn.Linear) or isinstance(module, HQQLinear)) and tensor_name != "weight" and tensor_name != "bias"
+ return (
116
+ (isinstance(module, torch.nn.Linear) or isinstance(module, HQQLinear))
117
+ and tensor_name != "weight"
118
+ and tensor_name != "bias"
119
+ )
120
else:
121
return isinstance(module, torch.nn.Linear) and tensor_name == "weight"
122
0 commit comments