1 file changed: +2 −2 lines changed

neural_compressor/torch/algorithms/weight_only

@@ -81,7 +81,7 @@ def rtn_quantize(
     model.to(device)

     assert isinstance(model, torch.nn.Module), "only support torch module"
-    supported_layers = ["Linear"]
+    supported_layers = (torch.nn.Linear,)
     # initialize global configuration
     double_quant_config = {
         "double_quant": kwargs.get("use_double_quant", False),
@@ -93,7 +93,7 @@ def rtn_quantize(
     if export_compressed_model:
         use_optimum_format = kwargs.get("use_optimum_format", True)
     for name, m in model.named_modules():
-        if m.__class__.__name__ not in supported_layers:
+        if not isinstance(m, supported_layers):
             continue
         if name in weight_config:  # pragma: no cover
             # initialize op configuration
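For context on the change: the old check compared the module's class name against the string list `["Linear"]`, which silently skips subclasses of `torch.nn.Linear`; an `isinstance` check against a tuple of types matches subclasses as well. A minimal standalone sketch of the difference (`CustomLinear` is a hypothetical subclass used only for illustration, not part of the patch):

```python
import torch

# Hypothetical subclass of torch.nn.Linear, used only to illustrate
# why the isinstance check is broader than the class-name check.
class CustomLinear(torch.nn.Linear):
    pass

supported_layers = (torch.nn.Linear,)

model = torch.nn.Sequential(torch.nn.Linear(4, 4), CustomLinear(4, 4))

for name, m in model.named_modules():
    # Old check: exact class-name match, so CustomLinear is skipped.
    matched_by_name = m.__class__.__name__ in ["Linear"]
    # New check: isinstance also matches subclasses of torch.nn.Linear.
    matched_by_type = isinstance(m, supported_layers)
    print(f"{name or '<root>'}: by_name={matched_by_name}, by_type={matched_by_type}")
```

Running this prints `by_name=False, by_type=True` for the `CustomLinear` module, showing that the new check quantizes subclassed linear layers that the string comparison would have ignored.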