This repository was archived by the owner on Jun 4, 2025. It is now read-only.

Commit 03e9e9d

natuan authored and KSGulin committed
Default behavior for sparseml w/o recipes (#13)
1 parent 84b2267 commit 03e9e9d

1 file changed: +3 −5 lines changed

src/transformers/sparse.py

Lines changed: 3 additions & 5 deletions
```diff
@@ -111,17 +111,15 @@ def create_scheduler(self, num_training_steps: int):
             # scheduler already set
             return
 
-        if self.manager.learning_rate_modifiers:
+        if self.manager is not None and self.manager.learning_rate_modifiers:
             # allow SparseML to manage LR and set a dummy scheduler
-            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
-                self.optimizer, lambda _: 1.0, -1
-            )
+            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lambda _: 1.0, -1)
         else:
             # default scheduler
             super().create_scheduler(num_training_steps)
 
     def qat_active(self, epoch: int):
-        if not self.manager.quantization_modifiers:
+        if self.manager is None or not self.manager.quantization_modifiers:
             return False
 
         qat_start = min([mod.start_epoch for mod in self.manager.quantization_modifiers])
```
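For context, here is a minimal, self-contained sketch of the control flow this commit introduces: both methods now tolerate `self.manager` being `None`, which is the case when sparseml runs without a recipe. The `SketchTrainer` class, the `SimpleNamespace` stand-in for the recipe manager, the default-scheduler placeholder, and the `return epoch >= qat_start` tail of `qat_active` are illustrative assumptions, not the repo's actual code; only the `None` guards mirror the diff above.

```python
from types import SimpleNamespace

import torch


class SketchTrainer:
    """Hypothetical stand-in for the trainer patched in src/transformers/sparse.py."""

    def __init__(self, optimizer, manager=None):
        self.optimizer = optimizer
        self.manager = manager      # None when sparseml runs without a recipe
        self.lr_scheduler = None

    def create_scheduler(self, num_training_steps: int):
        if self.lr_scheduler is not None:
            # scheduler already set
            return
        if self.manager is not None and self.manager.learning_rate_modifiers:
            # allow SparseML to manage LR and set a dummy (constant) scheduler
            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lambda _: 1.0, -1)
        else:
            # default scheduler; the real code defers to super().create_scheduler(...)
            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=max(1, num_training_steps))

    def qat_active(self, epoch: int) -> bool:
        if self.manager is None or not self.manager.quantization_modifiers:
            return False
        qat_start = min([mod.start_epoch for mod in self.manager.quantization_modifiers])
        # assumption: QAT counts as active once the earliest quantization modifier has started
        return epoch >= qat_start


model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

# Without a recipe: falls through to the default scheduler branch, QAT reported inactive.
trainer = SketchTrainer(opt, manager=None)
trainer.create_scheduler(num_training_steps=10)
assert trainer.qat_active(epoch=3) is False

# With a recipe whose manager defines a quantization modifier starting at epoch 2.
manager = SimpleNamespace(
    learning_rate_modifiers=[],
    quantization_modifiers=[SimpleNamespace(start_epoch=2)],
)
trainer2 = SketchTrainer(opt, manager=manager)
assert trainer2.qat_active(epoch=3) is True
```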

0 commit comments