From cc7a1917191387ccac6f6ec10a13897a8174f56f Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 10:09:04 +0200
Subject: [PATCH 01/12] include train mode count

---
 src/lightning/pytorch/callbacks/model_summary.py     | 5 ++++-
 .../pytorch/utilities/model_summary/model_summary.py | 8 +++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/lightning/pytorch/callbacks/model_summary.py b/src/lightning/pytorch/callbacks/model_summary.py
index ddff91ed2949e..53a082b423d5d 100644
--- a/src/lightning/pytorch/callbacks/model_summary.py
+++ b/src/lightning/pytorch/callbacks/model_summary.py
@@ -66,9 +66,10 @@ def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -
         total_parameters = model_summary.total_parameters
         trainable_parameters = model_summary.trainable_parameters
         model_size = model_summary.model_size
+        training_modes = model_summary.training_modes
 
         if trainer.is_global_zero:
-            self.summarize(summary_data, total_parameters, trainable_parameters, model_size, **self._summarize_kwargs)
+            self.summarize(summary_data, total_parameters, trainable_parameters, model_size, training_modes, **self._summarize_kwargs)
 
     def _summary(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Union[DeepSpeedSummary, Summary]:
         from lightning.pytorch.strategies.deepspeed import DeepSpeedStrategy
@@ -83,12 +84,14 @@ def summarize(
         total_parameters: int,
         trainable_parameters: int,
         model_size: float,
+        training_modes: List[bool],
         **summarize_kwargs: Any,
     ) -> None:
         summary_table = _format_summary_table(
             total_parameters,
             trainable_parameters,
             model_size,
+            training_modes,
             *summary_data,
         )
         log.info("\n" + summary_table)
diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index 806724e1c434a..f7a59fc3715c4 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -351,8 +351,9 @@ def __str__(self) -> str:
         total_parameters = self.total_parameters
         trainable_parameters = self.trainable_parameters
         model_size = self.model_size
+        training_modes = self.training_modes
 
-        return _format_summary_table(total_parameters, trainable_parameters, model_size, *arrays)
+        return _format_summary_table(total_parameters, trainable_parameters, model_size, training_modes, *arrays)
 
     def __repr__(self) -> str:
         return str(self)
@@ -372,6 +373,7 @@ def _format_summary_table(
     total_parameters: int,
     trainable_parameters: int,
     model_size: float,
+    training_modes: List[bool],
     *cols: Tuple[str, List[str]],
 ) -> str:
     """Takes in a number of arrays, each specifying a column in the summary table, and combines them all into one big
@@ -408,6 +410,10 @@ def _format_summary_table(
     summary += "Total params"
     summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
     summary += "Total estimated model params size (MB)"
+    summary += "\n" + s.format(training_modes.count(True), 10)
+    summary += "Submodules in train mode"
+    summary += "\n" + s.format(training_modes.count(False), 10)
+    summary += "Submodules in eval mode"
 
     return summary

From 40c71bdb506aaf4b74ea1d3bab768ffa0f17ffcc Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 10:14:44 +0200
Subject: [PATCH 02/12] Rich

---
 src/lightning/pytorch/callbacks/rich_model_summary.py | 3 +++
 .../pytorch/utilities/model_summary/model_summary.py  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/lightning/pytorch/callbacks/rich_model_summary.py b/src/lightning/pytorch/callbacks/rich_model_summary.py
index f551a9c397531..50fafd6bdab22 100644
--- a/src/lightning/pytorch/callbacks/rich_model_summary.py
+++ b/src/lightning/pytorch/callbacks/rich_model_summary.py
@@ -71,6 +71,7 @@ def summarize(
         total_parameters: int,
         trainable_parameters: int,
         model_size: float,
+        training_modes: List[bool],
         **summarize_kwargs: Any,
     ) -> None:
         from rich import get_console
@@ -110,5 +111,7 @@ def summarize(
         grid.add_row(f"[bold]Non-trainable params[/]: {parameters[1]}")
         grid.add_row(f"[bold]Total params[/]: {parameters[2]}")
         grid.add_row(f"[bold]Total estimated model params size (MB)[/]: {parameters[3]}")
+        grid.add_row(f"[bold]Modules in train mode[/]: {training_modes.count(True)}")
+        grid.add_row(f"[bold]Modules in eval mode[/]: {training_modes.count(False)}")
 
         console.print(grid)
diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index f7a59fc3715c4..add0f5dd22224 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -411,9 +411,9 @@ def _format_summary_table(
     summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
     summary += "Total estimated model params size (MB)"
     summary += "\n" + s.format(training_modes.count(True), 10)
-    summary += "Submodules in train mode"
+    summary += "Modules in train mode"
     summary += "\n" + s.format(training_modes.count(False), 10)
-    summary += "Submodules in eval mode"
+    summary += "Modules in eval mode"
 
     return summary

From 0c582404ab09b5af8264a4a9b03eff6a776b9a00 Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 10:18:37 +0200
Subject: [PATCH 03/12] Update tests

---
 tests/tests_pytorch/callbacks/test_model_summary.py      | 3 +++
 tests/tests_pytorch/callbacks/test_rich_model_summary.py | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/tests/tests_pytorch/callbacks/test_model_summary.py b/tests/tests_pytorch/callbacks/test_model_summary.py
index 0f255367f1a10..19755512df07c 100644
--- a/tests/tests_pytorch/callbacks/test_model_summary.py
+++ b/tests/tests_pytorch/callbacks/test_model_summary.py
@@ -49,6 +49,7 @@ def summarize(
         total_parameters: int,
         trainable_parameters: int,
         model_size: float,
+        training_modes,
         **summarize_kwargs: Any,
     ) -> None:
         assert summary_data[1][0] == "Name"
@@ -64,6 +65,8 @@ def summarize(
         assert summary_data[4][0] == "Mode"
         assert summary_data[4][1][0] == "train"
 
+        assert training_modes == [True]
+
     model = BoringModel()
     trainer = Trainer(default_root_dir=tmp_path, callbacks=CustomModelSummary(), max_steps=1)
diff --git a/tests/tests_pytorch/callbacks/test_rich_model_summary.py b/tests/tests_pytorch/callbacks/test_rich_model_summary.py
index f8ede0eb0239e..1e7b03ec2d7ed 100644
--- a/tests/tests_pytorch/callbacks/test_rich_model_summary.py
+++ b/tests/tests_pytorch/callbacks/test_rich_model_summary.py
@@ -56,7 +56,9 @@ def example_input_array(self) -> Any:
     summary = summarize(model)
     summary_data = summary._get_summary_data()
 
-    model_summary.summarize(summary_data=summary_data, total_parameters=1, trainable_parameters=1, model_size=1)
+    model_summary.summarize(
+        summary_data=summary_data, total_parameters=1, trainable_parameters=1, model_size=1, training_modes=[True]
+    )
 
     # ensure that summary was logged + the breakdown of model parameters
     assert mock_console.call_count == 2
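For illustration only — not part of these patches — here is a minimal sketch of how the per-layer `training_modes` list introduced above maps to the train/eval counts printed by `_format_summary_table`, using plain `torch.nn` modules in place of a summarized LightningModule:

    import torch.nn as nn

    # A small model with one submodule switched to eval mode.
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    model[2].eval()

    # Mirrors the `training_modes.count(True)` / `.count(False)` calls above.
    training_modes = [layer.training for layer in model.children()]
    print(training_modes.count(True))   # 2 modules in train mode
    print(training_modes.count(False))  # 1 module in eval mode
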
From 4bf78b845d2ad833bd004aaa2deac5d9e62bfbfe Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 10:31:15 +0200
Subject: [PATCH 04/12] update transfer learning docs

---
 docs/source-pytorch/advanced/transfer_learning.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/source-pytorch/advanced/transfer_learning.rst b/docs/source-pytorch/advanced/transfer_learning.rst
index 935bcd4bb8773..7f6af6ad5a56d 100644
--- a/docs/source-pytorch/advanced/transfer_learning.rst
+++ b/docs/source-pytorch/advanced/transfer_learning.rst
@@ -116,6 +116,7 @@ Here's a model that uses `Huggingface transformers
Date: Sun, 4 Aug 2024 10:46:41 +0200
Subject: [PATCH 05/12] totals

---
 src/lightning/pytorch/callbacks/model_summary.py     | 15 +++++++++++----
 .../pytorch/callbacks/rich_model_summary.py          |  8 ++++----
 .../utilities/model_summary/model_summary.py         | 12 +++++++++---
 3 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/src/lightning/pytorch/callbacks/model_summary.py b/src/lightning/pytorch/callbacks/model_summary.py
index 53a082b423d5d..691d58e3c6c8a 100644
--- a/src/lightning/pytorch/callbacks/model_summary.py
+++ b/src/lightning/pytorch/callbacks/model_summary.py
@@ -66,10 +66,17 @@ def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -
         total_parameters = model_summary.total_parameters
         trainable_parameters = model_summary.trainable_parameters
         model_size = model_summary.model_size
-        training_modes = model_summary.training_modes
+        total_training_modes = model_summary.total_training_modes
 
         if trainer.is_global_zero:
-            self.summarize(summary_data, total_parameters, trainable_parameters, model_size, training_modes, **self._summarize_kwargs)
+            self.summarize(
+                summary_data,
+                total_parameters,
+                trainable_parameters,
+                model_size,
+                total_training_modes,
+                **self._summarize_kwargs
+            )
 
     def _summary(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Union[DeepSpeedSummary, Summary]:
         from lightning.pytorch.strategies.deepspeed import DeepSpeedStrategy
@@ -84,14 +91,14 @@ def summarize(
         total_parameters: int,
         trainable_parameters: int,
         model_size: float,
-        training_modes: List[bool],
+        total_training_modes: Dict[str, int],
         **summarize_kwargs: Any,
     ) -> None:
         summary_table = _format_summary_table(
             total_parameters,
             trainable_parameters,
             model_size,
-            training_modes,
+            total_training_modes,
             *summary_data,
         )
         log.info("\n" + summary_table)
diff --git a/src/lightning/pytorch/callbacks/rich_model_summary.py b/src/lightning/pytorch/callbacks/rich_model_summary.py
index 50fafd6bdab22..1345ef71a7274 100644
--- a/src/lightning/pytorch/callbacks/rich_model_summary.py
+++ b/src/lightning/pytorch/callbacks/rich_model_summary.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Any, List, Tuple
+from typing import Any, List, Tuple, Dict
 
 from typing_extensions import override
 
@@ -71,7 +71,7 @@ def summarize(
         total_parameters: int,
         trainable_parameters: int,
         model_size: float,
-        training_modes: List[bool],
+        total_training_modes: Dict[str, int],
         **summarize_kwargs: Any,
     ) -> None:
         from rich import get_console
@@ -111,7 +111,7 @@ def summarize(
         grid.add_row(f"[bold]Non-trainable params[/]: {parameters[1]}")
         grid.add_row(f"[bold]Total params[/]: {parameters[2]}")
         grid.add_row(f"[bold]Total estimated model params size (MB)[/]: {parameters[3]}")
-        grid.add_row(f"[bold]Modules in train mode[/]: {training_modes.count(True)}")
-        grid.add_row(f"[bold]Modules in eval mode[/]: {training_modes.count(False)}")
+        grid.add_row(f"[bold]Modules in train mode[/]: {total_training_modes['train']}")
+        grid.add_row(f"[bold]Modules in eval mode[/]: {total_training_modes['eval']}")
 
         console.print(grid)
diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index add0f5dd22224..b5274161aa7c4 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -252,6 +252,12 @@ def param_nums(self) -> List[int]:
     def training_modes(self) -> List[bool]:
         return [layer.training for layer in self._layer_summary.values()]
 
+    @property
+    def total_training_modes(self) -> Dict[str, int]:
+        modes = [layer.training for layer in self._model.modules()]
+        modes = modes[1:]  # exclude the root module
+        return {"train": modes.count(True), "eval": modes.count(False)}
+
     @property
     def total_parameters(self) -> int:
         return sum(p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters())
@@ -373,7 +379,7 @@ def _format_summary_table(
     total_parameters: int,
     trainable_parameters: int,
     model_size: float,
-    training_modes: List[bool],
+    total_training_modes: Dict[str, int],
     *cols: Tuple[str, List[str]],
 ) -> str:
     """Takes in a number of arrays, each specifying a column in the summary table, and combines them all into one big
@@ -410,9 +416,9 @@ def _format_summary_table(
     summary += "Total params"
     summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
     summary += "Total estimated model params size (MB)"
-    summary += "\n" + s.format(training_modes.count(True), 10)
+    summary += "\n" + s.format(total_training_modes["train"], 10)
     summary += "Modules in train mode"
-    summary += "\n" + s.format(training_modes.count(False), 10)
+    summary += "\n" + s.format(total_training_modes["eval"], 10)
     summary += "Modules in eval mode"
 
     return summary
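The `total_training_modes` property added in PATCH 05 counts the `.training` flag of every submodule while skipping the root module itself. A standalone sketch of that logic on a plain `torch.nn` module tree (illustrative only, not Lightning code):

    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Linear(4, 4), nn.ReLU()))
    model[1].eval()  # switches the inner Sequential and its two children to eval

    # `modules()` yields the root first; drop it, then count each flag.
    modes = [m.training for m in model.modules()][1:]
    print({"train": modes.count(True), "eval": modes.count(False)})  # {'train': 1, 'eval': 3}
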
From 649a4c1bd876f8d4d70fe40eea60528ceef8875d Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 10:50:55 +0200
Subject: [PATCH 06/12] update doctest

---
 .../pytorch/utilities/model_summary/model_summary.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index b5274161aa7c4..e9d161550eb89 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -187,6 +187,8 @@ class ModelSummary:
     0         Non-trainable params
     132 K     Total params
     0.530     Total estimated model params size (MB)
+    1         Modules in train mode
+    0         Modules in eval mode
     >>> ModelSummary(model, max_depth=-1)  # doctest: +NORMALIZE_WHITESPACE
       | Name | Type | Params | Mode | In sizes | Out sizes
     ----------------------------------------------------------------------
@@ -198,7 +200,8 @@ class ModelSummary:
     0         Non-trainable params
     132 K     Total params
     0.530     Total estimated model params size (MB)
-
+    3         Modules in train mode
+    0         Modules in eval mode
     """
 
     def __init__(self, model: "pl.LightningModule", max_depth: int = 1) -> None:

From ade15abb1cb69757b2f53caae0603faa4798c111 Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 11:04:53 +0200
Subject: [PATCH 07/12] update tests

---
 .../callbacks/test_model_summary.py        |  4 +--
 .../callbacks/test_rich_model_summary.py   |  6 ++++-
 .../utilities/test_model_summary.py        | 26 ++++++++++++++++++-
 3 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/tests/tests_pytorch/callbacks/test_model_summary.py b/tests/tests_pytorch/callbacks/test_model_summary.py
index 19755512df07c..b42907dc9a38d 100644
--- a/tests/tests_pytorch/callbacks/test_model_summary.py
+++ b/tests/tests_pytorch/callbacks/test_model_summary.py
@@ -49,7 +49,7 @@ def summarize(
         total_parameters: int,
         trainable_parameters: int,
         model_size: float,
-        training_modes,
+        total_training_modes,
         **summarize_kwargs: Any,
     ) -> None:
         assert summary_data[1][0] == "Name"
@@ -65,7 +65,7 @@ def summarize(
         assert summary_data[4][0] == "Mode"
         assert summary_data[4][1][0] == "train"
 
-        assert training_modes == [True]
+        assert total_training_modes == {"train": 1, "eval": 0}
 
     model = BoringModel()
     trainer = Trainer(default_root_dir=tmp_path, callbacks=CustomModelSummary(), max_steps=1)
diff --git a/tests/tests_pytorch/callbacks/test_rich_model_summary.py b/tests/tests_pytorch/callbacks/test_rich_model_summary.py
index 1e7b03ec2d7ed..73709fd80a833 100644
--- a/tests/tests_pytorch/callbacks/test_rich_model_summary.py
+++ b/tests/tests_pytorch/callbacks/test_rich_model_summary.py
@@ -57,7 +57,11 @@ def example_input_array(self) -> Any:
     summary_data = summary._get_summary_data()
 
     model_summary.summarize(
-        summary_data=summary_data, total_parameters=1, trainable_parameters=1, model_size=1, training_modes=[True]
+        summary_data=summary_data,
+        total_parameters=1,
+        trainable_parameters=1,
+        model_size=1,
+        total_training_modes=summary.total_training_modes,
     )
 
     # ensure that summary was logged + the breakdown of model parameters
diff --git a/tests/tests_pytorch/utilities/test_model_summary.py b/tests/tests_pytorch/utilities/test_model_summary.py
index a50ec425fc894..c7218a9f0603f 100644
--- a/tests/tests_pytorch/utilities/test_model_summary.py
+++ b/tests/tests_pytorch/utilities/test_model_summary.py
@@ -423,6 +423,27 @@ def forward(self, x):
     assert not model.layer2.training
 
 
+def test_total_training_modes():
+    """Test that the `total_training_modes` counts the modules in 'train' and 'eval' mode, excluding the root module."""
+    class ModelWithoutChildren(LightningModule):
+        pass
+
+    summary = ModelSummary(ModelWithoutChildren())
+    assert summary.total_training_modes == {"train": 0, "eval": 0}
+
+    model = DeepNestedModel()
+    summary = ModelSummary(model)
+    assert summary.total_training_modes == {"train": 19, "eval": 0}
+    assert sum(summary.total_training_modes.values()) == len(list(model.modules())) - 1
+
+    model = DeepNestedModel()
+    summary = ModelSummary(model)
+    model.branch1[1][0].eval()
+    model.branch2.eval()
+    assert summary.total_training_modes == {"train": 17, "eval": 2}
+    assert sum(summary.total_training_modes.values()) == len(list(model.modules())) - 1
+
+
 def test_summary_training_mode():
     """Test that the model summary captures the training mode on all submodules."""
     model = DeepNestedModel()
@@ -436,6 +457,7 @@ def test_summary_training_mode():
         "eval",  # branch2
         "train",  # head
     ]
+    assert summary.total_training_modes == {"train": 17, "eval": 2}
 
     summary = summarize(model, max_depth=-1)
     expected_eval = {"branch1.1.0", "branch2"}
@@ -445,5 +467,7 @@ def test_summary_training_mode():
     # A model with params not belonging to a layer
     model = NonLayerParamsModel()
     model.layer.eval()
-    summary_data = OrderedDict(summarize(model)._get_summary_data())
+    summary = summarize(model)
+    summary_data = OrderedDict(summary._get_summary_data())
     assert summary_data["Mode"] == ["eval", "n/a"]
+    assert summary.total_training_modes == {"train": 0, "eval": 1}
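With the patches up to this point applied, the new counts are reachable both through the property and the printed table. A rough usage sketch — assuming `model` is any LightningModule instance; the exact numbers depend on the model:

    from lightning.pytorch.utilities.model_summary import ModelSummary

    summary = ModelSummary(model, max_depth=1)
    print(summary.total_training_modes)  # e.g. {'train': 3, 'eval': 0}
    print(summary)  # table now ends with "Modules in train mode" / "Modules in eval mode"
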
From 3cb873223e764b6920b556433da1ad3e91e0a92f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 4 Aug 2024 09:05:21 +0000
Subject: [PATCH 08/12] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/lightning/pytorch/callbacks/model_summary.py      | 2 +-
 src/lightning/pytorch/callbacks/rich_model_summary.py | 2 +-
 .../pytorch/utilities/model_summary/model_summary.py  | 1 +
 tests/tests_pytorch/utilities/test_model_summary.py   | 4 +++-
 4 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/lightning/pytorch/callbacks/model_summary.py b/src/lightning/pytorch/callbacks/model_summary.py
index 691d58e3c6c8a..89c31b2cc65e8 100644
--- a/src/lightning/pytorch/callbacks/model_summary.py
+++ b/src/lightning/pytorch/callbacks/model_summary.py
@@ -75,7 +75,7 @@ def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -
                 trainable_parameters,
                 model_size,
                 total_training_modes,
-                **self._summarize_kwargs
+                **self._summarize_kwargs,
             )
 
     def _summary(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Union[DeepSpeedSummary, Summary]:
diff --git a/src/lightning/pytorch/callbacks/rich_model_summary.py b/src/lightning/pytorch/callbacks/rich_model_summary.py
index 1345ef71a7274..c6c429b4bd2f5 100644
--- a/src/lightning/pytorch/callbacks/rich_model_summary.py
+++ b/src/lightning/pytorch/callbacks/rich_model_summary.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Any, List, Tuple, Dict
+from typing import Any, Dict, List, Tuple
 
 from typing_extensions import override
 
diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index e9d161550eb89..00c9fe8f524b7 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -202,6 +202,7 @@ class ModelSummary:
     0.530     Total estimated model params size (MB)
     3         Modules in train mode
     0         Modules in eval mode
+
     """
 
     def __init__(self, model: "pl.LightningModule", max_depth: int = 1) -> None:
diff --git a/tests/tests_pytorch/utilities/test_model_summary.py b/tests/tests_pytorch/utilities/test_model_summary.py
index c7218a9f0603f..00fdf77d4cdfd 100644
--- a/tests/tests_pytorch/utilities/test_model_summary.py
+++ b/tests/tests_pytorch/utilities/test_model_summary.py
@@ -424,7 +424,9 @@ def forward(self, x):
 
 def test_total_training_modes():
-    """Test that the `total_training_modes` counts the modules in 'train' and 'eval' mode, excluding the root module."""
+    """Test that the `total_training_modes` counts the modules in 'train' and 'eval' mode, excluding the root
+    module."""
+
     class ModelWithoutChildren(LightningModule):
         pass

From 0389b8aa40b17bf0151bd9c927758976fbd1a6bb Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 11:12:19 +0200
Subject: [PATCH 09/12] update

---
 .../pytorch/utilities/model_summary/model_summary.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index 00c9fe8f524b7..d6724dc0721b7 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -361,9 +361,9 @@ def __str__(self) -> str:
         total_parameters = self.total_parameters
         trainable_parameters = self.trainable_parameters
         model_size = self.model_size
-        training_modes = self.training_modes
+        total_training_modes = self.total_training_modes
 
-        return _format_summary_table(total_parameters, trainable_parameters, model_size, training_modes, *arrays)
+        return _format_summary_table(total_parameters, trainable_parameters, model_size, total_training_modes, *arrays)

From 4aa28e238d41fc636e3900dc634f6063f707343f Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 11:23:13 +0200
Subject: [PATCH 10/12] fix doctest

---
 src/lightning/pytorch/utilities/model_summary/model_summary.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lightning/pytorch/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py
index d6724dc0721b7..0f48bee191c7b 100644
--- a/src/lightning/pytorch/utilities/model_summary/model_summary.py
+++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py
@@ -187,7 +187,7 @@ class ModelSummary:
     0         Non-trainable params
     132 K     Total params
     0.530     Total estimated model params size (MB)
-    1         Modules in train mode
+    3         Modules in train mode
     0         Modules in eval mode
     >>> ModelSummary(model, max_depth=-1)  # doctest: +NORMALIZE_WHITESPACE
       | Name | Type | Params | Mode | In sizes | Out sizes

From c29f5b61d03707013715fcb3ad3dcc467aeaa8ff Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 11:38:35 +0200
Subject: [PATCH 11/12] chlog

---
 src/lightning/pytorch/CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/lightning/pytorch/CHANGELOG.md b/src/lightning/pytorch/CHANGELOG.md
index 3091e1ea9b297..4d8eebf134b75 100644
--- a/src/lightning/pytorch/CHANGELOG.md
+++ b/src/lightning/pytorch/CHANGELOG.md
@@ -17,6 +17,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - The `TQDMProgressBar` now provides an option to retain prior training epoch bars ([#19578](https://github.com/Lightning-AI/pytorch-lightning/pull/19578))
 
+- Added the count of modules in train and eval mode to the printed `ModelSummary` table ([#20159](https://github.com/Lightning-AI/pytorch-lightning/pull/20159))
+
 ### Changed
 
 - Triggering KeyboardInterrupt (Ctrl+C) during `.fit()`, `.evaluate()`, `.test()` or `.predict()` now terminates all processes launched by the Trainer and exits the program ([#19976](https://github.com/Lightning-AI/pytorch-lightning/pull/19976))

From 67bf3dc5b125b67318ae1ba7ac4d16add1df41aa Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sun, 4 Aug 2024 12:48:01 +0200
Subject: [PATCH 12/12] flaky test

---
 tests/tests_pytorch/callbacks/test_early_stopping.py | 2 +-
 tests/tests_pytorch/core/test_datamodules.py         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/tests_pytorch/callbacks/test_early_stopping.py b/tests/tests_pytorch/callbacks/test_early_stopping.py
index c065cdcdbf022..633c1dc0853e0 100644
--- a/tests/tests_pytorch/callbacks/test_early_stopping.py
+++ b/tests/tests_pytorch/callbacks/test_early_stopping.py
@@ -58,7 +58,7 @@ def on_train_epoch_end(self, trainer, pl_module):
         self.saved_states.append(self.state_dict().copy())
 
 
-@RunIf(sklearn=True)
+@RunIf(sklearn=True, skip_windows=True)  # Flaky test on Windows for unknown reasons
 @mock.patch.dict(os.environ, os.environ.copy(), clear=True)
 def test_resume_early_stopping_from_checkpoint(tmp_path):
     """Prevent regressions to bugs:
diff --git a/tests/tests_pytorch/core/test_datamodules.py b/tests/tests_pytorch/core/test_datamodules.py
index 14692b359dad3..65fccb691a33d 100644
--- a/tests/tests_pytorch/core/test_datamodules.py
+++ b/tests/tests_pytorch/core/test_datamodules.py
@@ -218,7 +218,7 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
     assert dm.my_state_dict == {"my": "state_dict"}
 
 
-@RunIf(sklearn=True)
+@RunIf(sklearn=True, skip_windows=True)  # Flaky test on Windows for unknown reasons
 def test_full_loop(tmp_path):
     seed_everything(7)
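PATCH 12 is unrelated cleanup: it skips two sklearn-dependent tests on Windows. For context, a minimal sketch of the pattern behind `RunIf(skip_windows=True)` — Lightning's real helper checks many more conditions; this hypothetical reduction only shows the idea of resolving to a pytest skip marker:

    import sys

    import pytest

    def RunIf(sklearn: bool = False, skip_windows: bool = False):
        # Hypothetical reduction: only the Windows condition is handled here;
        # the real helper also verifies that optional packages (e.g. sklearn)
        # are importable before allowing the test to run.
        return pytest.mark.skipif(
            skip_windows and sys.platform == "win32", reason="flaky on Windows"
        )

    @RunIf(sklearn=True, skip_windows=True)
    def test_full_loop(tmp_path):
        ...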