diff --git a/docs/zh/development.md b/docs/zh/development.md index 0807343ddc..06d88aff34 100644 --- a/docs/zh/development.md +++ b/docs/zh/development.md @@ -116,7 +116,7 @@ model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 9, 50, "tanh") ``` py --8<-- - ppsci/arch/mlp.py:73:138 + ppsci/arch/mlp.py:86:151 --8<-- ``` @@ -124,7 +124,7 @@ model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 9, 50, "tanh") ``` py --8<-- - ppsci/arch/mlp.py:140:167 + ppsci/arch/mlp.py:153:180 --8<-- ``` diff --git a/docs/zh/examples/biharmonic2d.md b/docs/zh/examples/biharmonic2d.md index 67489c3b03..816dbf5bcc 100644 --- a/docs/zh/examples/biharmonic2d.md +++ b/docs/zh/examples/biharmonic2d.md @@ -114,7 +114,7 @@ examples/biharmonic2d/biharmonic2d.py:93:95 ``` py linenums="97" --8<-- -examples/biharmonic2d/biharmonic2d.py:97:108 +examples/biharmonic2d/biharmonic2d.py:97:106 --8<-- ``` @@ -122,9 +122,9 @@ examples/biharmonic2d/biharmonic2d.py:97:108 以作用在背板内部点的 `InteriorConstraint` 为例,代码如下: -``` py linenums="207" +``` py linenums="205" --8<-- -examples/biharmonic2d/biharmonic2d.py:207:216 +examples/biharmonic2d/biharmonic2d.py:205:214 --8<-- ``` @@ -158,19 +158,19 @@ examples/biharmonic2d/conf/biharmonic2d.yaml:60:62 #### 3.4.2 边界约束 -如 [2 问题定义](#2) 中所述,$x=0$ 处的挠度 $w$ 为 0,有如下边界条件,其他 7 个边界条件也与之类似: +如 [2. 问题定义](#2) 中所述,$x=0$ 处的挠度 $w$ 为 0,有如下边界条件,其他 7 个边界条件也与之类似: -``` py linenums="111" +``` py linenums="108" --8<-- -examples/biharmonic2d/biharmonic2d.py:111:120 +examples/biharmonic2d/biharmonic2d.py:108:118 --8<-- ``` 在方程约束、边界约束构建完毕之后,以刚才的命名为关键字,封装到一个字典中,方便后续访问。 -``` py linenums="217" +``` py linenums="215" --8<-- -examples/biharmonic2d/biharmonic2d.py:217:228 +examples/biharmonic2d/biharmonic2d.py:215:226 --8<-- ``` @@ -198,9 +198,9 @@ examples/biharmonic2d/conf/biharmonic2d.yaml:46:56 完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练,注意两个优化过程需要分别构建 `Solver`。 -``` py linenums="230" +``` py linenums="228" --8<-- -examples/biharmonic2d/biharmonic2d.py:230:269 +examples/biharmonic2d/biharmonic2d.py:228:267 --8<-- ``` @@ -208,9 +208,9 @@ examples/biharmonic2d/biharmonic2d.py:230:269 训练完成后,可以在 `eval` 模式中对训练好的模型进行评估和可视化。由于案例的特殊性,不需构建评估器和可视化器,而是使用自定义代码。 -``` py linenums="272" +``` py linenums="270" --8<-- -examples/biharmonic2d/biharmonic2d.py:272:352 +examples/biharmonic2d/biharmonic2d.py:270:350 --8<-- ``` diff --git a/docs/zh/tutorials.md b/docs/zh/tutorials.md new file mode 100644 index 0000000000..0ef12e7263 --- /dev/null +++ b/docs/zh/tutorials.md @@ -0,0 +1,6 @@ +# 学习资料 + +## 教程课件 + +- [深度学习技术与科学计算](https://aistudio.baidu.com/course/introduce/29929?sharedType=1&sharedUserId=438690&ts=1705731573142) +- [飞桨AI for Science流体力学公开课第一期](https://aistudio.baidu.com/course/introduce/27926?sharedType=1&sharedUserId=438690&ts=1705892946215) diff --git a/examples/biharmonic2d/biharmonic2d.py b/examples/biharmonic2d/biharmonic2d.py index 3cccfd6731..ad4f08256d 100644 --- a/examples/biharmonic2d/biharmonic2d.py +++ b/examples/biharmonic2d/biharmonic2d.py @@ -103,8 +103,6 @@ def train(cfg: DictConfig): "drop_last": True, "shuffle": True, }, - "num_workers": 0, - "auto_collation": False, } # set constraint diff --git a/examples/biharmonic2d/conf/biharmonic2d.yaml b/examples/biharmonic2d/conf/biharmonic2d.yaml index 4df5f54d61..67bd20f926 100644 --- a/examples/biharmonic2d/conf/biharmonic2d.yaml +++ b/examples/biharmonic2d/conf/biharmonic2d.yaml @@ -23,7 +23,7 @@ hydra: mode: train # running mode: train/eval seed: 2023 output_dir: ${hydra:run.dir} -log_freq: 200 +log_freq: 20 # set working condition E: 201880.0e+6 # Pa = 
N/m2
diff --git a/examples/bubble/bubble.py b/examples/bubble/bubble.py
index ef5e531081..a0a71b2987 100644
--- a/examples/bubble/bubble.py
+++ b/examples/bubble/bubble.py
@@ -209,7 +209,7 @@ def transform_out(in_, out):
         NPOINT_PDE * NTIME_PDE, evenly=True
     )
 
-    pred_norm = solver.predict(visu_mat, no_grad=False, return_numpy=True)
+    pred_norm = solver.predict(visu_mat, None, 4096, no_grad=False, return_numpy=True)
     # inverse normalization
     p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T
     u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T
@@ -362,7 +362,9 @@ def transform_out(in_, out):
         NPOINT_PDE * NTIME_PDE, evenly=True
     )
 
-    pred_norm = solver.predict(visu_mat, None, 4096, no_grad=False, return_numpy=True)
+    pred_norm = solver.predict(
+        visu_mat, None, 4096 * 2, no_grad=False, return_numpy=True
+    )
     # inverse normalization
     p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T
     u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T
diff --git a/mkdocs.yml b/mkdocs.yml
index 0d274018db..343c893355 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -34,6 +34,7 @@ nav:
     - 功能介绍: zh/overview.md
     - 安装使用: zh/install_setup.md
     - 快速开始: zh/quickstart.md
+    - 学习资料: zh/tutorials.md
     - 经典案例:
       - " ":
         - 数学(AI for Math):
diff --git a/ppsci/arch/activation.py b/ppsci/arch/activation.py
index 73ac202fce..c78ae238ff 100644
--- a/ppsci/arch/activation.py
+++ b/ppsci/arch/activation.py
@@ -75,6 +75,12 @@ def forward(self, x):
 
 
 class Silu(nn.Layer):
+    """
+    FIXME: This activation class is a workaround for potential NaNs produced by the
+    native SiLU implementation; it computes x * sigmoid(x) explicitly instead of
+    calling silu(x).
+    """
+
     def __init__(self):
         super().__init__()
 
diff --git a/ppsci/arch/afno.py b/ppsci/arch/afno.py
index 6f820bd4d8..ec113c03e7 100644
--- a/ppsci/arch/afno.py
+++ b/ppsci/arch/afno.py
@@ -529,9 +529,8 @@ def forward_tensor(self, x):
 
         return x
 
-    def split_to_dict(
-        self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-    ):
+    @staticmethod
+    def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
         return {key: data_tensors[i] for i, key in enumerate(keys)}
 
     def forward(self, x):
@@ -653,9 +652,8 @@ def forward_tensor(self, x):
         x = self.act(x)
         return x
 
-    def split_to_dict(
-        self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-    ):
+    @staticmethod
+    def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
         return {key: data_tensors[i] for i, key in enumerate(keys)}
 
     def forward(self, x):
diff --git a/ppsci/arch/base.py b/ppsci/arch/base.py
index e5569db37e..a3f4831c09 100644
--- a/ppsci/arch/base.py
+++ b/ppsci/arch/base.py
@@ -60,8 +60,9 @@ def num_params(self) -> int:
                 logger.warning(f"{name} has no attribute 'shape'")
         return num
 
+    @staticmethod
     def concat_to_tensor(
-        self, data_dict: Dict[str, paddle.Tensor], keys: Tuple[str, ...], axis=-1
+        data_dict: Dict[str, paddle.Tensor], keys: Tuple[str, ...], axis=-1
     ) -> Tuple[paddle.Tensor, ...]:
         """Concatenate tensors from dict in the order of given keys.
 
@@ -95,8 +96,9 @@ def concat_to_tensor(
         data = [data_dict[key] for key in keys]
         return paddle.concat(data, axis)
 
+    @staticmethod
     def split_to_dict(
-        self, data_tensor: paddle.Tensor, keys: Tuple[str, ...], axis=-1
+        data_tensor: paddle.Tensor, keys: Tuple[str, ...], axis=-1
     ) -> Dict[str, paddle.Tensor]:
         """Split tensor and wrap into a dict by given keys.
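The `concat_to_tensor` / `split_to_dict` pair converted to `@staticmethod` above is a simple keyed pack/unpack helper; since neither touches `self`, the decorator is the right fit and lets call sites use them without an instance. A minimal sketch of the behavior (standalone functions with illustrative shapes, not the actual `ppsci.arch.base.Arch` methods):

``` py
import paddle

def concat_to_tensor(data_dict, keys, axis=-1):
    # concatenate the tensors selected by `keys`, in key order, along `axis`
    return paddle.concat([data_dict[key] for key in keys], axis)

def split_to_dict(data_tensor, keys, axis=-1):
    # inverse of concat_to_tensor: split evenly into len(keys) chunks
    data = paddle.split(data_tensor, len(keys), axis=axis)
    return {key: data[i] for i, key in enumerate(keys)}

inputs = {"x": paddle.randn([8, 1]), "y": paddle.randn([8, 1])}
xy = concat_to_tensor(inputs, ("x", "y"))  # shape [8, 2]
restored = split_to_dict(xy, ("x", "y"))   # {"x": [8, 1], "y": [8, 1]}
```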
diff --git a/ppsci/arch/embedding_koopman.py b/ppsci/arch/embedding_koopman.py
index a4c852da3b..36a37cc0d8 100644
--- a/ppsci/arch/embedding_koopman.py
+++ b/ppsci/arch/embedding_koopman.py
@@ -177,9 +177,8 @@ def forward_tensor(self, x):
 
         return (pred_data[:, :-1, :], recover_data, k_matrix)
 
-    def split_to_dict(
-        self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-    ):
+    @staticmethod
+    def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
         return {key: data_tensors[i] for i, key in enumerate(keys)}
 
     def forward(self, x):
@@ -482,9 +481,8 @@ def forward_tensor(self, states, visc):
 
         return (pred_data[:, :-1], recover_data, k_matrix)
 
-    def split_to_dict(
-        self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-    ):
+    @staticmethod
+    def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
         return {key: data_tensors[i] for i, key in enumerate(keys)}
 
     def forward(self, x):
diff --git a/ppsci/arch/epnn.py b/ppsci/arch/epnn.py
index f7ca76c6db..67f901dc24 100644
--- a/ppsci/arch/epnn.py
+++ b/ppsci/arch/epnn.py
@@ -66,10 +66,13 @@ class Epnn(base.Arch):
     Examples:
         >>> import ppsci
         >>> ann_node_sizes_state = [1]
-        >>> model = ppsci.arch.Epnn(("x",), ("y",), node_sizes=node_sizes_state,
-            activations=("leaky_relu"),
-            drop_p=0.0
-        )
+        >>> model = ppsci.arch.Epnn(
+        ...     ("x",),
+        ...     ("y",),
+        ...     node_sizes=ann_node_sizes_state,
+        ...     activations=("leaky_relu",),
+        ...     drop_p=0.0,
+        ... )
     """
 
     def __init__(
diff --git a/ppsci/arch/gan.py b/ppsci/arch/gan.py
index e78a291fe4..3daf311e64 100644
--- a/ppsci/arch/gan.py
+++ b/ppsci/arch/gan.py
@@ -352,8 +352,9 @@ def forward(self, x):
 
         return y
 
+    @staticmethod
     def split_to_dict(
-        self, data_list: List[paddle.Tensor], keys: Tuple[str, ...]
+        data_list: List[paddle.Tensor], keys: Tuple[str, ...]
     ) -> Dict[str, paddle.Tensor]:
         """Overwrite of split_to_dict() method belongs to Class base.Arch.
 
diff --git a/ppsci/arch/nowcastnet.py b/ppsci/arch/nowcastnet.py
index 4fc3d6a613..ed59e2f179 100644
--- a/ppsci/arch/nowcastnet.py
+++ b/ppsci/arch/nowcastnet.py
@@ -62,9 +62,8 @@ def __init__(
         sample_tensor = paddle.zeros(shape=[1, 1, self.image_height, self.image_width])
         self.grid = make_grid(sample_tensor)
 
-    def split_to_dict(
-        self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-    ):
+    @staticmethod
+    def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
         return {key: data_tensors[i] for i, key in enumerate(keys)}
 
     def forward(self, x):
diff --git a/ppsci/arch/physx_transformer.py b/ppsci/arch/physx_transformer.py
index 014cc0e5f3..9e734df773 100644
--- a/ppsci/arch/physx_transformer.py
+++ b/ppsci/arch/physx_transformer.py
@@ -367,7 +367,8 @@ def forward_eval(self, x):
         outputs = self.generate(input_embeds)
         return (outputs[:, 1:],)
 
-    def split_to_dict(self, data_tensors, keys):
+    @staticmethod
+    def split_to_dict(data_tensors, keys):
         return {key: data_tensors[i] for i, key in enumerate(keys)}
 
     def forward(self, x):
diff --git a/ppsci/data/__init__.py b/ppsci/data/__init__.py
index 40dc96bee5..3d8aab7ef3 100644
--- a/ppsci/data/__init__.py
+++ b/ppsci/data/__init__.py
@@ -128,7 +128,10 @@ def build_dataloader(_dataset, cfg):
             collate_fn=collate_fn,
         )
     else:
-        if cfg.get("auto_collation", True) is False:
+        if (
+            cfg.get("auto_collation", not getattr(_dataset, "batch_index", False))
+            is False
+        ):
             # 1. wrap batch_sampler again into BatchSampler for disabling auto collation,
             # which can speed up the process of batch samples indexing from dataset. See
             # details at: https://discuss.pytorch.org/t/efficiency-of-dataloader-and-collate-for-large-array-like-datasets/59569/8
@@ -140,10 +143,14 @@ def build_dataloader(_dataset, cfg):
             )
             # 2. disable auto collation by given identity collate_fn which return the first
             # (also the only) batch data in batch list, or there will be a redundant
-            # axis at the first dimension returned by dataloader. It is step is necessary
+            # axis at the first dimension returned by dataloader. This step is necessary
             # because paddle do not support 'sampler' as instantiation argument of 'io.DataLoader'
             collate_fn = lambda batch: batch[0]  # noqa: E731
-            logger.info("Auto collation is disabled to speed up batch sampling")
+            _DEFAULT_NUM_WORKERS = 0
+            logger.info(
+                "Auto collation is disabled and num_workers is set to "
+                f"{_DEFAULT_NUM_WORKERS} to speed up batch sampling."
+            )
 
         dataloader_ = io.DataLoader(
             dataset=_dataset,
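The branch above is the heart of this change: `auto_collation` now defaults to off whenever the dataset advertises `batch_index`. A self-contained sketch of the same double-`BatchSampler` trick (the `BatchIndexedDataset` class is hypothetical; the `paddle.io` APIs used are standard):

``` py
import numpy as np
from paddle import io

class BatchIndexedDataset(io.Dataset):
    # Hypothetical dataset supporting batch indexing: dataset[[i0, i1, ...]]
    # returns a whole batch in one call instead of one sample at a time.
    batch_index: bool = True

    def __init__(self, n: int = 128):
        self.x = np.random.randn(n, 2).astype("float32")

    def __getitem__(self, idx):
        # `idx` is a list of ints when batch indexing is in effect
        return self.x[idx]

    def __len__(self):
        return len(self.x)

dataset = BatchIndexedDataset()
# inner sampler yields full lists of indices per batch
inner = io.BatchSampler(dataset=dataset, batch_size=32, shuffle=True)
# re-wrapping with batch_size=1 hands each whole index list to the dataset at once
outer = io.BatchSampler(sampler=inner, batch_size=1, drop_last=False)
loader = io.DataLoader(
    dataset=dataset,
    batch_sampler=outer,
    # identity collate: the batch list holds exactly one pre-batched item,
    # so return it directly instead of stacking a redundant leading axis
    collate_fn=lambda batch: batch[0],
    num_workers=0,
)
```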
diff --git a/ppsci/data/dataset/airfoil_dataset.py b/ppsci/data/dataset/airfoil_dataset.py
index fa36d89825..2a249104f7 100644
--- a/ppsci/data/dataset/airfoil_dataset.py
+++ b/ppsci/data/dataset/airfoil_dataset.py
@@ -109,6 +109,9 @@ class MeshAirfoilDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     use_pgl: bool = True
 
     def __init__(
diff --git a/ppsci/data/dataset/array_dataset.py b/ppsci/data/dataset/array_dataset.py
index 8a536a1094..2acba441bc 100644
--- a/ppsci/data/dataset/array_dataset.py
+++ b/ppsci/data/dataset/array_dataset.py
@@ -41,6 +41,9 @@ class NamedArrayDataset(io.Dataset):
         >>> dataset = ppsci.data.dataset.NamedArrayDataset(input, output, weight)
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = True
+
     def __init__(
         self,
         input: Dict[str, np.ndarray],
@@ -91,6 +94,9 @@ class IterableNamedArrayDataset(io.IterableDataset):
         >>> dataset = ppsci.data.dataset.IterableNamedArrayDataset(input, label, weight)
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         input: Dict[str, np.ndarray],
diff --git a/ppsci/data/dataset/csv_dataset.py b/ppsci/data/dataset/csv_dataset.py
index 0fbed42b17..c14bb107da 100644
--- a/ppsci/data/dataset/csv_dataset.py
+++ b/ppsci/data/dataset/csv_dataset.py
@@ -54,6 +54,9 @@ class CSVDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = True
+
     def __init__(
         self,
         file_path: str,
@@ -176,6 +179,9 @@ class IterableCSVDataset(io.IterableDataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
diff --git a/ppsci/data/dataset/cylinder_dataset.py b/ppsci/data/dataset/cylinder_dataset.py
index e18fd50bb5..3a49b7d436 100644
--- a/ppsci/data/dataset/cylinder_dataset.py
+++ b/ppsci/data/dataset/cylinder_dataset.py
@@ -55,6 +55,9 @@ class MeshCylinderDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     use_pgl: bool = True
 
     def __init__(
diff --git a/ppsci/data/dataset/era5_dataset.py b/ppsci/data/dataset/era5_dataset.py
index d02f347969..e9fa86d243 100644
--- a/ppsci/data/dataset/era5_dataset.py
+++ b/ppsci/data/dataset/era5_dataset.py
@@ -51,6 +51,9 @@ class ERA5Dataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
diff --git a/ppsci/data/dataset/mat_dataset.py b/ppsci/data/dataset/mat_dataset.py
index 2cc42d4650..609e35aeaa 100644
--- a/ppsci/data/dataset/mat_dataset.py
+++ b/ppsci/data/dataset/mat_dataset.py
@@ -54,6 +54,9 @@ class MatDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = True
+
     def __init__(
         self,
         file_path: str,
@@ -176,6 +179,9 @@ class IterableMatDataset(io.IterableDataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
diff --git a/ppsci/data/dataset/npz_dataset.py b/ppsci/data/dataset/npz_dataset.py
index 28fcee218d..76d737d021 100644
--- a/ppsci/data/dataset/npz_dataset.py
+++ b/ppsci/data/dataset/npz_dataset.py
@@ -54,6 +54,9 @@ class NPZDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = True
+
     def __init__(
         self,
         file_path: str,
@@ -172,6 +175,9 @@ class IterableNPZDataset(io.IterableDataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
diff --git a/ppsci/data/dataset/radar_dataset.py b/ppsci/data/dataset/radar_dataset.py
index 89d9ebdc47..e9d87b3515 100644
--- a/ppsci/data/dataset/radar_dataset.py
+++ b/ppsci/data/dataset/radar_dataset.py
@@ -57,6 +57,9 @@ class RadarDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         input_keys: Tuple[str, ...],
@@ -105,7 +108,7 @@ def __init__(
             )
             self.case_list.append(case)
 
-    def load(self, index):
+    def _load(self, index):
         data = []
         for img_path in self.case_list[index]:
             img = cv2.imread(img_path, 2)
@@ -115,7 +118,7 @@ def load(self, index):
         return data
 
     def __getitem__(self, index):
-        data = self.load(index)[-self.length :].copy()
+        data = self._load(index)[-self.length :].copy()
         mask = np.ones_like(data)
         mask[data < 0] = 0
         data[data < 0] = 0
diff --git a/ppsci/data/dataset/trphysx_dataset.py b/ppsci/data/dataset/trphysx_dataset.py
index ffc9c143c3..51c078b1c8 100644
--- a/ppsci/data/dataset/trphysx_dataset.py
+++ b/ppsci/data/dataset/trphysx_dataset.py
@@ -55,6 +55,9 @@ class LorenzDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
@@ -112,21 +115,21 @@ def read_data(self, file_path: str, block_size: int, stride: int):
     def __len__(self):
         return len(self.data)
 
-    def __getitem__(self, i):
+    def __getitem__(self, idx):
         # when embedding data is None
         if self.embedding_data is None:
-            data_item = self.data[i]
+            data_item = self.data[idx]
             input_item = {self.input_keys[0]: data_item}
             label_item = {
                 self.label_keys[0]: data_item[1:, :],
                 self.label_keys[1]: data_item,
             }
         else:
-            data_item = self.embedding_data[i]
+            data_item = self.embedding_data[idx]
             input_item = {self.input_keys[0]: data_item[:-1, :]}
             label_item = {self.label_keys[0]: data_item[1:, :]}
             if len(self.label_keys) == 2:
-                label_item[self.label_keys[1]] = self.data[i][1:, :]
+                label_item[self.label_keys[1]] = self.data[idx][1:, :]
 
         weight_shape = [1] * len(data_item.shape)
         weight_item = {
@@ -160,6 +163,9 @@ class RosslerDataset(LorenzDataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
@@ -214,6 +220,9 @@ class CylinderDataset(io.Dataset):
         ...     ) # doctest: +SKIP
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = False
+
     def __init__(
         self,
         file_path: str,
diff --git a/ppsci/data/dataset/vtu_dataset.py b/ppsci/data/dataset/vtu_dataset.py
index 4a454a1e94..f238a45c7f 100644
--- a/ppsci/data/dataset/vtu_dataset.py
+++ b/ppsci/data/dataset/vtu_dataset.py
@@ -39,6 +39,9 @@ class VtuDataset(io.Dataset):
             transform(s).
     """
 
+    # Whether to support batch indexing to speed up the fetching process.
+    batch_index: bool = True
+
     def __init__(
         self,
         file_path: str,
diff --git a/ppsci/solver/train.py b/ppsci/solver/train.py
index 909b617615..5405f53d72 100644
--- a/ppsci/solver/train.py
+++ b/ppsci/solver/train.py
@@ -155,36 +155,38 @@ def train_LBFGS_epoch_func(solver: "solver.Solver", epoch_id: int, log_freq: int
     loss_dict = misc.Prettydefaultdict(float)
     loss_dict["loss"] = 0.0
     total_batch_size = 0
-    reader_cost = 0
-    batch_cost = 0
+    reader_cost = 0.0
+    batch_cost = 0.0
     reader_tic = time.perf_counter()
 
     input_dicts = []
     label_dicts = []
     weight_dicts = []
     for _, _constraint in solver.constraint.items():
+        # fetch data from data loader
         try:
             input_dict, label_dict, weight_dict = next(_constraint.data_iter)
         except StopIteration:
             _constraint.data_iter = iter(_constraint.data_loader)
             input_dict, label_dict, weight_dict = next(_constraint.data_iter)
         reader_cost += time.perf_counter() - reader_tic
+
         for v in input_dict.values():
             if hasattr(v, "stop_gradient"):
                 v.stop_gradient = False
-        # gather all constraint data into list
+        # gather each constraint's input, label and weight into lists
         input_dicts.append(input_dict)
         label_dicts.append(label_dict)
         weight_dicts.append(weight_dict)
         total_batch_size += next(iter(input_dict.values())).shape[0]
         reader_tic = time.perf_counter()
 
-    def closure():
+    def closure() -> paddle.Tensor:
         """Forward-backward closure function for LBFGS optimizer.
 
         Returns:
-            Tensor: Computed loss.
+            paddle.Tensor: Computed loss scalar.
         """
         total_loss = 0
         with solver.no_sync_context_manager(solver.world_size > 1, solver.model):
@@ -230,6 +232,8 @@ def closure():
 
         if solver.lr_scheduler is not None and not solver.lr_scheduler.by_epoch:
             solver.lr_scheduler.step()
+        if solver.benchmark_flag:
+            paddle.device.synchronize()
         batch_cost += time.perf_counter() - batch_tic
 
         # update and log training information
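For context on `train_LBFGS_epoch_func`, the closure annotated above follows the standard L-BFGS pattern: the optimizer may re-evaluate the loss several times within a single step, so the forward-backward pass lives in a callable it can re-invoke. A minimal sketch with a toy model (assumes `paddle.optimizer.LBFGS`, available in Paddle >= 2.5; not the actual ppsci training loop):

``` py
import paddle

# toy regression data and model
x = paddle.randn([64, 1])
y = 3.0 * x + 0.5
model = paddle.nn.Linear(1, 1)
optimizer = paddle.optimizer.LBFGS(learning_rate=1.0, parameters=model.parameters())

def closure() -> paddle.Tensor:
    # L-BFGS calls this repeatedly per step, so clear stale gradients first
    optimizer.clear_grad()
    loss = paddle.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

for _ in range(5):
    optimizer.step(closure)
```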
""" total_loss = 0 with solver.no_sync_context_manager(solver.world_size > 1, solver.model): @@ -230,6 +232,8 @@ def closure(): if solver.lr_scheduler is not None and not solver.lr_scheduler.by_epoch: solver.lr_scheduler.step() + if solver.benchmark_flag: + paddle.device.synchronize() batch_cost += time.perf_counter() - batch_tic # update and log training information diff --git a/ppsci/utils/misc.py b/ppsci/utils/misc.py index 8247ab2a0f..b5fe97a812 100644 --- a/ppsci/utils/misc.py +++ b/ppsci/utils/misc.py @@ -582,13 +582,16 @@ def plot_curve( plt.plot(np.arange(data_arr.shape[0]) * smooth_step, data_arr) plt.legend( list(data.keys()), - loc="lower left", + loc="upper left", + bbox_to_anchor=(1, 1), ) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.grid() plt.yticks(size=10) plt.xticks(size=10) + plt.tight_layout() - plt.savefig(os.path.join(output_dir, f"{xlabel}-{ylabel}_curve.jpg")) + plt.savefig(os.path.join(output_dir, f"{xlabel}-{ylabel}_curve.jpg"), dpi=200) plt.clf() + plt.close() diff --git a/ppsci/utils/symbolic.py b/ppsci/utils/symbolic.py index f322e0a7d3..ba105ec0a0 100644 --- a/ppsci/utils/symbolic.py +++ b/ppsci/utils/symbolic.py @@ -697,7 +697,7 @@ def lambdify( Args: expr (Union[sp.Basic, List[sp.Basic]]): Sympy expression(s) to be converted. - will return callable functions in list if multiple expressions are given. + Will return callable functions in list if multiple expressions are given. else will return one single callable function. models (Optional[Union[arch.Arch, Tuple[arch.Arch, ...]]]): Model(s) for computing forward result in `LayerNode`.