4 changes: 2 additions & 2 deletions docs/zh/development.md
@@ -116,15 +116,15 @@ model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 9, 50, "tanh")

``` py
--8<--
-ppsci/arch/mlp.py:73:138
+ppsci/arch/mlp.py:86:151
--8<--
```

=== "MLP.forward"

``` py
--8<--
-ppsci/arch/mlp.py:140:167
+ppsci/arch/mlp.py:153:180
--8<--
```

24 changes: 12 additions & 12 deletions docs/zh/examples/biharmonic2d.md
@@ -114,17 +114,17 @@ examples/biharmonic2d/biharmonic2d.py:93:95

``` py linenums="97"
--8<--
-examples/biharmonic2d/biharmonic2d.py:97:108
+examples/biharmonic2d/biharmonic2d.py:97:106
--8<--
```

#### 3.4.1 Interior constraints

Take the `InteriorConstraint` acting on the interior points of the back plate as an example; the code is as follows:

``` py linenums="207"
``` py linenums="205"
--8<--
-examples/biharmonic2d/biharmonic2d.py:207:216
+examples/biharmonic2d/biharmonic2d.py:205:214
--8<--
```
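The diff above only shifts the embedded line range, so the snippet body itself is not visible here. For orientation, an `InteriorConstraint` in PaddleScience is typically built along these lines (a sketch; `equation`, `geom`, `train_dataloader_cfg`, and `cfg` are assumed from the surrounding script):

``` py
import ppsci

# Sketch of the usual PaddleScience pattern; variable names are assumptions.
pde_constraint = ppsci.constraint.InteriorConstraint(
    equation["biharmonic"].equations,  # residual expressions of the PDE
    {"biharmonic": 0},                 # target: drive the residual to zero
    geom["rect"],                      # interior of the plate geometry
    {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.pde},
    ppsci.loss.MSELoss("sum"),
    name="EQ",
)
```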

@@ -158,19 +158,19 @@ examples/biharmonic2d/conf/biharmonic2d.yaml:60:62

#### 3.4.2 Boundary constraints

-As described in [2 Problem Definition](#2), the deflection $w$ at $x=0$ is 0, giving the following boundary condition; the other 7 boundary conditions are similar:
+As described in [2. Problem Definition](#2), the deflection $w$ at $x=0$ is 0, giving the following boundary condition; the other 7 boundary conditions are similar:

``` py linenums="111"
``` py linenums="108"
--8<--
-examples/biharmonic2d/biharmonic2d.py:111:120
+examples/biharmonic2d/biharmonic2d.py:108:118
--8<--
```

After the equation constraints and boundary constraints have been built, they are wrapped into a dict keyed by the names just assigned, for convenient access later.

``` py linenums="217"
``` py linenums="215"
--8<--
-examples/biharmonic2d/biharmonic2d.py:217:228
+examples/biharmonic2d/biharmonic2d.py:215:226
--8<--
```
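The resulting dict looks roughly like this (an illustrative sketch; only two entries are shown):

``` py
# Gather all constraints under their names for later access.
constraint = {
    pde_constraint.name: pde_constraint,
    bc_w.name: bc_w,
    # ... the remaining boundary constraints, keyed the same way
}
```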

@@ -198,19 +198,19 @@ examples/biharmonic2d/conf/biharmonic2d.yaml:46:56

Once the above setup is complete, simply pass the instantiated objects above, in order, to `ppsci.solver.Solver` and start training. Note that the two optimization stages each require their own `Solver`.

``` py linenums="230"
``` py linenums="228"
--8<--
-examples/biharmonic2d/biharmonic2d.py:230:269
+examples/biharmonic2d/biharmonic2d.py:228:267
--8<--
```
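A minimal sketch of the two-`Solver` pattern, assuming the usual `ppsci.solver.Solver` signature (`model`, `constraint`, and the two optimizers are assumed to have been built in earlier steps):

``` py
import ppsci

solver = ppsci.solver.Solver(
    model,
    constraint,
    cfg.output_dir,
    optimizer_adam,  # first stage: Adam
    epochs=cfg.TRAIN.epochs,
    iters_per_epoch=cfg.TRAIN.iters_per_epoch,
)
solver.train()

solver = ppsci.solver.Solver(
    model,
    constraint,
    cfg.output_dir,
    optimizer_lbfgs,  # second stage: L-BFGS fine-tuning
    epochs=cfg.TRAIN.epochs_lbfgs,
    iters_per_epoch=cfg.TRAIN.iters_per_epoch,
)
solver.train()
```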

### 3.8 Model evaluation and visualization

After training, the trained model can be evaluated and visualized in `eval` mode. Owing to the particularities of this case, no evaluator or visualizer is constructed; custom code is used instead.

``` py linenums="272"
``` py linenums="270"
--8<--
-examples/biharmonic2d/biharmonic2d.py:272:352
+examples/biharmonic2d/biharmonic2d.py:270:350
--8<--
```

6 changes: 6 additions & 0 deletions docs/zh/tutorials.md
@@ -0,0 +1,6 @@
+# Learning materials
+
+## Tutorial slides
+
+- [Deep Learning and Scientific Computing](https://aistudio.baidu.com/course/introduce/29929?sharedType=1&sharedUserId=438690&ts=1705731573142)
+- [PaddlePaddle AI for Science Open Course on Fluid Mechanics, Part 1](https://aistudio.baidu.com/course/introduce/27926?sharedType=1&sharedUserId=438690&ts=1705892946215)
2 changes: 0 additions & 2 deletions examples/biharmonic2d/biharmonic2d.py
@@ -103,8 +103,6 @@ def train(cfg: DictConfig):
"drop_last": True,
"shuffle": True,
},
"num_workers": 0,
"auto_collation": False,
}

# set constraint
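After this removal the dataloader config carries only the sampler settings, and both options fall back to the defaults chosen by `build_dataloader` (a sketch of the resulting dict; the `"dataset"` entry is an assumption, as it is not shown in this diff):

``` py
train_dataloader_cfg = {
    "dataset": "NamedArrayDataset",  # assumed; not visible in this hunk
    "sampler": {
        "name": "BatchSampler",
        "drop_last": True,
        "shuffle": True,
    },
    # "num_workers" and "auto_collation" are no longer set explicitly;
    # build_dataloader now infers them (see ppsci/data/__init__.py below).
}
```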
2 changes: 1 addition & 1 deletion examples/biharmonic2d/conf/biharmonic2d.yaml
@@ -23,7 +23,7 @@ hydra:
mode: train # running mode: train/eval
seed: 2023
output_dir: ${hydra:run.dir}
-log_freq: 200
+log_freq: 20

# set working condition
E: 201880.0e+6 # Pa = N/m2
6 changes: 4 additions & 2 deletions examples/bubble/bubble.py
@@ -209,7 +209,7 @@ def transform_out(in_, out):
NPOINT_PDE * NTIME_PDE, evenly=True
)

-pred_norm = solver.predict(visu_mat, no_grad=False, return_numpy=True)
+pred_norm = solver.predict(visu_mat, None, 4096, no_grad=False, return_numpy=True)
# inverse normalization
p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T
u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T
@@ -362,7 +362,9 @@ def transform_out(in_, out):
NPOINT_PDE * NTIME_PDE, evenly=True
)

-pred_norm = solver.predict(visu_mat, None, 4096, no_grad=False, return_numpy=True)
+pred_norm = solver.predict(
+    visu_mat, None, 4096 * 2, no_grad=False, return_numpy=True
+)
# inverse normalization
p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T
u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T
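For readers puzzled by the bare `None, 4096` in these calls: judging by the call order, the two new positional slots are an expression dict and a batch size (the parameter names in the comments below are assumptions, not confirmed by this diff):

``` py
pred_norm = solver.predict(
    visu_mat,           # input dict of coordinate/time arrays
    None,               # expr_dict: no extra output expressions
    4096,               # batch_size: predict in chunks to bound memory use
    no_grad=False,      # keep gradients; outputs feed derivative terms
    return_numpy=True,  # return numpy arrays rather than paddle.Tensor
)
```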
1 change: 1 addition & 0 deletions mkdocs.yml
@@ -34,6 +34,7 @@ nav:
- Feature overview: zh/overview.md
- Installation and setup: zh/install_setup.md
- Quick start: zh/quickstart.md
+- Learning materials: zh/tutorials.md
- Classic examples:
- " ":
- Mathematics (AI for Math):
6 changes: 6 additions & 0 deletions ppsci/arch/activation.py
@@ -75,6 +75,12 @@ def forward(self, x):


class Silu(nn.Layer):
"""
FIXME: This activation function is a workaround for the potential occurrence of NaNs
during the computation of the native SiLU function via using x*sigmoid(x) instead of
silu(x)
"""

def __init__(self):
super().__init__()

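The body of `forward` is collapsed in this diff; per the FIXME, the workaround presumably amounts to the following (a sketch, not the verbatim source):

``` py
import paddle.nn as nn
import paddle.nn.functional as F

class Silu(nn.Layer):
    """Compute x * sigmoid(x) directly instead of calling the native silu
    kernel, which has been observed to produce NaNs."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x * F.sigmoid(x)
```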
10 changes: 4 additions & 6 deletions ppsci/arch/afno.py
@@ -529,9 +529,8 @@ def forward_tensor(self, x):

return x

-def split_to_dict(
-    self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-):
+@staticmethod
+def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
return {key: data_tensors[i] for i, key in enumerate(keys)}

def forward(self, x):
@@ -653,9 +652,8 @@ def forward_tensor(self, x):
x = self.act(x)
return x

-def split_to_dict(
-    self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-):
+@staticmethod
+def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
return {key: data_tensors[i] for i, key in enumerate(keys)}

def forward(self, x):
6 changes: 4 additions & 2 deletions ppsci/arch/base.py
@@ -60,8 +60,9 @@ def num_params(self) -> int:
logger.warning(f"{name} has no attribute 'shape'")
return num

+@staticmethod
 def concat_to_tensor(
-    self, data_dict: Dict[str, paddle.Tensor], keys: Tuple[str, ...], axis=-1
+    data_dict: Dict[str, paddle.Tensor], keys: Tuple[str, ...], axis=-1
) -> Tuple[paddle.Tensor, ...]:
"""Concatenate tensors from dict in the order of given keys.

@@ -95,8 +96,9 @@ def concat_to_tensor(
data = [data_dict[key] for key in keys]
return paddle.concat(data, axis)

+@staticmethod
 def split_to_dict(
-    self, data_tensor: paddle.Tensor, keys: Tuple[str, ...], axis=-1
+    data_tensor: paddle.Tensor, keys: Tuple[str, ...], axis=-1
) -> Dict[str, paddle.Tensor]:
"""Split tensor and wrap into a dict by given keys.

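With the `@staticmethod` conversion, both helpers can be called without an `Arch` instance (a usage sketch, assuming the class is importable as shown):

``` py
import paddle
from ppsci.arch.base import Arch

data = {"x": paddle.ones([8, 1]), "y": paddle.zeros([8, 1])}
xy = Arch.concat_to_tensor(data, ("x", "y"), axis=-1)  # shape [8, 2]
out = Arch.split_to_dict(xy, ("x", "y"), axis=-1)      # {"x": [8, 1], "y": [8, 1]}
```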
10 changes: 4 additions & 6 deletions ppsci/arch/embedding_koopman.py
@@ -177,9 +177,8 @@ def forward_tensor(self, x):

return (pred_data[:, :-1, :], recover_data, k_matrix)

-def split_to_dict(
-    self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-):
+@staticmethod
+def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
return {key: data_tensors[i] for i, key in enumerate(keys)}

def forward(self, x):
@@ -482,9 +481,8 @@ def forward_tensor(self, states, visc):

return (pred_data[:, :-1], recover_data, k_matrix)

-def split_to_dict(
-    self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-):
+@staticmethod
+def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
return {key: data_tensors[i] for i, key in enumerate(keys)}

def forward(self, x):
11 changes: 7 additions & 4 deletions ppsci/arch/epnn.py
@@ -66,10 +66,13 @@ class Epnn(base.Arch):
Examples:
>>> import ppsci
>>> ann_node_sizes_state = [1]
>>> model = ppsci.arch.Epnn(("x",), ("y",), node_sizes=node_sizes_state,
activations=("leaky_relu"),
drop_p=0.0
)
>>> model = ppsci.arch.Epnn(
... ("x",),
... ("y",),
... node_sizes=ann_node_sizes_state,
... activations=("leaky_relu"),
... drop_p=0.0,
... )
"""

def __init__(
3 changes: 2 additions & 1 deletion ppsci/arch/gan.py
@@ -352,8 +352,9 @@ def forward(self, x):

return y

+@staticmethod
 def split_to_dict(
-    self, data_list: List[paddle.Tensor], keys: Tuple[str, ...]
+    data_list: List[paddle.Tensor], keys: Tuple[str, ...]
) -> Dict[str, paddle.Tensor]:
"""Overwrite of split_to_dict() method belongs to Class base.Arch.

5 changes: 2 additions & 3 deletions ppsci/arch/nowcastnet.py
@@ -62,9 +62,8 @@ def __init__(
sample_tensor = paddle.zeros(shape=[1, 1, self.image_height, self.image_width])
self.grid = make_grid(sample_tensor)

-def split_to_dict(
-    self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]
-):
+@staticmethod
+def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]):
return {key: data_tensors[i] for i, key in enumerate(keys)}

def forward(self, x):
3 changes: 2 additions & 1 deletion ppsci/arch/physx_transformer.py
@@ -367,7 +367,8 @@ def forward_eval(self, x):
outputs = self.generate(input_embeds)
return (outputs[:, 1:],)

-def split_to_dict(self, data_tensors, keys):
+@staticmethod
+def split_to_dict(data_tensors, keys):
return {key: data_tensors[i] for i, key in enumerate(keys)}

def forward(self, x):
13 changes: 10 additions & 3 deletions ppsci/data/__init__.py
@@ -128,7 +128,10 @@ def build_dataloader(_dataset, cfg):
collate_fn=collate_fn,
)
else:
if cfg.get("auto_collation", True) is False:
if (
cfg.get("auto_collation", not getattr(_dataset, "batch_index", False))
is False
):
# 1. wrap batch_sampler again into BatchSampler for disabling auto collation,
# which can speed up the process of batch samples indexing from dataset. See
# details at: https://discuss.pytorch.org/t/efficiency-of-dataloader-and-collate-for-large-array-like-datasets/59569/8
@@ -140,10 +143,14 @@
)
# 2. disable auto collation by given identity collate_fn which return the first
# (also the only) batch data in batch list, or there will be a redundant
-# axis at the first dimension returned by dataloader. It is step is necessary
+# axis at the first dimension returned by dataloader. This step is necessary
# because paddle do not support 'sampler' as instantiation argument of 'io.DataLoader'
collate_fn = lambda batch: batch[0] # noqa: E731
logger.info("Auto collation is disabled to speed up batch sampling")
_DEFAULT_NUM_WORKERS = 0
logger.info(
"Auto collation is disabled and set num_workers to "
f"{_DEFAULT_NUM_WORKERS} to speed up batch sampling."
)

dataloader_ = io.DataLoader(
dataset=_dataset,
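The mechanics of this fast path can be reproduced standalone: wrap the real batch sampler in an outer `BatchSampler` of batch size 1, so each "sample" handed to the dataset is already a whole batch of indices, then unwrap the singleton batch list with an identity `collate_fn`. A sketch under those assumptions:

``` py
import numpy as np
from paddle import io

class ArrayDataset(io.Dataset):
    def __init__(self, data: np.ndarray):
        self.data = data

    def __getitem__(self, idx):
        # idx arrives as a whole list of indices -> one vectorized fancy-index read
        return self.data[idx]

    def __len__(self):
        return len(self.data)

dataset = ArrayDataset(np.random.rand(1000, 3).astype("float32"))
inner = io.BatchSampler(dataset, batch_size=32, shuffle=True, drop_last=True)
outer = io.BatchSampler(sampler=inner, batch_size=1, shuffle=False, drop_last=False)
loader = io.DataLoader(
    dataset,
    batch_sampler=outer,
    collate_fn=lambda batch: batch[0],  # strip the redundant outer axis
    num_workers=0,
)
```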
3 changes: 3 additions & 0 deletions ppsci/data/dataset/airfoil_dataset.py
@@ -109,6 +109,9 @@ class MeshAirfoilDataset(io.Dataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = False
+
use_pgl: bool = True

def __init__(
6 changes: 6 additions & 0 deletions ppsci/data/dataset/array_dataset.py
@@ -41,6 +41,9 @@ class NamedArrayDataset(io.Dataset):
>>> dataset = ppsci.data.dataset.NamedArrayDataset(input, output, weight)
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = True
+
def __init__(
self,
input: Dict[str, np.ndarray],
@@ -91,6 +94,9 @@ class IterableNamedArrayDataset(io.IterableDataset):
>>> dataset = ppsci.data.dataset.IterableNamedArrayDataset(input, label, weight)
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = False
+
def __init__(
self,
input: Dict[str, np.ndarray],
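The flag's only consumer is `build_dataloader` shown earlier; condensed, the wiring is:

``` py
# Datasets that can be fancy-indexed with a whole batch of indices
# (batch_index=True) default to the fast path with auto collation disabled,
# unless the config overrides it.
default_auto_collation = not getattr(_dataset, "batch_index", False)
if cfg.get("auto_collation", default_auto_collation) is False:
    ...  # wrap the batch sampler and install the identity collate_fn
```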
6 changes: 6 additions & 0 deletions ppsci/data/dataset/csv_dataset.py
@@ -54,6 +54,9 @@ class CSVDataset(io.Dataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = True
+
def __init__(
self,
file_path: str,
@@ -176,6 +179,9 @@ class IterableCSVDataset(io.IterableDataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = False
+
def __init__(
self,
file_path: str,
3 changes: 3 additions & 0 deletions ppsci/data/dataset/cylinder_dataset.py
@@ -55,6 +55,9 @@ class MeshCylinderDataset(io.Dataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = False
+
use_pgl: bool = True

def __init__(
3 changes: 3 additions & 0 deletions ppsci/data/dataset/era5_dataset.py
@@ -51,6 +51,9 @@ class ERA5Dataset(io.Dataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = False
+
def __init__(
self,
file_path: str,
6 changes: 6 additions & 0 deletions ppsci/data/dataset/mat_dataset.py
@@ -54,6 +54,9 @@ class MatDataset(io.Dataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = True
+
def __init__(
self,
file_path: str,
@@ -176,6 +179,9 @@ class IterableMatDataset(io.IterableDataset):
... ) # doctest: +SKIP
"""

+# Whether this dataset supports batch indexing to speed up the fetching process.
+batch_index: bool = False
+
def __init__(
self,
file_path: str,