diff --git a/pina/label_tensor.py b/pina/label_tensor.py
index 502d31f67..efa9fdd42 100644
--- a/pina/label_tensor.py
+++ b/pina/label_tensor.py
@@ -65,15 +65,56 @@ def __init__(self, x, labels):
                     [0.9518, 0.1025],
                     [0.8066, 0.9615]])
         '''
-        if x.ndim == 1:
-            x = x.reshape(-1, 1)
+        # if x.ndim == 1:
+        #     x = x.reshape(-1, 1)
 
         if isinstance(labels, str):
             labels = [labels]
-        if len(labels) != x.shape[-1]:
-            raise ValueError('the tensor has not the same number of columns of '
-                             'the passed labels.')
+        # A plain list of labels refers to the last dimension only.
+        if (isinstance(labels, (tuple, list))
+                and not isinstance(labels[0], (tuple, list))):
+            labels = [labels]
+
+        if len(labels) > x.ndim:
+            raise ValueError(
+                'The number of labeled dimensions is greater than the '
+                'number of dimensions of the tensor.')
+
+        # Labels are re-keyed to negative dimension indices (-1 is the last
+        # dimension), so lookups are uniform for every accepted form.
+        k_ = [-k for k in range(1, len(labels) + 1)]
+        if isinstance(labels, (tuple, list)):
+            self.dim_labels = list(k_)
+            labels = dict(zip(k_, labels))
+        elif isinstance(labels, dict):
+            # Use the re-keyed indices here too: `extract_` and `append`
+            # look dimensions up by (negative) integer index.
+            self.dim_labels = list(k_)
+            labels = dict(zip(k_, labels.values()))
+        else:
+            raise TypeError(
+                '`labels` should be a str, a list of str, a list of list '
+                'of str, or a dict.')
+
+        assert isinstance(labels, dict)
+
+        for d in labels:
+            if x.shape[d] != len(labels[d]):
+                err = (
+                    f'The tensor does not have the same number of components '
+                    f'as the passed labels along dimension {d}: '
+                    f'{x.shape[d]} != {len(labels[d])}.'
+                )
+                raise ValueError(err)
 
         self._labels = labels
 
     @property
@@ -93,27 +134,27 @@ def labels(self, labels):
 
         self._labels = labels  # assign the label
 
-    @staticmethod
-    def vstack(label_tensors):
-        """
-        Stack tensors vertically. For more details, see
-        :meth:`torch.vstack`.
+    # @staticmethod
+    # def vstack(label_tensors):
+    #     """
+    #     Stack tensors vertically. For more details, see
+    #     :meth:`torch.vstack`.
 
-        :param list(LabelTensor) label_tensors: the tensors to stack. They need
-            to have equal labels.
-        :return: the stacked tensor
-        :rtype: LabelTensor
-        """
-        if len(label_tensors) == 0:
-            return []
+    #     :param list(LabelTensor) label_tensors: the tensors to stack. They need
+    #         to have equal labels.
+    #     :return: the stacked tensor
+    #     :rtype: LabelTensor
+    #     """
+    #     if len(label_tensors) == 0:
+    #         return []
 
-        all_labels = [label for lt in label_tensors for label in lt.labels]
-        if set(all_labels) != set(label_tensors[0].labels):
-            raise RuntimeError('The tensors to stack have different labels')
+    #     all_labels = [label for lt in label_tensors for label in lt.labels]
+    #     if set(all_labels) != set(label_tensors[0].labels):
+    #         raise RuntimeError('The tensors to stack have different labels')
 
-        labels = label_tensors[0].labels
-        tensors = [lt.extract(labels) for lt in label_tensors]
-        return LabelTensor(torch.vstack(tensors), labels)
+    #     labels = label_tensors[0].labels
+    #     tensors = [lt.extract(labels) for lt in label_tensors]
+    #     return LabelTensor(torch.vstack(tensors), labels)
 
     def clone(self, *args, **kwargs):
         """
@@ -167,6 +208,59 @@ def cpu(self, *args, **kwargs):
         new.data = tmp.data
         return tmp
 
+    def extract_(self, label_to_extract):
+        """
+        Extract the subset of the original tensor given the labels in
+        ``label_to_extract``: along every labeled dimension in which at
+        least one of the requested labels occurs, only the requested
+        components are kept; all the other dimensions are left untouched.
+
+        :param str | list(str) label_to_extract: the label(s) to extract.
+        :return: the tensor with the extracted components.
+        :rtype: LabelTensor
+        """
+        if isinstance(label_to_extract, str):
+            label_to_extract = [label_to_extract]
+        if not isinstance(label_to_extract, (tuple, list)):
+            raise TypeError(
+                '`label_to_extract` should be a str or a list of str.')
+
+        # TODO: factorize and improve
+        # Abandon all hope, ye who enter here
+        dim_mask = []
+        new_labels = []
+        new_shape = []
+        for j in range(-self.ndim, 0, 1):
+            # True for the components to keep along dimension `j`
+            jcomp_keep = [True] * self.shape[j]
+            if j in self.dim_labels:
+                jcomp_labels = self.labels[j]
+                jcomp_extracted = [False] * self.shape[j]
+                for label in label_to_extract:
+                    if label in jcomp_labels:
+                        jcomp_extracted[jcomp_labels.index(label)] = True
+
+                if any(jcomp_extracted):
+                    # keep only the extracted components along `j`
+                    jcomp_keep = jcomp_extracted
+                    new_labels.append([
+                        label for label, extracted
+                        in zip(jcomp_labels, jcomp_extracted) if extracted])
+                else:
+                    # no label extracted along `j`: keep the whole dimension
+                    new_labels.append(jcomp_labels)
+
+                new_shape.append(len(new_labels[-1]))
+            else:  # j not in self.dim_labels
+                new_shape.append(self.shape[j])
+
+            dim_mask.append(torch.tensor(jcomp_keep))
+
+        def create_mask(dim_mask):
+            # outer AND of the per-dimension keep masks
+            grids = torch.meshgrid(dim_mask, indexing='ij')
+            mask = grids[0]
+            for g in grids[1:]:
+                mask = mask & g
+            return mask
+
+        mask = create_mask(dim_mask)
+        new_t = LabelTensor(self.tensor[mask].reshape(new_shape),
+                            labels=new_labels[::-1])
+
+        return new_t
+
     def extract(self, label_to_extract):
         """
         Extract the subset of the original tensor by returning all the columns
@@ -202,6 +296,9 @@ def extract(self, label_to_extract):
         return extracted_tensor
 
     def detach(self):
+        """
+        Return a new Tensor, detached from the current graph.
+        """
         detached = super().detach()
         if hasattr(self, '_labels'):
             detached._labels = self._labels
@@ -209,77 +306,110 @@
 
     def requires_grad_(self, mode = True):
+        """
+        Set the tensor's ``requires_grad`` attribute in-place.
+        """
         lt = super().requires_grad_(mode)
         lt.labels = self.labels
         return lt
 
-    def append(self, lt, mode='std'):
-        """
-        Return a copy of the merged tensors.
+    # def append(self, lt, mode='std'):
+    #     """
+    #     Return a copy of the merged tensors.
+
+    #     :param LabelTensor lt: The tensor to merge.
+    #     :param str mode: {'std', 'first', 'cross'}
+    #     :return: The merged tensors.
+    #     :rtype: LabelTensor
+    #     """
+    #     if set(self.labels).intersection(lt.labels):
+    #         raise RuntimeError('The tensors to merge have common labels')
+
+    #     new_labels = self.labels + lt.labels
+    #     if mode == 'std':
+    #         new_tensor = torch.cat((self, lt), dim=1)
+    #     elif mode == 'first':
+    #         raise NotImplementedError
+    #     elif mode == 'cross':
+    #         tensor1 = self
+    #         tensor2 = lt
+    #         n1 = tensor1.shape[0]
+    #         n2 = tensor2.shape[0]
+
+    #         tensor1 = LabelTensor(tensor1.repeat(n2, 1), labels=tensor1.labels)
+    #         tensor2 = LabelTensor(tensor2.repeat_interleave(n1, dim=0),
+    #                               labels=tensor2.labels)
+    #         new_tensor = torch.cat((tensor1, tensor2), dim=1)
+
+    #     new_tensor = new_tensor.as_subclass(LabelTensor)
+    #     new_tensor.labels = new_labels
+    #     return new_tensor
-        :param LabelTensor lt: The tensor to merge.
-        :param str mode: {'std', 'first', 'cross'}
-        :return: The merged tensors.
-        :rtype: LabelTensor
-        """
-        if set(self.labels).intersection(lt.labels):
-            raise RuntimeError('The tensors to merge have common labels')
-
-        new_labels = self.labels + lt.labels
-        if mode == 'std':
-            new_tensor = torch.cat((self, lt), dim=1)
-        elif mode == 'first':
-            raise NotImplementedError
-        elif mode == 'cross':
-            tensor1 = self
-            tensor2 = lt
-            n1 = tensor1.shape[0]
-            n2 = tensor2.shape[0]
-
-            tensor1 = LabelTensor(tensor1.repeat(n2, 1), labels=tensor1.labels)
-            tensor2 = LabelTensor(tensor2.repeat_interleave(n1, dim=0),
-                                  labels=tensor2.labels)
-            new_tensor = torch.cat((tensor1, tensor2), dim=1)
-
-        new_tensor = new_tensor.as_subclass(LabelTensor)
-        new_tensor.labels = new_labels
-        return new_tensor
-
-    def __getitem__(self, index):
-        """
-        Return a copy of the selected tensor.
-        """
-
-        if isinstance(index, str) or (isinstance(index, (tuple, list))and all(isinstance(a, str) for a in index)):
-            return self.extract(index)
-
-        selected_lt = super(Tensor, self).__getitem__(index)
-
-        try:
-            len_index = len(index)
-        except TypeError:
-            len_index = 1
-
-        if isinstance(index, int) or len_index == 1:
-            if selected_lt.ndim == 1:
-                selected_lt = selected_lt.reshape(1, -1)
-            if hasattr(self, 'labels'):
-                selected_lt.labels = self.labels
-        elif len_index == 2:
-            if selected_lt.ndim == 1:
-                selected_lt = selected_lt.reshape(-1, 1)
-            if hasattr(self, 'labels'):
-                if isinstance(index[1], list):
-                    selected_lt.labels = [self.labels[i] for i in index[1]]
-                else:
-                    selected_lt.labels = self.labels[index[1]]
-        else:
-            selected_lt.labels = self.labels
-
-        return selected_lt
+    def append(self, lt, dim=None, component=None):
+        """
+        Return a copy of the merged tensors.
+
+        :param LabelTensor lt: The tensor to merge.
+        :param int dim: The dimension along which to merge. Currently only
+            ``None`` is supported.
+        :param list(str) component: The labels shared by the two tensors
+            along the aligned dimension.
+        :return: The merged tensors.
+        :rtype: LabelTensor
+        """
+        if dim is None and component is None:
+            # TODO: infer both the dimension and the component from labels
+            raise NotImplementedError
+
+        if dim is None and component is not None:
+
+            if self.ndim != lt.ndim:
+                raise RuntimeError(
+                    'The tensors to merge have different dimensions')
+
+            # Labels (per dimension) that the two tensors have in common.
+            common_labels = [i for i in self.labels.values()
+                             for j in lt.labels.values() if i == j]
+
+            if len(common_labels) == 0:
+                raise RuntimeError('The tensors to merge have no common labels')
+
+            common_labels = common_labels[0]
+            # Mark the dimensions carrying the common labels as not free.
+            dim1 = [True] * self.ndim
+            dim2 = [True] * lt.ndim
+            label_dim = None
+            for k, v in self.labels.items():
+                if v == common_labels:
+                    dim1[k] = False
+                    label_dim = k
+            for k, v in lt.labels.items():
+                if v == common_labels:
+                    dim2[k] = False
+
+            if dim1 == dim2:
+                # Concatenate along the (unique) remaining free dimension.
+                dim_to_append = [i for i, free in enumerate(dim1) if free]
+                if len(dim_to_append) > 1:
+                    raise RuntimeError(
+                        'The tensors to merge have more than one free '
+                        f'dimension and only component={component} is given')
+                return LabelTensor(
+                    torch.cat((self.tensor, lt.tensor), dim=dim_to_append[0]),
+                    labels={label_dim: common_labels}
+                )
+
+            raise NotImplementedError
+
+        raise NotImplementedError
+
+    def _append(self, lt, mode):
+        # TODO: placeholder for a mode-based append refactoring.
+        raise NotImplementedError
+
+    def __getitem__(self, index):
+        """
+        Disable the slicing of the labels.
+        """
+        text = (
+            'LabelTensor does not support slicing. '
+            'Use `extract` instead, or `tensor` to get the underlying tensor.'
+        )
+        raise RuntimeError(text)
 
     @property
     def tensor(self):
+        """
+        Return the underlying tensor.
+ """ return self.as_subclass(Tensor) def __len__(self) -> int: @@ -290,5 +420,5 @@ def __str__(self): s = f'labels({str(self.labels)})\n' else: s = 'no labels\n' - s += super().__str__() + s += self.tensor.__str__() return s \ No newline at end of file diff --git a/pina/optimizer/__init__.py b/pina/optimizer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pina/optimizer/optimizer_interface.py b/pina/optimizer/optimizer_interface.py new file mode 100644 index 000000000..3f38286f2 --- /dev/null +++ b/pina/optimizer/optimizer_interface.py @@ -0,0 +1,4 @@ +""" Abstract class for Optimizer """ + +class Optimizer: + pass \ No newline at end of file diff --git a/pina/optimizer/torch_optimizer.py b/pina/optimizer/torch_optimizer.py new file mode 100644 index 000000000..87a2c37cf --- /dev/null +++ b/pina/optimizer/torch_optimizer.py @@ -0,0 +1,17 @@ +from .optimizer_interface import Optimizer +from ..utils import check_consistency + +class TorchOptimizer(Optimizer): + + def __init__(self, optimizer_class, **kwargs): + check_consistency(optimizers, torch.optim.Optimizer, subclass=True) + + self.optimizer_class = optimizer_class + self.kwargs = kwargs + + def hook(self, parameters): + self.optimizer_instance = self.optimizer_class( + parameters, **self.kwargs + ) + + \ No newline at end of file diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py index 311dbcec7..36859d145 100644 --- a/pina/problem/abstract_problem.py +++ b/pina/problem/abstract_problem.py @@ -249,6 +249,7 @@ def have_sampled_points(self): Check if all points for ``Location`` are sampled. """ + print(self._have_sampled_points) return all(self._have_sampled_points.values()) @property diff --git a/pina/solvers/pinn.py b/pina/solvers/pinn.py index 5cafbba63..2bb5b9246 100644 --- a/pina/solvers/pinn.py +++ b/pina/solvers/pinn.py @@ -150,7 +150,6 @@ def training_step(self, batch, batch_idx): condition_name = dataloader.loaders.condition_names[condition_id] condition = self.problem.conditions[condition_name] pts = batch['pts'] - if len(batch) == 2: samples = pts[condition_idx == condition_id] loss = self._loss_phys(samples, condition.equation) @@ -164,7 +163,7 @@ def training_step(self, batch, batch_idx): # TODO for users this us hard to remember when creating a new solver, to fix in a smarter way loss = loss.as_subclass(torch.Tensor) -# # add condition losses and accumulate logging for each epoch + # add condition losses and accumulate logging for each epoch condition_losses.append(loss * condition.data_weight) self.log(condition_name + '_loss', float(loss), prog_bar=True, logger=True, on_epoch=True, on_step=False) @@ -179,7 +178,7 @@ def training_step(self, batch, batch_idx): self.log('mean_loss', float(total_loss / len(condition_losses)), prog_bar=True, logger=True, on_epoch=True, on_step=False) - return total_loss + return total_loss/len(condition_losses) @property def scheduler(self): diff --git a/tests/test_label_tensor.py b/tests/test_label_tensor.py index 05dace5e3..ae0fca223 100644 --- a/tests/test_label_tensor.py +++ b/tests/test_label_tensor.py @@ -3,36 +3,131 @@ from pina import LabelTensor -data = torch.rand((20, 3)) -labels = ['a', 'b', 'c'] +data_1D = torch.rand((20)) +data_2D = torch.rand((20, 3)) +data_3D = torch.rand((20, 3, 2)) +data_ND = torch.rand([5]*10) + +labels_20 = [f'dof{i}' for i in range(20)] +labels_3 = [f'dim{i}' for i in range(3)] +labels_2 = [f'channel{i}' for i in range(2)] +labels_5 = [f'output{i}' for i in range(5)] + +# 
@pytest.mark.parametrize("rank", [1, 2, 10]) +def test_constructor_1D(): + # Label any component + LabelTensor(data_1D, labels=labels_20) + +def test_constructor_2D(): + # Label the column + LabelTensor(data_2D, labels=labels_3) + # Label any component 2D + LabelTensor(data_2D, labels=[labels_3, labels_20]) + LabelTensor(data_2D, labels={'D': labels_3, 'N': labels_20}) - -def test_constructor(): - LabelTensor(data, labels) - - -def test_wrong_constructor(): with pytest.raises(ValueError): - LabelTensor(data, ['a', 'b']) - - -def test_labels(): - tensor = LabelTensor(data, labels) - assert isinstance(tensor, torch.Tensor) - assert tensor.labels == labels + LabelTensor(data_2D, labels=labels_20) + LabelTensor(data_2D, labels=[labels_20, labels_3]) + LabelTensor(data_2D, labels=[labels_3, labels_20[:-1]]) + LabelTensor(data_2D, labels=[labels_3[1:], labels_20]) + +def test_constructor_3D(): + LabelTensor(data_3D, labels=labels_2) + LabelTensor(data_3D, labels=[labels_2, labels_3]) + LabelTensor(data_3D, labels=[labels_2, labels_3, labels_20]) + LabelTensor(data_3D, labels={ + 'C': labels_2, 'D': labels_3, 'N': labels_20}) with pytest.raises(ValueError): - tensor.labels = labels[:-1] - - -def test_extract(): - label_to_extract = ['a', 'c'] - tensor = LabelTensor(data, labels) - new = tensor.extract(label_to_extract) - assert new.labels == label_to_extract - assert new.shape[1] == len(label_to_extract) - assert torch.all(torch.isclose(data[:, 0::2], new)) - + LabelTensor(data_3D, labels=labels_3) + LabelTensor(data_3D, labels=[labels_3, labels_2]) +def test_constructor_ND(): + LabelTensor(data_ND, labels=labels_5) + LabelTensor(data_ND, labels=[labels_5]*10) + LabelTensor(data_ND, labels={f'O{i}': labels_5 for i in range(10)}) + with pytest.raises(ValueError): + LabelTensor(data_ND, labels=labels_20) + +# def test_labels(): +# tensor = LabelTensor(data, labels) +# assert isinstance(tensor, torch.Tensor) +# assert tensor.labels == labels +# with pytest.raises(ValueError): +# tensor.labels = labels[:-1] + + +@pytest.mark.parametrize("tensor", [ + LabelTensor(data_2D, labels=[labels_3, labels_20]), + LabelTensor(data_2D, labels=labels_3), + LabelTensor(data_3D, labels=[labels_2, labels_3, labels_20]), +]) +def test_extract_consistency(tensor): + label_to_extract = ['dim0', 'dim2'] + new = tensor.extract_(label_to_extract) + assert new.labels.keys() == tensor.labels.keys() + if len(tensor.labels) == 2: + assert new.labels[-2] == tensor.labels[-2] + elif len(tensor.labels) == 3: + assert new.labels[-3] == tensor.labels[-3] + elif len(tensor.labels) == 1: + assert new.labels[-1] == label_to_extract + # assert new.shape[1] == len(label_to_extract) + # assert torch.all(torch.isclose(data_2D[:, 0::2], new)) + + +@pytest.mark.parametrize("tensor", [ + LabelTensor(data_2D, labels=[labels_3, labels_20]), + LabelTensor(data_3D, labels=[labels_2, labels_3, labels_20]), +]) +def test_tensor_consistency(tensor): + tt = tensor.tensor + mean = tt.mean() + tt += 1. + assert isinstance(tt, torch.Tensor) + torch.testing.assert_close(tt, tensor.tensor) + torch.testing.assert_close(tt.mean(), mean + 1.) 
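+
+# Illustrative sketch (not part of the original patch): the constructor
+# re-keys every accepted `labels` form to negative dimension indices, so
+# the dict keys 'D' and 'N' below survive only through their ordering.
+def test_labels_rekeyed_to_negative_dims():
+    lt = LabelTensor(data_2D, labels={'D': labels_3, 'N': labels_20})
+    # -1 is the last dimension (size 3), -2 the first one (size 20)
+    assert lt.labels[-1] == labels_3
+    assert lt.labels[-2] == labels_20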
+
+@pytest.mark.parametrize(("tensor1", "tensor2"), [
+    (
+        LabelTensor(data_2D, labels=labels_3),
+        LabelTensor(data_2D, labels=labels_3),
+    ),
+    (
+        LabelTensor(data_2D, labels=labels_3),
+        LabelTensor(data_2D[:-2], labels=labels_3)
+    ),
+    # (
+    #     LabelTensor(data_3D, labels=labels_2),
+    #     LabelTensor(data_3D, labels=labels_2),
+    # )
+])
+def test_append(tensor1, tensor2):
+    tensor = tensor1.append(tensor2, component=labels_3)
+    assert tensor.labels == tensor1.labels
+    assert tensor.shape[0] == tensor1.shape[0] + tensor2.shape[0]
+    assert torch.allclose(tensor, torch.cat((tensor1, tensor2), dim=0))
+
+# @pytest.mark.parametrize(("tensor1", "tensor2"), [
+#     (
+#         LabelTensor(data_2D, labels=labels_3),
+#         LabelTensor(data_2D.T, labels=labels_3),
+#     ),
+#     (
+#         LabelTensor(data_2D, labels=labels_3),
+#         LabelTensor(data_2D[:-2].T, labels=labels_3)
+#     ),
+#     # (
+#     #     LabelTensor(data_3D, labels=labels_2),
+#     #     LabelTensor(data_3D, labels=labels_2),
+#     # )
+# ])
+# def test_append_transpose(tensor1, tensor2):
+#     tensor = tensor1.append(tensor2, component=labels_3)
+#     assert tensor.labels == tensor1.labels
+#     assert tensor.shape[0] == tensor1.shape[0] + tensor2.shape[1]
+#     assert torch.allclose(tensor, torch.cat((tensor1, tensor2.T), dim=0))
+
+"""
 def test_extract_onelabel():
     label_to_extract = ['a']
     tensor = LabelTensor(data, labels)
     new = tensor.extract(label_to_extract)
     assert new.ndim == 2
     assert new.labels == label_to_extract
     assert new.shape[1] == len(label_to_extract)
     assert torch.all(torch.isclose(data[:, 0].reshape(-1, 1), new))
@@ -61,59 +156,60 @@ def test_extract_order():
     assert new.shape[1] == len(label_to_extract)
     assert torch.all(torch.isclose(expected, new))
 
+"""
 
-def test_merge():
-    tensor = LabelTensor(data, labels)
-    tensor_a = tensor.extract('a')
-    tensor_b = tensor.extract('b')
-    tensor_c = tensor.extract('c')
+# def test_merge():
+#     tensor = LabelTensor(data, labels)
+#     tensor_a = tensor.extract('a')
+#     tensor_b = tensor.extract('b')
+#     tensor_c = tensor.extract('c')
 
-    tensor_bc = tensor_b.append(tensor_c)
-    assert torch.allclose(tensor_bc, tensor.extract(['b', 'c']))
+#     tensor_bc = tensor_b.append(tensor_c)
+#     assert torch.allclose(tensor_bc, tensor.extract(['b', 'c']))
 
-def test_merge2():
-    tensor = LabelTensor(data, labels)
-    tensor_b = tensor.extract('b')
-    tensor_c = tensor.extract('c')
+# def test_merge2():
+#     tensor = LabelTensor(data, labels)
+#     tensor_b = tensor.extract('b')
+#     tensor_c = tensor.extract('c')
 
-    tensor_bc = tensor_b.append(tensor_c)
-    assert torch.allclose(tensor_bc, tensor.extract(['b', 'c']))
+#     tensor_bc = tensor_b.append(tensor_c)
+#     assert torch.allclose(tensor_bc, tensor.extract(['b', 'c']))
 
-def test_getitem():
-    tensor = LabelTensor(data, labels)
-    tensor_view = tensor['a']
+# def test_getitem():
+#     tensor = LabelTensor(data, labels)
+#     tensor_view = tensor['a']
 
-    assert tensor_view.labels == ['a']
-    assert torch.allclose(tensor_view.flatten(), data[:, 0])
+#     assert tensor_view.labels == ['a']
+#     assert torch.allclose(tensor_view.flatten(), data[:, 0])
 
-    tensor_view = tensor['a', 'c']
+#     tensor_view = tensor['a', 'c']
 
-    assert tensor_view.labels == ['a', 'c']
-    assert torch.allclose(tensor_view, data[:, 0::2])
+#     assert tensor_view.labels == ['a', 'c']
+#     assert torch.allclose(tensor_view, data[:, 0::2])
 
-def test_getitem2():
-    tensor = LabelTensor(data, labels)
-    tensor_view = tensor[:5]
-    assert tensor_view.labels == labels
-    assert torch.allclose(tensor_view, data[:5])
+# def test_getitem2():
+#     tensor = LabelTensor(data, labels)
+#     tensor_view = tensor[:5]
+#     assert tensor_view.labels == labels
+#     assert torch.allclose(tensor_view, data[:5])
 
-    idx = torch.randperm(tensor.shape[0])
-    tensor_view = tensor[idx]
-    assert tensor_view.labels == labels
+#     idx = torch.randperm(tensor.shape[0])
+#     tensor_view = tensor[idx]
+#     assert tensor_view.labels == labels
 
-def test_slice():
-    tensor = LabelTensor(data, labels)
-    tensor_view = tensor[:5, :2]
-    assert tensor_view.labels == labels[:2]
-    assert torch.allclose(tensor_view, data[:5, :2])
+# def test_slice():
+#     tensor = LabelTensor(data, labels)
+#     tensor_view = tensor[:5, :2]
+#     assert tensor_view.labels == labels[:2]
+#     assert torch.allclose(tensor_view, data[:5, :2])
 
-    tensor_view2 = tensor[3]
-    assert tensor_view2.labels == labels
-    assert torch.allclose(tensor_view2, data[3])
+#     tensor_view2 = tensor[3]
+#     assert tensor_view2.labels == labels
+#     assert torch.allclose(tensor_view2, data[3])
 
-    tensor_view3 = tensor[:, 2]
-    assert tensor_view3.labels == labels[2]
-    assert torch.allclose(tensor_view3, data[:, 2].reshape(-1, 1))
+#     tensor_view3 = tensor[:, 2]
+#     assert tensor_view3.labels == labels[2]
+#     assert torch.allclose(tensor_view3, data[:, 2].reshape(-1, 1))
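
Note on the new pina/optimizer package: `TorchOptimizer` stores an optimizer class together with its keyword arguments and defers the actual instantiation until `hook` receives the parameters to optimize. A minimal usage sketch under that reading of the diff (the `torch.optim.Adam` choice and the `torch.nn.Linear` model are illustrative placeholders, not part of the patch):

    import torch
    from pina.optimizer.torch_optimizer import TorchOptimizer

    model = torch.nn.Linear(2, 1)        # hypothetical model to optimize
    optimizer = TorchOptimizer(torch.optim.Adam, lr=1e-3)
    optimizer.hook(model.parameters())   # builds Adam(model.parameters(), lr=1e-3)
    assert isinstance(optimizer.optimizer_instance, torch.optim.Adam)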