From 82d3a005a387e55f3f84b632778ad4603fd34588 Mon Sep 17 00:00:00 2001
From: matthiasdold
Date: Wed, 2 Apr 2025 12:28:00 +0200
Subject: [PATCH 1/5] implemented loading and first test

---
 moabb/paradigms/base.py       |  91 ++++++++++++++++++++++++++---
 moabb/tests/test_paradigms.py | 105 ++++++++++++++++++++++++++++++++++
 2 files changed, 188 insertions(+), 8 deletions(-)

diff --git a/moabb/paradigms/base.py b/moabb/paradigms/base.py
index d9757a948..581a2389f 100644
--- a/moabb/paradigms/base.py
+++ b/moabb/paradigms/base.py
@@ -1,15 +1,16 @@
 import abc
 import logging
 from operator import methodcaller
-from typing import List, Optional, Tuple
+from typing import List, Literal, Optional, Tuple
 
 import mne
 import numpy as np
 import pandas as pd
+from mne_bids.path import _find_matching_sidecar
 from sklearn.pipeline import Pipeline, make_pipeline
 from sklearn.preprocessing import FunctionTransformer
 
-from moabb.datasets.base import BaseDataset
+from moabb.datasets.base import BaseBIDSDataset, BaseDataset
 from moabb.datasets.bids_interface import StepType
 from moabb.datasets.preprocessing import (
     EpochsToEvents,
@@ -232,6 +233,7 @@ def get_data(  # noqa: C901
         return_raws=False,
         cache_config=None,
         postprocess_pipeline=None,
+        additional_metadata: Literal["default", "all"] | list[str] = "default",
     ):
         """
         Return the data for a list of subject.
@@ -265,6 +267,13 @@ def get_data(  # noqa: C901
             This pipeline must return an ``np.ndarray``.
             This pipeline must be "fixed" because it will not be trained,
             i.e. no call to ``fit`` will be made.
+        additional_metadata: Literal["default", "all"] | list[str]
+            Additional metadata to be loaded if return_epochs=True.
+            If "default", the default metadata will be loaded containing
+            `subject`, `session` and `run`. If "all", all columns of the `events.tsv`
+            file will be loaded. A list of column names can be passed to just
+            select these columns in addition to the three default values mentioned
+            before.
 
         Returns
         -------
@@ -306,6 +315,7 @@ def get_data(  # noqa: C901
             for session, runs in sessions.items():
                 for run in runs.keys():
                     proc = [data_i[subject][session][run] for data_i in data]
+
                     if any(obj is None for obj in proc):
                         # this mean the run did not contain any selected event
                         # go to next
@@ -321,6 +331,38 @@ def get_data(  # noqa: C901
                         if len(self.filters) == 1
                         else mne.concatenate_epochs(proc)
                     )
+
+                    # prepare additional metadata
+                    if additional_metadata != "default":
+                        if not isinstance(dataset, BaseBIDSDataset):
+                            raise TypeError(
+                                "additional_metadata can only be used with BIDS datasets."
+                            )
+
+                        dm = load_bids_event_metadata(
+                            dataset, subject=subject, session=session, run=run
+                        )
+
+                        # stack for multiple bandpass filtered versions
+                        dm = pd.concat(
+                            [
+                                dm.copy().assign(filter=i)
+                                for i in range(len(self.filters))
+                            ],
+                            ignore_index=True,
+                        )
+
+                        if additional_metadata == "all":
+                            pass
+                        elif isinstance(additional_metadata, list):
+                            dm = dm[
+                                ["session", "subject", "run"] + additional_metadata
+                            ]
+                        else:
+                            raise ValueError(
+                                "additional_metadata must be 'default', 'all' or a list of column names"
+                            )
+
                 elif return_raws:
                     assert all(len(proc[0]) == len(p) for p in proc[1:])
                     n = 1
@@ -350,16 +392,22 @@ def get_data(  # noqa: C901
                     met["subject"] = subject
                     met["session"] = session
                     met["run"] = run
+
                     metadata.append(met)
 
                     if return_epochs:
-                        x.metadata = (
-                            met.copy()
-                            if len(self.filters) == 1
-                            else pd.concat(
-                                [met.copy()] * len(self.filters), ignore_index=True
+                        if additional_metadata == "default":
+                            x.metadata = (
+                                met.copy()
+                                if len(self.filters) == 1
+                                else pd.concat(
+                                    [met.copy()] * len(self.filters), ignore_index=True
+                                )
                             )
-                        )
+                        else:
+                            x.metadata = dm
+                            # also overwrite in the metadata list
+                            metadata[-1] = dm
                     X.append(x)
                     labels.append(lbs)
@@ -556,3 +604,30 @@ def scoring(self):
     def _get_events_pipeline(self, dataset):
         event_id = self.used_events(dataset)
         return RawToEvents(event_id=event_id, interval=dataset.interval)
+
+
+def load_bids_event_metadata(
+    data_set: BaseBIDSDataset, subject: str, session: str, run: str
+) -> pd.DataFrame:
+    bids_paths = data_set.bids_paths(subject)
+
+    # select only with matching session and run
+    bids_path_selected = [
+        pth
+        for pth in bids_paths
+        if f"ses-{session}" in pth.basename and f"run-{run}" in pth.basename
+    ]
+
+    if len(bids_path_selected) > 1:
+        raise ValueError("More than one matching BIDS path found.")
+    bids_path = bids_path_selected[0]
+
+    events_fname = _find_matching_sidecar(
+        bids_path, suffix="events", extension=".tsv", on_error="warn"
+    )
+
+    dm = pd.read_csv(events_fname, sep="\t").assign(
+        subject=subject, session=session, run=run
+    )
+
+    return dm
diff --git a/moabb/tests/test_paradigms.py b/moabb/tests/test_paradigms.py
index 44e874738..6fc52e4f4 100644
--- a/moabb/tests/test_paradigms.py
+++ b/moabb/tests/test_paradigms.py
@@ -11,6 +11,9 @@
 from mne.io import BaseRaw
 
 from moabb.datasets import BNCI2014_001
+from moabb.datasets.base import (
+    LocalBIDSDataset,
+)
 from moabb.datasets.fake import FakeDataset
 from moabb.paradigms import (
     CVEP,
@@ -1237,3 +1240,105 @@ def test_epochs(self, epochs_labels_metadata, dataset):
         np.testing.assert_array_almost_equal(
             epo.get_data()[0, :, 0] * dataset.unit_factor, X
         )
+
+
+class TestMetadata:
+
+    @pytest.fixture(scope="class")
+    def cached_dataset_root(self, tmpdir_factory):
+        root = tmpdir_factory.mktemp("fake_bids")
+        dataset = FakeDataset(
+            event_list=["fake1", "fake2"], n_sessions=2, n_subjects=2, n_runs=1
+        )
+        dataset.get_data(cache_config=dict(save_raw=True, overwrite_raw=False, path=root))
+        return root / "MNE-BIDS-fake-dataset-imagery-2-2--60--120--fake1-fake2--c3-cz-c4"
+
+    def test_additional_metadata_extracts(self, cached_dataset_root):
+
+        # --- The tsv files have metadata which would contain the following
+        #
+        #         onset  duration  trial_type  value  sample
+        #     0.0078125       3.0       fake1      1       1
+        #      1.984375       3.0       fake2      2     254
+        #       3.96875       3.0       fake1      1     508
+        #      5.953125       3.0       fake2      2     762
+        #
+        # --- While onset, duration, and trial_type are implicitly available
+        # --- by the epoch design, we could want `value` and/or `sample` as well
+
+        dataset = LocalBIDSDataset(
+            cached_dataset_root,
+            events={"fake1": 1, "fake2": 2},
+            interval=[0, 3],
+            paradigm="imagery",
+        )
+        paradigm = MotorImagery()
+
+        epo1, labels1, metadata1 = paradigm.get_data(
+            dataset=dataset,
+            subjects=["1"],
+            return_epochs=True,
+        )
+
+        epo2, labels2, metadata2 = paradigm.get_data(
+            dataset=dataset,
+            subjects=["1"],
+            return_epochs=True,
+            additional_metadata="all",
+        )
+
+        epo3, labels3, metadata3 = paradigm.get_data(
+            dataset=dataset,
+            subjects=["1"],
+            return_epochs=True,
+            additional_metadata=["value"],
+        )
+
+        assert epo1 == epo2 == epo3
+        assert (labels1 == labels2).all()
+        assert (labels2 == labels3).all()
+
+        assert "value" in metadata2.columns
+        assert "sample" in metadata2.columns
+        assert "value" in metadata3.columns
+        assert "sample" not in metadata3.columns
+
+    # # [ ] could not find a paradigm which would take multiple filters,
+    # # omit this for now
+    # def test_meta_for_multiple_filters(self, cached_dataset_root):
+    #     dataset = LocalBIDSDataset(
+    #         cached_dataset_root,
+    #         events={"fake1": 1, "fake2": 2},
+    #         interval=[0, 3],
+    #         paradigm="imagery",
+    #     )
+    #     # paradigm = MotorImagery()
+    #     # paradigm = FixedIntervalWindowsProcessing()
+    #
+    #     epo1, labels1, metadata1 = paradigm.get_data(
+    #         dataset=dataset,
+    #         subjects=["1"],
+    #         return_epochs=True,
+    #     )
+    #
+    #
+    # # This could be a test if we get a dataset for testing which is not
+    # # BaseBIDS-compliant
+    #
+    # def test_type_error_for_non_basebids_data(self, cached_dataset_root):
+    #
+    #     dataset = LocalBIDSDataset(
+    #         cached_dataset_root,
+    #         events={"fake1": 1, "fake2": 2},
+    #         interval=[0, 3],
+    #         paradigm="imagery",
+    #     )
+    #     paradigm = CVEP()
+    #
+    #     with pytest.raises(TypeError):
+    #         paradigm.get_data(
+    #             dataset=dataset,
+    #             subjects=["1"],
+    #             return_epochs=True,
+    #             additional_metadata="all",
+    #         )

From 89021a3a3bdeaa22d9ae372d9776fe3607d7bd3b Mon Sep 17 00:00:00 2001
From: matthiasdold
Date: Wed, 2 Apr 2025 12:28:30 +0200
Subject: [PATCH 2/5] removed additional test idea comments

---
 moabb/tests/test_paradigms.py | 40 -----------------------------------
 1 file changed, 40 deletions(-)

diff --git a/moabb/tests/test_paradigms.py b/moabb/tests/test_paradigms.py
index 6fc52e4f4..7c69a542d 100644
--- a/moabb/tests/test_paradigms.py
+++ b/moabb/tests/test_paradigms.py
@@ -1302,43 +1302,3 @@ def test_additional_metadata_extracts(self, cached_dataset_root):
         assert "sample" in metadata2.columns
         assert "value" in metadata3.columns
         assert "sample" not in metadata3.columns
-
-    # # [ ] could not find a paradigm which would take multiple filters,
-    # # omit this for now
-    # def test_meta_for_multiple_filters(self, cached_dataset_root):
-    #     dataset = LocalBIDSDataset(
-    #         cached_dataset_root,
-    #         events={"fake1": 1, "fake2": 2},
-    #         interval=[0, 3],
-    #         paradigm="imagery",
-    #     )
-    #     # paradigm = MotorImagery()
-    #     # paradigm = FixedIntervalWindowsProcessing()
-    #
-    #     epo1, labels1, metadata1 = paradigm.get_data(
-    #         dataset=dataset,
-    #         subjects=["1"],
-    #         return_epochs=True,
-    #     )
-    #
-    #
-    # # This could be a test if we get a dataset for testing which is not
-    # # BaseBIDS-compliant
-    #
-    # def test_type_error_for_non_basebids_data(self, cached_dataset_root):
-    #
-    #     dataset = LocalBIDSDataset(
-    #         cached_dataset_root,
-    #         events={"fake1": 1, "fake2": 2},
-    #         interval=[0, 3],
-    #         paradigm="imagery",
-    #     )
-    #     paradigm = CVEP()
-    #
-    #     with pytest.raises(TypeError):
-    #         paradigm.get_data(
-    #             dataset=dataset,
-    #             subjects=["1"],
-    #             return_epochs=True,
-    #             additional_metadata="all",
additional_metadata="all", - # ) From 25ccb02505075f31a3eededba63fc042aba6301c Mon Sep 17 00:00:00 2001 From: matthiasdold Date: Wed, 2 Apr 2025 12:49:11 +0200 Subject: [PATCH 3/5] added test cast for two selected columns --- moabb/tests/test_paradigms.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/moabb/tests/test_paradigms.py b/moabb/tests/test_paradigms.py index 7c69a542d..c55bd9f54 100644 --- a/moabb/tests/test_paradigms.py +++ b/moabb/tests/test_paradigms.py @@ -1294,6 +1294,13 @@ def test_additional_metadata_extracts(self, cached_dataset_root): additional_metadata=["value"], ) + epo4, labels4, metadata4 = paradigm.get_data( + dataset=dataset, + subjects=["1"], + return_epochs=True, + additional_metadata=["value", "duration"], + ) + assert epo1 == epo2 == epo3 assert (labels1 == labels2).all() assert (labels2 == labels3).all() @@ -1301,4 +1308,7 @@ def test_additional_metadata_extracts(self, cached_dataset_root): assert "value" in metadata2.columns assert "sample" in metadata2.columns assert "value" in metadata3.columns + assert "value" in metadata4.columns + assert "duration" in metadata4.columns assert "sample" not in metadata3.columns + assert "sample" not in metadata4.columns From 942a52026ca8023d3327bc231a228bb7ed678d00 Mon Sep 17 00:00:00 2001 From: matthiasdold Date: Mon, 7 Apr 2025 19:14:50 +0200 Subject: [PATCH 4/5] implemented filtering of events.tsv analogeous to mne-bids.read._handle_events_reading --- moabb/datasets/base.py | 92 +++++++++++++++++++++++ moabb/paradigms/base.py | 135 ++++++++++++++++------------------ moabb/tests/test_paradigms.py | 63 +++++++++++++++- 3 files changed, 218 insertions(+), 72 deletions(-) diff --git a/moabb/datasets/base.py b/moabb/datasets/base.py index 92f30fa2a..5941a3bdc 100644 --- a/moabb/datasets/base.py +++ b/moabb/datasets/base.py @@ -13,6 +13,7 @@ import mne_bids import pandas as pd +from mne_bids.path import _find_matching_sidecar from sklearn.pipeline import Pipeline from moabb.datasets.bids_interface import StepType, _interface_map @@ -674,6 +675,34 @@ def data_path( """ # noqa: E501 pass + def get_additional_metadata( + self, subject: str, session: str, run: str + ) -> None | pd.DataFrame: + """ + Load additional metadata for a specific subject, session, and run. + + This method is intended to be overridden by subclasses to provide + additional metadata specific to the dataset. The metadata is typically + loaded from an `events.tsv` file or similar data source. + + Parameters + ---------- + subject : str + The identifier for the subject. + session : str + The identifier for the session. + run : str + The identifier for the run. + + Returns + ------- + None | pd.DataFrame + A DataFrame containing the additional metadata if available, + otherwise None. + """ + + return None + class BaseBIDSDataset(BaseDataset): """Abstract BIDS dataset class. @@ -764,6 +793,69 @@ def _get_single_subject_data(self, subject): data.setdefault(session, {})[run] = raw return data + def get_additional_metadata( + self, subject: str, session: str, run: str + ) -> None | pd.DataFrame: + """ + Load additional metadata for a specific subject, session, and run. + This is just loading all metadata, filtering down to epochs levels + is done at ... + + + Parameters + ---------- + subject : str + The identifier for the subject. + session : str + The identifier for the session. + run : str + The identifier for the run. + + Returns + ------- + None | pd.DataFrame + A DataFrame containing the additional metadata if available, + otherwise None. 
+ """ + + bids_paths = self.bids_paths(subject) + + # select only with matching session and run + bids_path_selected = [ + pth + for pth in bids_paths + if f"ses-{session}" in pth.basename and f"run-{run}" in pth.basename + ] + + if len(bids_path_selected) > 1: + raise ValueError("More than one matching BIDS path found.") + bids_path = bids_path_selected[0] + + events_fname = _find_matching_sidecar( + bids_path, suffix="events", extension=".tsv", on_error="warn" + ) + + dm = pd.read_csv(events_fname, sep="\t").assign( + subject=subject, session=session, run=run + ) + + # As long as this is not part of mne-bids https://github.com/mne-tools/mne-bids/pull/1389, + # we cannot will functionally replicate the filtering (as we only) + # need the dropping part + dm = dm[(dm.onset != "n/a") & (~dm.onset.isna())] + dm["onset"] = dm["onset"].astype(float) + + if "trial_type" in dm.columns: + dm = dm[(dm.trial_type != "n/a") & (~dm.onset.isna())] + elif "value" in dm.columns: + dm = dm[(dm.value != "n/a") & (~dm.onset.isna())] + + # for the bids_dataset we can assume that the events are taken from + # a `trial_type` columns -> filter on this + dm = dm[dm["trial_type"].isin(self.event_id.keys())] + + return dm + class LocalBIDSDataset(BaseBIDSDataset): """Generic local/private BIDS datasets. diff --git a/moabb/paradigms/base.py b/moabb/paradigms/base.py index 581a2389f..3a1288028 100644 --- a/moabb/paradigms/base.py +++ b/moabb/paradigms/base.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import abc import logging from operator import methodcaller @@ -6,7 +8,6 @@ import mne import numpy as np import pandas as pd -from mne_bids.path import _find_matching_sidecar from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import FunctionTransformer @@ -233,7 +234,7 @@ def get_data( # noqa: C901 return_raws=False, cache_config=None, postprocess_pipeline=None, - additional_metadata: Literal["default", "all"] | list[str] = "default", + additional_metadata: Literal["all"] | list[str] = None, ): """ Return the data for a list of subject. @@ -267,9 +268,9 @@ def get_data( # noqa: C901 This pipeline must return an ``np.ndarray``. This pipeline must be "fixed" because it will not be trained, i.e. no call to ``fit`` will be made. - additional_metadata: Literal["default", "all"] | list[str] + additional_metadata: Literal["all"] | list[str] | None Additional metadata to be loaded if return_epochs=True. - If "default", the default metadata will be loaded containing containing + If None, the default metadata will be loaded containing containing `subject`, `session` and `run`. If "all", all columns of the `events.tsv` file will be loaded. 
             select these columns in addition to the three default values mentioned
             before.
@@ -316,6 +317,21 @@ def get_data(  # noqa: C901
                 for run in runs.keys():
                     proc = [data_i[subject][session][run] for data_i in data]
 
+                    if additional_metadata:
+                        ext_metadata = [
+                            dataset.get_additional_metadata(
+                                subject=subject, session=session, run=run
+                            )
+                        ] * len(process_pipelines)
+
+                        if isinstance(additional_metadata, list):
+                            ext_metadata = [
+                                dm[["session", "subject", "run"] + additional_metadata]
+                                for dm in ext_metadata
+                            ]
+                    else:
+                        ext_metadata = [None] * len(process_pipelines)
+
                     if any(obj is None for obj in proc):
                         # this mean the run did not contain any selected event
                         # go to next
@@ -332,37 +348,6 @@ def get_data(  # noqa: C901
                         if len(self.filters) == 1
                         else mne.concatenate_epochs(proc)
                     )
-
-                    # prepare additional metadata
-                    if additional_metadata != "default":
-                        if not isinstance(dataset, BaseBIDSDataset):
-                            raise TypeError(
-                                "additional_metadata can only be used with BIDS datasets."
-                            )
-
-                        dm = load_bids_event_metadata(
-                            dataset, subject=subject, session=session, run=run
-                        )
-
-                        # stack for multiple bandpass filtered versions
-                        dm = pd.concat(
-                            [
-                                dm.copy().assign(filter=i)
-                                for i in range(len(self.filters))
-                            ],
-                            ignore_index=True,
-                        )
-
-                        if additional_metadata == "all":
-                            pass
-                        elif isinstance(additional_metadata, list):
-                            dm = dm[
-                                ["session", "subject", "run"] + additional_metadata
-                            ]
-                        else:
-                            raise ValueError(
-                                "additional_metadata must be 'default', 'all' or a list of column names"
-                            )
-
                 elif return_raws:
                     assert all(len(proc[0]) == len(p) for p in proc[1:])
                     n = 1
@@ -395,19 +380,27 @@ def get_data(  # noqa: C901
                     met["subject"] = subject
                     met["session"] = session
                     met["run"] = run
 
                     metadata.append(met)
 
+                    # overwrite if additional metadata is requested
+                    if additional_metadata:
+                        # extend the metadata according to the filters below
+
+                        dmeta_ext = (
+                            ext_metadata[0].copy()
+                            if isinstance(ext_metadata[0], pd.DataFrame)
+                            else pd.DataFrame()
+                        )
+                        metadata[-1] = dmeta_ext
+
                     if return_epochs:
-                        if additional_metadata == "default":
-                            x.metadata = (
-                                met.copy()
-                                if len(self.filters) == 1
-                                else pd.concat(
-                                    [met.copy()] * len(self.filters), ignore_index=True
-                                )
+                        x.metadata = (
+                            metadata[-1].copy()
+                            if len(self.filters) == 1
+                            else pd.concat(
+                                [metadata[-1].copy()] * len(self.filters),
+                                ignore_index=True,
                             )
-                        else:
-                            x.metadata = dm
-                            # also overwrite in the metadata list
-                            metadata[-1] = dm
+                        )
+
                     X.append(x)
                     labels.append(lbs)
@@ -604,30 +597,30 @@ def scoring(self):
     def _get_events_pipeline(self, dataset):
         event_id = self.used_events(dataset)
         return RawToEvents(event_id=event_id, interval=dataset.interval)
 
 
-def load_bids_event_metadata(
-    data_set: BaseBIDSDataset, subject: str, session: str, run: str
-) -> pd.DataFrame:
-    bids_paths = data_set.bids_paths(subject)
-
-    # select only with matching session and run
-    bids_path_selected = [
-        pth
-        for pth in bids_paths
-        if f"ses-{session}" in pth.basename and f"run-{run}" in pth.basename
-    ]
-
-    if len(bids_path_selected) > 1:
-        raise ValueError("More than one matching BIDS path found.")
-    bids_path = bids_path_selected[0]
-
-    events_fname = _find_matching_sidecar(
-        bids_path, suffix="events", extension=".tsv", on_error="warn"
-    )
-
-    dm = pd.read_csv(events_fname, sep="\t").assign(
-        subject=subject, session=session, run=run
-    )
-
-    return dm
+# def load_bids_event_metadata(
+#     data_set: BaseBIDSDataset, subject: str, session: str, run: str
+# ) -> pd.DataFrame:
+#     bids_paths = data_set.bids_paths(subject)
+#
+#     # select only with matching session and run
+#     bids_path_selected = [
+#         pth
+#         for pth in bids_paths
+#         if f"ses-{session}" in pth.basename and f"run-{run}" in pth.basename
f"run-{run}" in pth.basename +# ] +# +# if len(bids_path_selected) > 1: +# raise ValueError("More than one matching BIDS path found.") +# bids_path = bids_path_selected[0] +# +# events_fname = _find_matching_sidecar( +# bids_path, suffix="events", extension=".tsv", on_error="warn" +# ) +# +# dm = pd.read_csv(events_fname, sep="\t").assign( +# subject=subject, session=session, run=run +# ) +# +# return dm diff --git a/moabb/tests/test_paradigms.py b/moabb/tests/test_paradigms.py index c55bd9f54..497db03bb 100644 --- a/moabb/tests/test_paradigms.py +++ b/moabb/tests/test_paradigms.py @@ -3,12 +3,14 @@ import tempfile import unittest from math import ceil +from pathlib import Path import numpy as np import pandas as pd import pytest from mne import BaseEpochs from mne.io import BaseRaw +from mne_bids.path import _find_matching_sidecar from moabb.datasets import BNCI2014_001 from moabb.datasets.base import ( @@ -1253,7 +1255,11 @@ def cached_dataset_root(self, tmpdir_factory): dataset.get_data(cache_config=dict(save_raw=True, overwrite_raw=False, path=root)) return root / "MNE-BIDS-fake-dataset-imagery-2-2--60--120--fake1-fake2--c3-cz-c4" - def test_additional_metadata_extracts(self, cached_dataset_root): + def test_additional_metadata_extracts_aligned(self, cached_dataset_root): + """ + Test extraction of additional metadata if all rows in the events.tsv + were used to create annotations on the raw -> used for epoching + """ # --- The tsv files have metadata which would contain the following # @@ -1280,6 +1286,13 @@ def test_additional_metadata_extracts(self, cached_dataset_root): return_epochs=True, ) + raw, raw_labels, raw_metadata = paradigm.get_data( + dataset=dataset, + subjects=["1"], + return_epochs=False, + additional_metadata="all", + ) + epo2, labels2, metadata2 = paradigm.get_data( dataset=dataset, subjects=["1"], @@ -1305,6 +1318,11 @@ def test_additional_metadata_extracts(self, cached_dataset_root): assert (labels1 == labels2).all() assert (labels2 == labels3).all() + assert (raw_metadata == epo2.metadata).all().all() + assert (metadata2 == epo2.metadata).all().all() + + assert (metadata1.columns == ["subject", "session", "run"]).all() + assert "value" in metadata2.columns assert "sample" in metadata2.columns assert "value" in metadata3.columns @@ -1312,3 +1330,46 @@ def test_additional_metadata_extracts(self, cached_dataset_root): assert "duration" in metadata4.columns assert "sample" not in metadata3.columns assert "sample" not in metadata4.columns + + def test_additional_metadata_extracts_non_aligned(self, cached_dataset_root): + """ + Test extraction of additional metadata if NOT all rows in the events.tsv + were used to create annotations on the raw -> used for epoching + """ + + dataset = LocalBIDSDataset( + cached_dataset_root, + events={"fake1": 1}, + interval=[0, 3], + paradigm="imagery", + ) + + # modify the events.tsv to contain 'n/a' + events_fname = _find_matching_sidecar( + dataset.bids_paths("1")[0], suffix="events", extension=".tsv" + ) + df = pd.read_csv(events_fname, sep="\t") + df = df.assign(ix=range(len(df))) + for c in ["onset", "trial_type"]: + df[c] = df[c].astype(str) + df.loc[0, "onset"] = "n/a" + df.loc[2, "trial_type"] = "n/a" + df.to_csv(events_fname, sep="\t", index=False) + + paradigm = MotorImagery() + + epo, labels, metadata = paradigm.get_data( + dataset=dataset, subjects=["1"], return_epochs=True, additional_metadata="all" + ) + + assert ( + len(epo[metadata["session"] == "0"]) + == len(df[df["trial_type"] == "fake1"]) - 1 + ) # -1 for the one 
+        # onset which is n/a!
+
+        # test that the first fake1 value is skipped since the onset is n/a
+        # and the second is skipped as the trial_type is n/a
+        assert metadata.onset.iloc[0] == float(df[df.trial_type == "fake1"].onset.iloc[1])
+        assert "n/a" not in metadata.trial_type.values
+
+        assert (epo.metadata.fillna(0) == metadata.fillna(0)).all().all()

From ca0792554e4ac05618b58adc37536b93adc531ac Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Apr 2025 17:15:12 +0000
Subject: [PATCH 5/5] [pre-commit.ci] auto fixes from pre-commit.com hooks

---
 moabb/paradigms/base.py       | 2 +-
 moabb/tests/test_paradigms.py | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/moabb/paradigms/base.py b/moabb/paradigms/base.py
index 3a1288028..578157723 100644
--- a/moabb/paradigms/base.py
+++ b/moabb/paradigms/base.py
@@ -11,7 +11,7 @@
 from sklearn.pipeline import Pipeline, make_pipeline
 from sklearn.preprocessing import FunctionTransformer
 
-from moabb.datasets.base import BaseBIDSDataset, BaseDataset
+from moabb.datasets.base import BaseDataset
 from moabb.datasets.bids_interface import StepType
 from moabb.datasets.preprocessing import (
     EpochsToEvents,
diff --git a/moabb/tests/test_paradigms.py b/moabb/tests/test_paradigms.py
index 497db03bb..f43405581 100644
--- a/moabb/tests/test_paradigms.py
+++ b/moabb/tests/test_paradigms.py
@@ -3,7 +3,6 @@
 import tempfile
 import unittest
 from math import ceil
-from pathlib import Path
 
 import numpy as np
 import pandas as pd
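
Usage sketch (supplementary, not part of the diffs above): assuming the
`additional_metadata` argument lands as implemented in PATCH 4/5, pulling
extra `events.tsv` columns into the returned metadata would look roughly
like this. The local dataset root below is hypothetical, and the `value`
and `sample` column names are taken from the `events.tsv` shown in the
tests; any real BIDS dataset would use its own columns.

    from moabb.datasets.base import LocalBIDSDataset
    from moabb.paradigms import MotorImagery

    # hypothetical local BIDS root with "fake1"/"fake2" trial_type events
    dataset = LocalBIDSDataset(
        "/data/my_bids_root",
        events={"fake1": 1, "fake2": 2},
        interval=[0, 3],
        paradigm="imagery",
    )
    paradigm = MotorImagery()

    # default behaviour: metadata only has subject / session / run columns
    epochs, labels, metadata = paradigm.get_data(
        dataset=dataset, subjects=["1"], return_epochs=True
    )

    # "all" pulls every column of events.tsv into the metadata
    # (and into epochs.metadata when return_epochs=True)
    _, _, metadata_all = paradigm.get_data(
        dataset=dataset,
        subjects=["1"],
        return_epochs=True,
        additional_metadata="all",
    )

    # or select specific columns on top of the three defaults
    _, _, metadata_sel = paradigm.get_data(
        dataset=dataset,
        subjects=["1"],
        return_epochs=True,
        additional_metadata=["value", "sample"],
    )

Per the BaseDataset default in PATCH 4, non-BIDS datasets return None from
get_additional_metadata, so the selected-columns indexing would fail for them
rather than raise the explicit TypeError that PATCH 1 used.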