diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 5f895369..9f627f11 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -20,13 +20,15 @@ jobs:
       with:
         python-version: 3.8

-    - name: Install dependencies
-      run: |
-        make install-deps-apt
-        python -m pip install --upgrade pip wheel
-        python -m pip install attrdict
-
-        make install-deps-wxpython
+    - name: Install conda
+      uses: conda-incubator/setup-miniconda@v3
+      with:
+        environment-file: environments/eeg-expy-docsbuild.yml
+        auto-activate-base: false
+        python-version: ${{ matrix.python_version }}
+        activate-environment: eeg-expy-full
+        channels: conda-forge
+        miniconda-version: "latest"

     - name: Build project
       run: |
diff --git a/eegnb/__init__.py b/eegnb/__init__.py
index 02ebd828..01bab5ee 100644
--- a/eegnb/__init__.py
+++ b/eegnb/__init__.py
@@ -36,7 +36,8 @@ def _get_recording_dir(
     )

     # check if directory exists, if not, make the directory
-    if not path.exists(recording_dir):
+    # Skip directory creation if wildcards are present (for pattern matching)
+    if not any('*' in str(part) for part in [subject_str, session_str]) and not path.exists(recording_dir):
        makedirs(recording_dir)

    return recording_dir
diff --git a/eegnb/analysis/utils.py b/eegnb/analysis/utils.py
index d9450981..ef0cebf3 100644
--- a/eegnb/analysis/utils.py
+++ b/eegnb/analysis/utils.py
@@ -175,10 +175,9 @@ def load_data(
     """
     subject_int = int(subject)
-    session_int = int(session)

     subject_str = "*" if subject == "all" else f"subject{subject_int:04}"
-    session_str = "*" if session == "all" else f"session{session_int:03}"
+    session_str = "*" if session == "all" else f"session{int(session):03}"

     recdir = _get_recording_dir(device_name, experiment, subject_str, session_str, site, data_dir)
     data_path = os.path.join(data_dir, recdir, "*.csv")
diff --git a/eegnb/experiments/BlockExperiment.py b/eegnb/experiments/BlockExperiment.py
new file mode 100644
index 00000000..4eeb1d69
--- /dev/null
+++ b/eegnb/experiments/BlockExperiment.py
@@ -0,0 +1,141 @@
+"""
+BlockExperiment Class - Extends BaseExperiment with block-based functionality
+
+This class provides block-based experiment capabilities by inheriting from BaseExperiment
+and overriding the run method to handle multiple blocks. It loads the stimulus only once
+and reuses it across blocks, while allowing block-specific instructions.
+
+Experiments that need block-based execution should inherit from this class instead of BaseExperiment.
+"""
+from abc import ABC
+from time import time
+
+from .Experiment import BaseExperiment
+
+
+class BlockExperiment(BaseExperiment, ABC):
+    """
+    Extended experiment class that inherits from BaseExperiment to provide block-based functionality.
+
+    This class is designed for experiments that need to run multiple blocks, with each block
+    having its own instructions and duration. It loads the stimulus only once and reuses it across blocks.
+    """
+
+    def __init__(self, exp_name, block_duration, eeg, save_fn, block_trial_size, n_blocks, iti: float, soa: float, jitter: float,
+                 use_vr=False, use_fullscr=True, stereoscopic=False):
+        """ Initializer for the Block Experiment Class
+
+        Args:
+            exp_name (str): Name of the experiment
+            block_duration (float): Duration of each block in seconds
+            eeg: EEG device object for recording
+            save_fn (str): Save filename for data
+            block_trial_size (int): Number of trials per block
+            n_blocks (int): Number of blocks to run
+            iti (float): Inter-trial interval, in seconds
+            soa (float): Stimulus onset asynchrony, in seconds
+            jitter (float): Maximum random delay added to the inter-trial interval, in seconds
+            use_vr (bool): Use VR for displaying stimulus
+            use_fullscr (bool): Use fullscreen mode
+            stereoscopic (bool): Render separate left/right eye buffers in VR
+        """
+        # Calculate total trials for the base class
+        total_trials = block_trial_size * n_blocks
+
+        # Initialize the base experiment with total trials
+        # Pass None for duration if block_duration is None to ignore time spent in instructions
+        super().__init__(exp_name, block_duration, eeg, save_fn, total_trials, iti, soa, jitter, use_vr, use_fullscr, stereoscopic)
+
+        # Store block-specific parameters
+        self.block_duration = block_duration
+        self.block_trial_size = block_trial_size
+        self.n_blocks = n_blocks
+
+        # Current block index
+        self.current_block_index = 0
+
+        # Original save filename
+        self.original_save_fn = save_fn
+
+        # Flag to track if stimulus has been loaded
+        self.stimulus_loaded = False
+
+    def present_block_instructions(self, current_block):
+        """
+        Display instructions for the current block to the user.
+
+        This method is meant to be overridden by child classes to provide
+        experiment-specific instructions before each block. The base implementation
+        simply flips the window without adding any text.
+
+        This method is called by _show_block_instructions in a loop until the user
+        provides input to continue or cancel the experiment.
+
+        Args:
+            current_block (int): The current block number (0-indexed), used to customize
+                                 instructions for specific blocks if needed.
+        """
+        self.window.flip()
+
+    def _show_block_instructions(self, block_number):
+        """
+        Show instructions for a specific block
+
+        Args:
+            block_number (int): Current block number (0-indexed)
+
+        Returns:
+            bool: True if the experiment should continue, False if the user cancelled
+        """
+
+        # Clear any previous input
+        self._clear_user_input()
+
+        # Wait for user input to continue
+        while True:
+            # Display the instruction text
+            super()._draw(lambda: self.present_block_instructions(block_number))
+
+            if self._user_input('start'):
+                return True
+            elif self._user_input('cancel'):
+                return False
+
+    def run(self, instructions=True):
+        """
+        Run the experiment as a series of blocks.
+
+        This method overrides BaseExperiment.run() to handle multiple blocks.
+
+        Args:
+            instructions (bool): Whether to show the initial experiment instructions
+        """
+        # Setup the experiment (creates window, loads stimulus once)
+        if not self.setup(instructions):
+            return False
+
+        # Start EEG Stream once for all blocks
+        if self.eeg:
+            print("Wait for the EEG-stream to start...")
+            self.eeg.start(self.save_fn)
+            print("EEG Stream started")
+
+        # Run each block
+        for block_index in range(self.n_blocks):
+            self.current_block_index = block_index
+            print(f"Starting block {block_index + 1} of {self.n_blocks}")
+
+            # Show block-specific instructions
+            if not self._show_block_instructions(block_index):
+                break
+
+            # Run this block
+            if not self._run_trial_loop(start_time=time(), duration=self.block_duration):
+                break
+
+        # Stop EEG Stream after all blocks
+        if self.eeg:
+            self.eeg.stop()
+
+        # Close window at the end of all blocks
+        self.window.close()
diff --git a/eegnb/experiments/Experiment.py b/eegnb/experiments/Experiment.py
index 4d5e287b..ef16864d 100644
--- a/eegnb/experiments/Experiment.py
+++ b/eegnb/experiments/Experiment.py
@@ -8,8 +8,9 @@ obj.run()
 """

-from abc import abstractmethod
+from abc import abstractmethod, ABC
 from typing import Callable
+from eegnb.devices.eeg import EEG

 from psychopy import prefs
 from psychopy.visual.rift import Rift
 #change the pref libraty to PTB and set the latency mode to high precision
@@ -26,37 +27,59 @@
 from eegnb import generate_save_fn


-class BaseExperiment:
+class BaseExperiment(ABC):

     def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, soa: float, jitter: float,
-                 use_vr=False, use_fullscr = True):
+                 use_vr=False, use_fullscr = True, stereoscopic = False):
         """ Initializer for the Base Experiment Class

         Args:
+            exp_name (str): Name of the experiment
+            duration (float): Duration of the experiment in seconds
+            eeg: EEG device object for recording
+            save_fn (str): Save filename for data
             n_trials (int): Number of trials/stimulus
             iti (float): Inter-trial interval
             soa (float): Stimulus on arrival
             jitter (float): Random delay between stimulus
             use_vr (bool): Use VR for displaying stimulus
+            use_fullscr (bool): Use fullscreen mode
         """
         self.exp_name = exp_name
         self.instruction_text = """\nWelcome to the {} experiment!\nStay still, focus on the centre of the screen, and try not to blink. \nThis block will run for %s seconds.\n Press spacebar to continue. \n""".format(self.exp_name)
         self.duration = duration
-        self.eeg = eeg
+        self.eeg: EEG = eeg
         self.save_fn = save_fn
         self.n_trials = n_trials
         self.iti = iti
         self.soa = soa
         self.jitter = jitter
         self.use_vr = use_vr
+        self.stereoscopic = stereoscopic
         if use_vr:
             # VR interface accessible by specific experiment classes for customizing and using controllers.
-            self.rift: Rift = visual.Rift(monoscopic=True, headLocked=True)
+            self.rift: Rift = visual.Rift(monoscopic=not stereoscopic, headLocked=True)
+            # horizontal eye offsets for stereoscopic presentation
+            if stereoscopic:
+                self.left_eye_x_pos = 0.2
+                self.right_eye_x_pos = -0.2
+            else:
+                self.left_eye_x_pos = 0
+                self.right_eye_x_pos = 0
+
         self.use_fullscr = use_fullscr
         self.window_size = [1600,800]

+        # Initializing the record duration and the marker names
+        self.record_duration = np.float32(self.duration)
+        self.markernames = [1, 2]
+
+        # Setting up the trial and parameter list
+        self.parameter = np.random.binomial(1, 0.5, self.n_trials)
+        self.trials = DataFrame(dict(parameter=self.parameter, timestamp=np.zeros(self.n_trials)))
+
     @abstractmethod
     def load_stimulus(self):
         """
@@ -78,17 +101,20 @@
         """
         raise NotImplementedError

-    def setup(self, instructions=True):
+    def present_iti(self):
+        """
+        Method that presents the inter-trial interval display for the specific experiment.

-        # Initializing the record duration and the marker names
-        self.record_duration = np.float32(self.duration)
-        self.markernames = [1, 2]
-
-        # Setting up the trial and parameter list
-        self.parameter = np.random.binomial(1, 0.5, self.n_trials)
-        self.trials = DataFrame(dict(parameter=self.parameter, timestamp=np.zeros(self.n_trials)))
+        This method defines what is shown on the screen during the period between stimuli.
+        It could be a blank screen, a fixation cross, or any other appropriate display.
+
+        This is an optional method - the default implementation simply flips the window with no additional content.
+        Subclasses can override this method to provide custom ITI displays.
+        """
+        self.window.flip()

-        # Setting up Graphics
+    def setup(self, instructions=True):
+        # Setting up Graphics
         self.window = (
             self.rift if self.use_vr else
             visual.Window(self.window_size, monitor="testMonitor", units="deg", fullscr=self.use_fullscr))
@@ -98,7 +124,7 @@

         # Show Instruction Screen if not skipped by the user
         if instructions:
-            self.show_instructions()
+            return self.show_instructions()

         # Checking for EEG to setup the EEG stream
         if self.eeg:
@@ -113,6 +139,7 @@
             print(
                 f"No path for a save file was passed to the experiment. Saving data to {self.save_fn}"
             )
+        return True

     def show_instructions(self):
         """
@@ -128,18 +155,22 @@
         self.window.mouseVisible = False

         # clear/reset any old key/controller events
-        self.__clear_user_input()
+        self._clear_user_input()

         # Waiting for the user to press the spacebar or controller button or trigger to start the experiment
-        while not self.__user_input('start'):
+        while not self._user_input('start'):
             # Displaying the instructions on the screen
             text = visual.TextStim(win=self.window, text=self.instruction_text, color=[-1, -1, -1])
-            self.__draw(lambda: self.__draw_instructions(text))
+            self._draw(lambda: self.__draw_instructions(text))

         # Enabling the cursor again
         self.window.mouseVisible = True

-    def __user_input(self, input_type):
+        if self._user_input('cancel'):
+            return False
+        return True
+
+    def _user_input(self, input_type):
         if input_type == 'start':
             key_input = 'spacebar'
             vr_inputs = [
@@ -156,6 +187,9 @@
                 ('Xbox', 'B', None)
             ]

+        else:
+            raise Exception(f'Invalid input_type: {input_type}')
+
         if len(event.getKeys(keyList=key_input)) > 0:
             return True
@@ -193,10 +227,16 @@ def get_vr_input(self, vr_controller, button=None, trigger=False):
         return False

     def __draw_instructions(self, text):
-        text.draw()
+        if self.use_vr and self.stereoscopic:
+            for eye, x_pos in [("left", self.left_eye_x_pos), ("right", self.right_eye_x_pos)]:
+                self.window.setBuffer(eye)
+                text.pos = (x_pos, 0)
+                text.draw()
+        else:
+            text.draw()
         self.window.flip()

-    def __draw(self, present_stimulus: Callable):
+    def _draw(self, present_stimulus: Callable):
         """
         Set the current eye position and projection for all given stimulus,
         then draw all stimulus and flip the window/buffer
@@ -207,7 +247,7 @@
             self.window.setDefaultView()
         present_stimulus()

-    def __clear_user_input(self):
+    def _clear_user_input(self):
         event.getKeys()
         self.clear_vr_input()

@@ -217,14 +257,62 @@
         """
         if self.use_vr:
             self.rift.updateInputState()
+
+    def _run_trial_loop(self, start_time, duration):
+        """
+        Run the trial presentation loop.
+
+        This method handles the common trial presentation logic used by both
+        BaseExperiment.run() and BlockExperiment.run().
+
+        Args:
+            start_time (float): Time when the trial loop started
+            duration (float): Maximum duration of the trial loop in seconds

-    def run(self, instructions=True):
-        """ Do the present operation for a bunch of experiments """
+        """

         def iti_with_jitter():
             return self.iti + np.random.rand() * self.jitter

-        # Setup the experiment, alternatively could get rid of this line, something to think about
+        # Initialize trial variables
+        current_trial = trial_end_time = -1
+        trial_start_time = None
+        rendering_trial = -1
+
+        # Clear/reset user input buffer
+        self._clear_user_input()
+
+        # Run the trial loop
+        while (time() - start_time) < duration:
+            elapsed_time = time() - start_time
+
+            # Do not present the stimulus until the current trial begins (adhere to the inter-trial interval).
+            if elapsed_time > trial_end_time:
+                current_trial += 1
+
+                # Calculate timing for this trial
+                trial_start_time = elapsed_time + iti_with_jitter()
+                trial_end_time = trial_start_time + self.soa
+
+            # Do not present the stimulus after the trial has ended (stimulus onset asynchrony interval).
+            if elapsed_time >= trial_start_time:
+                # If the current trial number changed, present the new stimulus.
+                if current_trial > rendering_trial:
+                    # Stimulus presentation overwritten by specific experiment
+                    self._draw(lambda: self.present_stimulus(current_trial))
+                    rendering_trial = current_trial
+            else:
+                self._draw(lambda: self.present_iti())
+
+            if self._user_input('cancel'):
+                return False
+
+        return True
+
+    def run(self, instructions=True):
+        """ Do the present operation for a bunch of experiments """
+
+        # Setup the experiment
         self.setup(instructions)

         print("Wait for the EEG-stream to start...")
@@ -235,37 +323,11 @@ def iti_with_jitter():

         print("EEG Stream started")

-        # Run trial until a key is pressed or experiment duration has expired.
-        start = time()
-        current_trial = current_trial_end = -1
-        current_trial_begin = None
-
-        # Current trial being rendered
-        rendering_trial = -1
-
-        # Clear/reset user input buffer
-        self.__clear_user_input()
-
-        while not self.__user_input('cancel') and (time() - start) < self.record_duration:
-
-            current_experiment_seconds = time() - start
-            # Do not present stimulus until current trial begins(Adhere to inter-trial interval).
-            if current_trial_end < current_experiment_seconds:
-                current_trial += 1
-                current_trial_begin = current_experiment_seconds + iti_with_jitter()
-                current_trial_end = current_trial_begin + self.soa
-
-            # Do not present stimulus after trial has ended(stimulus on arrival interval).
-            elif current_trial_begin < current_experiment_seconds:
-
-                # if current trial number changed get new choice of image.
-                if rendering_trial < current_trial:
-                    # Some form of presenting the stimulus - sometimes order changed in lower files like ssvep
-                    # Stimulus presentation overwritten by specific experiment
-                    self.__draw(lambda: self.present_stimulus(current_trial))
-                    rendering_trial = current_trial
-            else:
-                self.__draw(lambda: self.window.flip())
+        # Record experiment until a key is pressed or duration has expired.
+        record_start_time = time()
+
+        # Run the trial loop
+        self._run_trial_loop(record_start_time, self.record_duration)

         # Clearing the screen for the next trial
         event.clearEvents()
diff --git a/eegnb/experiments/visual_vep/pattern_reversal_vep.py b/eegnb/experiments/visual_vep/pattern_reversal_vep.py
new file mode 100644
index 00000000..8b450512
--- /dev/null
+++ b/eegnb/experiments/visual_vep/pattern_reversal_vep.py
@@ -0,0 +1,214 @@
+from time import time
+import numpy as np
+
+from psychopy import visual
+from typing import Optional, Dict, Any
+from eegnb.devices.eeg import EEG
+from eegnb.experiments.BlockExperiment import BlockExperiment
+from stimupy.stimuli.checkerboards import contrast_contrast
+
+QUEST_PPD = 20
+
+class VisualPatternReversalVEP(BlockExperiment):
+
+    def __init__(self, display_refresh_rate: int, eeg: Optional[EEG] = None, save_fn=None,
+                 block_duration_seconds=50, block_trial_size: int = 100, n_blocks: int = 4, use_vr=False, use_fullscr=True):
+
+        self.display_refresh_rate = display_refresh_rate
+        soa = 0.5
+        iti = 0
+        jitter = 0
+
+        super().__init__("Visual Pattern Reversal VEP", block_duration_seconds, eeg, save_fn, block_trial_size, n_blocks, iti, soa, jitter, use_vr, use_fullscr, stereoscopic=True)
+
+        self.instruction_text = f"""Welcome to the Visual Pattern Reversal VEP experiment!
+
+        This experiment will run for {n_blocks} blocks of {block_duration_seconds} seconds each.
+
+        Press spacebar or controller to continue.
+ """ + + # Setting up the trial and parameter list + left_eye = 0 + right_eye = 1 + # Alternate between left and right eye blocks + block_eyes = [] + for block_num in range(n_blocks): + eye = left_eye if block_num % 2 == 0 else right_eye + block_eyes.extend([eye] * block_trial_size) + self.parameter = np.array(block_eyes) + + @staticmethod + def create_monitor_checkerboard(intensity_checks): + # Standard parameters for monitor-based pattern reversal VEP + # Using standard 1 degree check size at 30 pixels per degree + return contrast_contrast( + visual_size=(16, 16), # aspect ratio in degrees + ppd=72, # pixels per degree + frequency=(0.5, 0.5), # spatial frequency of the checkerboard (0.5 cpd = 1 degree check size) + intensity_checks=intensity_checks, + target_shape=(0, 0), + alpha=0, + tau=0 + ) + + @staticmethod + def create_vr_checkerboard(intensity_checks): + # Optimized parameters for Oculus/Meta Quest 2 with PC link + # Quest 2 has approximately 20 pixels per degree and a ~90° FOV + # Using standard 1 degree check size (0.5 cpd) + return contrast_contrast( + visual_size=(20, 20), # size in degrees - covers a good portion of the FOV + ppd=QUEST_PPD, # pixels per degree for Quest 2 + frequency=(0.5, 0.5), # spatial frequency (0.5 cpd = 1 degree check size) + intensity_checks=intensity_checks, + target_shape=(0, 0), + alpha=0, + tau=0 + ) + + def load_stimulus(self) -> Dict[str, Any]: + # Frame rate, in Hz + # GetActualFrameRate() crashes in psychxr due to 'EndFrame called before BeginFrame' + actual_frame_rate = np.round(self.window.displayRefreshRate if self.use_vr else self.window.getActualFrameRate()) + # Ensure the expected frame rate matches and is divisable by the stimulus rate(soa) + assert actual_frame_rate % self.soa == 0, f"Expected frame rate divisable by stimulus rate: {self.soa}, but got {actual_frame_rate} Hz" + assert self.display_refresh_rate == actual_frame_rate, f"Expected frame rate {self.display_refresh_rate} Hz, but got {actual_frame_rate} Hz" + + if self.use_vr: + # Create the VR checkerboard + create_checkerboard = self.create_vr_checkerboard + # the window is large over the eye, checkerboard should only cover the central vision + size = self.window.size / 1.5 + else: + # Create the Monitor checkerboard + create_checkerboard = self.create_monitor_checkerboard + size = (self.window_size[1], self.window_size[1]) + + # The surrounding / periphery needs to be dark when not using vr. + # Also used for covering eye which is not being stimulated. + self.black_background = visual.Rect(self.window, + width=self.window.size[0], + height=self.window.size[1], + fillColor='black') + + # A grey background behind the checkerboard must be used in vr to maintain luminence. 
+ self.grey_background = visual.Rect(self.window, + width=self.window.size[0], + height=self.window.size[1], + fillColor=[-0.22, -0.22, -0.22]) + + # Create checkerboard stimuli + def create_checkerboard_stim(intensity_checks, pos): + return visual.ImageStim(self.window, + image=create_checkerboard(intensity_checks)['img'], + units='pix', size=size, color='white', pos=pos) + + # Create fixation stimuli + def create_fixation_stim(pos): + fixation = visual.GratingStim( + win=self.window, + pos=pos, + sf=400 if self.use_vr else 0.2, + color=[1, 0, 0] + ) + fixation.size = 0.02 if self.use_vr else 0.4 + return fixation + + # Create VR block instruction stimuli + def create_vr_block_instruction(pos): + return visual.TextStim(win=self.window, text="Focus on the red dot, and try not to blink whilst the squares are flashing, press the spacebar or pull the controller trigger when ready to commence.", color=[-1, -1, -1], + pos=pos, height=0.1) + + # Create and position stimulus + def create_eye_stimuli(eye_x_pos, pix_x_pos): + return { + 'checkerboards': [ + create_checkerboard_stim((1, -1), pos=(pix_x_pos, 0)), + create_checkerboard_stim((-1, 1), pos=(pix_x_pos, 0)) + ], + 'fixation': create_fixation_stim([eye_x_pos, 0]), + 'vr_block_instructions': create_vr_block_instruction((eye_x_pos, 0)) + } + + # Structure all stimuli in organized dictionary + if self.use_vr: + # Calculate pixel positions for stereoscopic presentation + window_width = self.window.size[0] + left_pix_x_pos = self.left_eye_x_pos * (window_width / 2) + right_pix_x_pos = self.right_eye_x_pos * (window_width / 2) + + return { + 'left': create_eye_stimuli(self.left_eye_x_pos, left_pix_x_pos), + 'right': create_eye_stimuli(self.right_eye_x_pos, right_pix_x_pos) + } + else: + return { + 'monoscopic': create_eye_stimuli(0, 0) + } + + def _present_vr_block_instructions(self, open_eye, closed_eye): + self.window.setBuffer(open_eye) + self.stim[open_eye]['vr_block_instructions'].draw() + self.stim[open_eye]['fixation'].draw() + self.window.setBuffer(closed_eye) + self.black_background.draw() + + def present_block_instructions(self, current_block: int) -> None: + if self.use_vr: + if current_block % 2 == 0: + self._present_vr_block_instructions(open_eye="left", closed_eye="right") + else: + self._present_vr_block_instructions(open_eye="right", closed_eye="left") + else: + if current_block % 2 == 0: + instruction_text = ( + "Close your right eye, then focus on the red dot with your left eye. " + "Press spacebar or controller when ready." + ) + else: + instruction_text = ( + "Close your left eye, then focus on the red dot with your right eye. " + "Press spacebar or controller when ready." 
+ ) + text = visual.TextStim(win=self.window, text=instruction_text, color=[-1, -1, -1]) + text.draw() + self.stim['monoscopic']['fixation'].draw() + self.window.flip() + + def present_stimulus(self, idx: int): + # Get the label of the trial + trial_idx = self.current_block_index * self.block_trial_size + idx + label = self.parameter[trial_idx] + + open_eye = 'left' if label == 0 else 'right' + closed_eye = 'left' if label == 1 else 'right' + + # draw checkerboard and fixation + if self.use_vr: + self.window.setBuffer(open_eye) + self.grey_background.draw() + display = self.stim['left' if label == 0 else 'right'] + else: + self.black_background.draw() + display = self.stim['monoscopic'] + + checkerboard_frame = idx % 2 + display['checkerboards'][checkerboard_frame].draw() + display['fixation'].draw() + + if self.use_vr: + self.window.setBuffer(closed_eye) + self.black_background.draw() + self.window.flip() + + # Pushing the sample to the EEG + marker = self.markernames[label] + self.eeg.push_sample(marker=marker, timestamp=time()) + + def present_iti(self): + if self.use_vr: + for eye in ['left', 'right']: + self.window.setBuffer(eye) + self.black_background.draw() + self.window.flip() diff --git a/environments/eeg-expy-docsbuild.yml b/environments/eeg-expy-docsbuild.yml index 06dbaa5d..e001934e 100644 --- a/environments/eeg-expy-docsbuild.yml +++ b/environments/eeg-expy-docsbuild.yml @@ -1,9 +1,11 @@ -name: eeg-expy-docsbuild channels: - conda-forge dependencies: # System-level dependencies - - python>=3.8,<=3.13 + + # conda overrides current environment python version when not using --freeze-installed, and installs a random version of python... + # - python>=3.8,<=3.13 + - pytables # install pytables for macOS arm64, so do not need to build from source. - rust # used by docsbuild - pip diff --git a/environments/eeg-expy-full.yml b/environments/eeg-expy-full.yml index 05cbd476..90040b2c 100644 --- a/environments/eeg-expy-full.yml +++ b/environments/eeg-expy-full.yml @@ -1,9 +1,11 @@ -name: eeg-expy-full channels: - conda-forge dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + + # conda overrides current environment python version when not using --freeze-installed, and installs a random version of python... + # - python>=3.8,<=3.10 # psychopy <= 3.10 + - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - pytables # install pytables for macOS arm64, so do not need to build from source. - rust # used by docsbuild diff --git a/environments/eeg-expy-stimpres.yml b/environments/eeg-expy-stimpres.yml index c704b04d..d89da65b 100644 --- a/environments/eeg-expy-stimpres.yml +++ b/environments/eeg-expy-stimpres.yml @@ -1,9 +1,11 @@ -name: eeg-expy-stimpres channels: - conda-forge dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + + # conda overrides current environment python version when not using --freeze-installed, and installs a random version of python... + #- python>=3.8,<=3.10 # psychopy <= 3.10 + - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. 
   - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
   - pip
diff --git a/environments/eeg-expy-streaming.yml b/environments/eeg-expy-streaming.yml
index 8a8a751a..129370bb 100644
--- a/environments/eeg-expy-streaming.yml
+++ b/environments/eeg-expy-streaming.yml
@@ -1,9 +1,11 @@
-name: eeg-expy-streaming
 channels:
   - conda-forge

 dependencies:
   # System-level dependencies
-  - python>=3.8,<=3.13
+
+  # conda overrides the current environment's Python version when not using --freeze-installed, and installs an arbitrary Python version...
+  # - python>=3.8,<=3.13
+  - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
   - pip
   - pip:
diff --git a/environments/eeg-expy-streamstim.yml b/environments/eeg-expy-streamstim.yml
index ec355171..d65eb0c9 100644
--- a/environments/eeg-expy-streamstim.yml
+++ b/environments/eeg-expy-streamstim.yml
@@ -1,10 +1,12 @@
-name: eeg-expy-streamstim
 channels:
   - conda-forge
   - defaults

 dependencies:
   # System-level dependencies
-  - python>=3.8,<=3.10 # psychopy <= 3.10
+
+  # conda overrides the current environment's Python version when not using --freeze-installed, and installs an arbitrary Python version...
+  # - python>=3.8,<=3.10 # psychopy <= 3.10
+  - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9.
   - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
   - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
diff --git a/examples/visual_block_pattern_reversal/00x__block_pattern_reversal_run_experiment.py b/examples/visual_block_pattern_reversal/00x__block_pattern_reversal_run_experiment.py
new file mode 100644
index 00000000..0cf6fe89
--- /dev/null
+++ b/examples/visual_block_pattern_reversal/00x__block_pattern_reversal_run_experiment.py
@@ -0,0 +1,58 @@
+"""
+P100 run experiment
+===============================
+
+This example demonstrates the initiation of an EEG stream with eeg-notebooks, and how to run
+an experiment.
+
+"""
+from os import path, getenv
+
+###################################################################################################
+# Setup
+# ---------------------
+#
+# Imports
+from eegnb import generate_save_fn
+from eegnb.devices.eeg import EEG
+from eegnb.experiments.visual_vep.pattern_reversal_vep import VisualPatternReversalVEP
+import platform
+
+###################################################################################################
+# Initiate EEG device
+# ---------------------
+#
+# Start EEG device
+
+if platform.system() == "Windows":
+    serial_port = "COM3"
+else:
+    serial_port = "/dev/cu.usbserial-DM03H289"
+eeg_device = EEG(device="cyton",
+                 ch_names=['CFz', 'CPz', 'C3', 'C4', 'PO3', 'PO4', 'POz', 'Oz'],
+                 serial_port=serial_port)
+# eeg_device = EEG(device="synthetic")
+
+# Create save file name
+data_dir = getenv('DATA_DIR')
+data_dir = path.join(path.expanduser("~/"), data_dir, "data")
+save_fn = generate_save_fn(eeg_device.device_name,
+                           experiment="block_both_eyes_pattern_reversal-mark_iv_headset",
+                           site=platform.system() + "_acer_34_100hz",
+                           subject_id=0,
+                           session_nb=1,
+                           data_dir=data_dir)
+print(save_fn)
+
+# replace filename with new filename
+
+###################################################################################################
+# Run experiment
+# ---------------------
+#
+
+# display_refresh_rate is a required argument; 100 Hz matches the "_100hz" site label above.
+pattern_reversal_vep = VisualPatternReversalVEP(display_refresh_rate=100, eeg=eeg_device, save_fn=save_fn, use_fullscr=True)
+pattern_reversal_vep.run()
+
+# TODO: save latency info and other metadata
+# pattern_reversal_vep.save_metadata()
diff --git a/examples/visual_block_pattern_reversal/01r__block_pattern_reversal_viz.ipynb b/examples/visual_block_pattern_reversal/01r__block_pattern_reversal_viz.ipynb
new file mode 100644
index 00000000..ed5c0263
--- /dev/null
+++ b/examples/visual_block_pattern_reversal/01r__block_pattern_reversal_viz.ipynb
@@ -0,0 +1,277 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "metadata": {},
+   "source": [
+    "import numpy as np\n",
+    "import vep_utils\n",
+    "\n",
+    "\"\"\"\n",
+    "Pattern Reversal: Load and Visualize Data\n",
+    "===============================\n",
+    "\n",
+    "This example demonstrates loading, organizing, and visualizing EP response data from the visual P100 experiment. \n",
+    "\n",
+    "An animation of a checkerboard reversal is shown (the checkerboard squares' colours are toggled once every half second).\n",
+    "\n",
+    "The data used is the first subject and first session of one of the eeg-notebooks P100 example datasets.\n",
+    "It was recorded using an OpenBCI Ultracortex EEG headset (Mark IV) with its last five electrodes placed in the headset's\n",
+    "node locations of (PO1, Oz, PO2, P3 and P4).\n",
+    "These headset node locations were used to fit around a Meta Quest 2 headset, which tilted/angled the headset backwards\n",
+    "so that the real locations of the electrodes are closer to the occipital lobe - O1, Iz, O2, PO1 and PO2.\n",
+    "The session consisted of using the Meta Quest 2 linked with a PC to display the checkerboard reversal animation\n",
+    "for thirty seconds of continuous recording. \n",
+    "\n",
+    "We first use `fetch_datasets` to obtain a list of filenames. If these files are not already present \n",
+    "in the specified data directory, they will be quickly downloaded from the cloud. \n",
+    "\n",
+    "After loading the data from the occipital channels, we place it in an MNE `Epochs` object, and then an `Evoked` object to obtain\n",
+    "the trial-averaged response. \n",
\n", + "\n", + "The final figure plotted at the end shows the P100 response EP waveform.\n", + "\"\"\"\n", + "\n", + "###################################################################################################\n", + "# Setup\n", + "# ---------------------\n", + "\n", + "# Some standard pythonic imports\n", + "import os\n", + "from collections import OrderedDict\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "# MNE functions\n", + "from mne import Epochs,find_events\n", + "\n", + "# EEG-Notebooks functions\n", + "from eegnb.analysis.utils import load_data\n", + "from vep_utils import plot_vep\n", + "from os import path, getenv\n", + "\n", + "###################################################################################################\n", + "# Load Data\n", + "# ---------------------\n", + "#\n", + "# We will use the eeg-notebooks P100 example dataset\n", + "#\n", + "# Note that if you are running this locally, the following cell will download\n", + "# the example dataset, if you do not already have it.\n", + "#\n", + "\n", + "###################################################################################################\n", + "\n", + "data_dir = path.join(path.expanduser(\"~/\"), getenv('DATA_DIR'), \"data\")\n", + "raw = load_data(subject=0,session=1,\n", + " experiment='block_both_eyes_pattern_reversal-mark_iv_headset', site='windows_acer_34_100hz', device_name='cyton',\n", + " data_dir=data_dir)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "###################################################################################################\n", + "# Visualize the power spectrum\n", + "# ----------------------------\n", + "\n", + "raw.plot_psd()\n", + "\n", + "###################################################################################################\n", + "# Filtering\n", + "# ----------------------------\n", + "\n", + "raw.filter(1,30, method='fir')\n", + "raw.plot_psd(fmin=1, fmax=30)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "\n", + "###################################################################################################\n", + "# Epoching\n", + "# ----------------------------\n", + "\n", + "# Create an array containing the timestamps and which eye was presented the stimulus\n", + "events = find_events(raw)\n", + "event_id = {'left_eye': 1, 'right_eye': 2}\n", + "\n", + "# Create an MNE Epochs object representing all the epochs around stimulus presentation\n", + "epochs = Epochs(raw, events=events, event_id=event_id,\n", + " tmin=-0.1, tmax=0.4, baseline=None,\n", + " reject={'eeg': 65e-6}, preload=True,\n", + " verbose=False, picks=[7])\n", + "epochs.shift_time(-vep_utils.windows_lag())\n", + "print('sample drop %: ', (1 - len(epochs.events)/len(events)) * 100)\n", + "epochs" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "###################################################################################################\n", + "# Epoch average\n", + "# ----------------------------\n", + "evoked = epochs.average()\n", + "evoked.plot(spatial_colors=True, show=False)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "source": [ + "evoked_potentials = epochs['left_eye'].average(picks=['Oz'])\n", + "plot_vep(evoked_potentials)" + ], + "metadata": { + "collapsed": false + }, + "outputs": 
[], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "evoked_potentials = epochs['right_eye'].average(picks=['Oz'])\n", + "plot_vep(evoked_potentials)" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "###################################################################################################\n", + "# Compare evoked potentials by event type\n", + "# ----------------------------\n", + "\n", + "# Create separate evoked responses for each event type\n", + "evoked_left = epochs['left_eye'].average(picks=['Oz'])\n", + "evoked_right = epochs['right_eye'].average(picks=['Oz'])\n", + "\n", + "# Plot both conditions on the same figure for comparison\n", + "import matplotlib.pyplot as plt\n", + "\n", + "fig, ax = plt.subplots(figsize=(10, 6))\n", + "\n", + "# Extract time points and data\n", + "times = evoked_left.times * 1000 # Convert to milliseconds\n", + "left_data = evoked_left.data[0] * 1e6 # Convert to microvolts\n", + "right_data = evoked_right.data[0] * 1e6 # Convert to microvolts\n", + "\n", + "# Plot both conditions\n", + "ax.plot(times, left_data, label='Left Eye', color='blue', linewidth=2)\n", + "ax.plot(times, right_data, label='Right Eye', color='red', linewidth=2)\n", + "\n", + "# Add formatting\n", + "ax.set_xlabel('Time (ms)')\n", + "ax.set_ylabel('Amplitude (μV)')\n", + "ax.set_title('Comparison of Evoked Potentials: Left Eye vs Right Eye')\n", + "ax.legend()\n", + "ax.grid(True, alpha=0.3)\n", + "ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)\n", + "ax.axvline(x=0, color='black', linestyle='--', alpha=0.5, label='Stimulus Onset')\n", + "\n", + "plt.tight_layout()\n", + "plt.show()\n", + "\n", + "# Print summary statistics\n", + "print(f\"Left eye - Number of epochs: {len(epochs['left_eye'])}\")\n", + "print(f\"Right eye - Number of epochs: {len(epochs['right_eye'])}\")\n", + "\n", + "# Find P100 peak for each condition (typically around 100ms)\n", + "p100_window = (80, 120) # milliseconds\n", + "time_mask = (times >= p100_window[0]) & (times <= p100_window[1])\n", + "\n", + "left_p100_idx = np.argmax(left_data[time_mask])\n", + "right_p100_idx = np.argmax(right_data[time_mask])\n", + "\n", + "left_p100_time = times[time_mask][left_p100_idx]\n", + "left_p100_amp = left_data[time_mask][left_p100_idx]\n", + "\n", + "right_p100_time = times[time_mask][right_p100_idx]\n", + "right_p100_amp = right_data[time_mask][right_p100_idx]\n", + "\n", + "print(f\"\\nP100 Peak Analysis:\")\n", + "print(f\"Left eye - Peak at {left_p100_time:.1f}ms, amplitude: {left_p100_amp:.2f}μV\")\n", + "print(f\"Right eye - Peak at {right_p100_time:.1f}ms, amplitude: {right_p100_amp:.2f}μV\")" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "###################################################################################################\n", + "# Create difference wave\n", + "# ----------------------------\n", + "\n", + "# Calculate the difference between conditions\n", + "difference_data = left_data - right_data\n", + "\n", + "fig, ax = plt.subplots(figsize=(10, 6))\n", + "ax.plot(times, difference_data, label='Left - Right', color='green', linewidth=2)\n", + "ax.set_xlabel('Time (ms)')\n", + "ax.set_ylabel('Amplitude Difference (μV)')\n", + "ax.set_title('Difference Wave: Left Eye - Right Eye')\n", + "ax.grid(True, alpha=0.3)\n", + "ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)\n", + "ax.axvline(x=0, 
color='black', linestyle='--', alpha=0.5, label='Stimulus Onset')\n", + "ax.legend()\n", + "\n", + "plt.tight_layout()\n", + "plt.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": "", + "outputs": [], + "execution_count": null + } + ], + "metadata": { + "kernelspec": { + "name": "python3", + "language": "python", + "display_name": "Python 3 (ipykernel)" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "6c096d3d5a52aa51b1da1c53f69d12a5c697c7b765ecfb9c622a0b909667c12d" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/requirements.txt b/requirements.txt index 721634bc..e7f200b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,8 @@ scikit-learn>=0.23.2 pandas>=1.1.4 -numpy>=1.26.0; python_version >= "3.9" +# psychxr build pinned to this version of numpy. +numpy>=1.26,<1.27; python_version >= "3.9" numpy<=1.24.4; python_version == "3.8" mne>=0.20.8 seaborn>=0.11.0 @@ -58,7 +59,8 @@ ffpyplayer==4.5.2 # 4.5.3 fails to build as wheel. psychtoolbox scikit-learn>=0.23.2 pandas>=1.1.4 -numpy>=1.26.0; python_version >= "3.9" +# psychxr build pinned to this version of numpy. +numpy>=1.26,<1.27; python_version >= "3.9" numpy==1.24.4; python_version == "3.8" mne>=0.20.8 seaborn>=0.11.0 @@ -85,7 +87,8 @@ pyglet==1.4.11 ; platform_system == "Windows" psychxr>=0.2.4rc2; platform_system == "Windows" and python_version <= "3.9" - +# Used for generating checkerboard in pattern reversal experiment +stimupy ## ~~ Docsbuild Requirements ~~ recommonmark
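
For reference, a minimal sketch of how an experiment is expected to subclass BlockExperiment (hypothetical example, not part of this change; it assumes setup() stores the dict returned by load_stimulus() on self.stim, which is how VisualPatternReversalVEP accesses its stimuli):

```python
# Hypothetical subclass illustrating the BlockExperiment hooks above.
from time import time
from psychopy import visual
from eegnb.experiments.BlockExperiment import BlockExperiment

class ExampleBlockedVEP(BlockExperiment):
    def __init__(self, eeg=None, save_fn=None):
        super().__init__("Example Blocked VEP", block_duration=30, eeg=eeg, save_fn=save_fn,
                         block_trial_size=50, n_blocks=2, iti=0.4, soa=0.3, jitter=0.2)

    def load_stimulus(self):
        # Called once during setup(); the returned stimuli are reused across all blocks.
        return {"target": visual.Circle(self.window, radius=1, fillColor="white")}

    def present_stimulus(self, idx: int):
        self.stim["target"].draw()
        self.window.flip()
        if self.eeg:
            # self.parameter holds 0/1 trial labels; markernames maps them to markers.
            self.eeg.push_sample(marker=self.markernames[self.parameter[idx]], timestamp=time())

    def present_block_instructions(self, current_block: int):
        # Per-block hook added by BlockExperiment; drawn in a loop until the user responds.
        visual.TextStim(self.window, text=f"Block {current_block + 1}: press space to start").draw()
        self.window.flip()
```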
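The timing arithmetic in _run_trial_loop can be sanity-checked in isolation; a self-contained sketch using the iti/soa/jitter values VisualPatternReversalVEP passes to super().__init__:

```python
# Self-contained sketch of the _run_trial_loop scheduling: a new trial is
# scheduled one ITI (plus uniform random jitter) after the previous window
# closes, and the stimulus stays current for one SOA; present_iti() fills gaps.
import numpy as np

iti, soa, jitter = 0, 0.5, 0  # seconds, as set by VisualPatternReversalVEP
trial_end_time = -1
schedule = []
for elapsed_time in np.arange(0, 2.0, 0.25):  # pretend the render loop ticks every 250 ms
    if elapsed_time > trial_end_time:
        trial_start_time = elapsed_time + iti + np.random.rand() * jitter
        trial_end_time = trial_start_time + soa
        schedule.append((round(trial_start_time, 3), round(trial_end_time, 3)))
print(schedule)  # with iti=jitter=0 and these coarse ticks: [(0.0, 0.5), (0.75, 1.25), (1.5, 2.0)]
```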
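The stereoscopic code paths (__draw_instructions, present_stimulus, present_iti) all follow the same per-eye draw pattern; a condensed sketch, assuming the PsychoPy Rift window's setBuffer/flip API used above:

```python
# Sketch of the per-eye draw pattern used by the stereoscopic VR paths:
# compose the frame twice, once per eye buffer, then submit with one flip.
def draw_stereo(window, draw_eye, left_x=0.2, right_x=-0.2):
    # left_x/right_x mirror the left_eye_x_pos/right_eye_x_pos offsets in BaseExperiment
    for eye, x_pos in [("left", left_x), ("right", right_x)]:
        window.setBuffer(eye)  # select that eye's framebuffer
        draw_eye(x_pos)        # caller draws the eye's content at its horizontal offset
    window.flip()              # submit both buffers to the headset
```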
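The _get_recording_dir change pairs with load_data's wildcard handling; a simplified sketch (directory layout abbreviated for illustration) of why the path must not be created when subject or session is "all":

```python
# With subject="all"/session="all" the parts become "*", so the assembled path
# is only ever used as a glob pattern and must never be passed to makedirs.
import os
from glob import glob

def matching_recordings(data_dir, experiment, device_name, subject, session):
    subject_str = "*" if subject == "all" else f"subject{int(subject):04}"
    session_str = "*" if session == "all" else f"session{int(session):03}"
    pattern = os.path.join(data_dir, experiment, device_name, subject_str, session_str, "*.csv")
    return glob(pattern)  # e.g. every subject and session recorded for this experiment
```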