diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d45529a6..bae603ae4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,11 @@
 # Change Log
 All notable changes to this project will be documented in this file.
 
+## [2.13.4] = 2022-02-23
+- Bug fix in ecephys
+- Added support for VBN source density jobs.
+- Bug fix for pg8000
+
 ## [2.13.3] = 2022-02-02
 - Add ability to extract running speed from multi-stimulus experiments
 - Compatible with pandas 1.4
diff --git a/allensdk/__init__.py b/allensdk/__init__.py
index a6c2b7ebb..5496c4e67 100644
--- a/allensdk/__init__.py
+++ b/allensdk/__init__.py
@@ -35,7 +35,7 @@
 #
 import logging
 
-__version__ = '2.13.3'
+__version__ = '2.13.4'
 
 
 try:
diff --git a/allensdk/brain_observatory/ecephys/current_source_density/__main__.py b/allensdk/brain_observatory/ecephys/current_source_density/__main__.py
index 1d6eef4f3..76560ade9 100644
--- a/allensdk/brain_observatory/ecephys/current_source_density/__main__.py
+++ b/allensdk/brain_observatory/ecephys/current_source_density/__main__.py
@@ -55,11 +55,17 @@ def get_inputs_from_lims(args) -> dict:
     data['pre_stimulus_time'] = float(data['pre_stimulus_time'])
     data['post_stimulus_time'] = float(data['post_stimulus_time'])
-    data['surface_channel_adjustment'] = int(data['surface_channel_adjustment'])
+    data['surface_channel_adjustment'] = int(
+        data['surface_channel_adjustment']
+    )
 
     for probe in data['probes']:
-        probe['surface_channel_adjustment'] = int(probe['surface_channel_adjustment'])
-        probe['csd_output_path'] = os.path.join(output_root, os.path.split(probe['csd_output_path'])[-1])
+        probe['surface_channel_adjustment'] = int(
+            probe['surface_channel_adjustment']
+        )
+        probe['csd_output_path'] = os.path.join(
+            output_root, os.path.split(probe['csd_output_path'])[-1]
+        )
         probe['phase'] = str(probe['phase'])
 
     return data
 
@@ -85,15 +91,18 @@ def run_csd(args: dict) -> dict:
             pre_stimulus_time=args['pre_stimulus_time'],
             post_stimulus_time=args['post_stimulus_time'],
             num_trials=args['num_trials'],
-            stimulus_index=args['stimulus']['index']
+            stimulus_index=args['stimulus']['index'],
+            start_field=args['start_field']
         )
 
         logging.info('Loading LFP data')
         lfp_data_file = ContinuousFile(probe['lfp_data_path'],
                                        probe['lfp_timestamps_path'],
                                        probe['total_channels'])
-        lfp_raw, timestamps = lfp_data_file.load(memmap=args['memmap'],
-                                                 memmap_thresh=args['memmap_thresh'])
+        lfp_raw, timestamps = lfp_data_file.load(
+            memmap=args['memmap'],
+            memmap_thresh=args['memmap_thresh']
+        )
 
         if probe['phase'].lower() == '3a':
             lfp_channels = lfp_data_file.get_lfp_channel_order()
@@ -101,16 +110,20 @@
             lfp_channels = np.arange(0, probe['total_channels'])
 
         logging.info('Accumulating LFP data')
-        accumulated_lfp_data = accumulate_lfp_data(timestamps=timestamps,
-                                                   lfp_raw=lfp_raw,
-                                                   lfp_channels=lfp_channels,
-                                                   trial_windows=trial_windows,
-                                                   volts_per_bit=args['volts_per_bit'])
+        accumulated_lfp_data = accumulate_lfp_data(
+            timestamps=timestamps,
+            lfp_raw=lfp_raw,
+            lfp_channels=lfp_channels,
+            trial_windows=trial_windows,
+            volts_per_bit=args['volts_per_bit']
+        )
 
         logging.info('Removing noisy and reference channels')
-        clean_lfp, clean_channels = select_good_channels(lfp=accumulated_lfp_data,
-                                                         reference_channels=probe['reference_channels'],
-                                                         noisy_channel_threshold=args['noisy_channel_threshold'])
+        clean_lfp, clean_channels = select_good_channels(
+            lfp=accumulated_lfp_data,
+            reference_channels=probe['reference_channels'],
+            noisy_channel_threshold=args['noisy_channel_threshold']
+        )
 
         logging.info('Bandpass filtering LFP channel data')
         filt_lfp = filter_lfp_channels(lfp=clean_lfp,
@@ -119,19 +132,29 @@
                                        filter_order=args['filter_order'])
 
         logging.info('Interpolating LFP channel locations')
-        actual_locs = make_actual_channel_locations(0, accumulated_lfp_data.shape[1])
+        actual_locs = make_actual_channel_locations(
+            0,
+            accumulated_lfp_data.shape[1]
+        )
         clean_actual_locs = actual_locs[clean_channels, :]
-        interp_locs = make_interp_channel_locations(0, accumulated_lfp_data.shape[1])
-        interp_lfp, spacing = interp_channel_locs(lfp=filt_lfp,
-                                                  actual_locs=clean_actual_locs,
-                                                  interp_locs=interp_locs)
+        interp_locs = make_interp_channel_locations(
+            0,
+            accumulated_lfp_data.shape[1]
+        )
+        interp_lfp, spacing = interp_channel_locs(
+            lfp=filt_lfp,
+            actual_locs=clean_actual_locs,
+            interp_locs=interp_locs
+        )
 
         logging.info('Averaging LFPs over trials')
         trial_mean_lfp = np.nanmean(interp_lfp, axis=0)
 
         logging.info('Computing CSD')
-        current_source_density, csd_channels = compute_csd(trial_mean_lfp=trial_mean_lfp,
-                                                           spacing=spacing)
+        current_source_density, csd_channels = compute_csd(
+            trial_mean_lfp=trial_mean_lfp,
+            spacing=spacing
+        )
 
         logging.info('Saving data')
         write_csd_to_h5(
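
A note on the `get_inputs_from_lims` hunk above: the `int()`/`float()` wrapping is there presumably because the LIMS payload delivers these values as strings; the reformatting itself only changes line length. A minimal stand-alone sketch of the same normalization (the sample values are invented):

```python
# Illustration only: LIMS-style inputs arrive as strings and are coerced
# to numeric types, mirroring get_inputs_from_lims above.
raw = {
    'pre_stimulus_time': '0.5',
    'post_stimulus_time': '1.0',
    'surface_channel_adjustment': '40',
}
data = dict(raw)
data['pre_stimulus_time'] = float(data['pre_stimulus_time'])
data['post_stimulus_time'] = float(data['post_stimulus_time'])
data['surface_channel_adjustment'] = int(data['surface_channel_adjustment'])
print(data)
# {'pre_stimulus_time': 0.5, 'post_stimulus_time': 1.0,
#  'surface_channel_adjustment': 40}
```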
diff --git a/allensdk/brain_observatory/ecephys/current_source_density/_current_source_density.py b/allensdk/brain_observatory/ecephys/current_source_density/_current_source_density.py
index c80f6b019..7f43bf442 100644
--- a/allensdk/brain_observatory/ecephys/current_source_density/_current_source_density.py
+++ b/allensdk/brain_observatory/ecephys/current_source_density/_current_source_density.py
@@ -9,12 +9,17 @@
 
 
 def extract_trial_windows(
-        stimulus_table: pd.DataFrame, stimulus_name: str,
-        time_step: float, pre_stimulus_time: float, post_stimulus_time: float,
+        stimulus_table: pd.DataFrame,
+        stimulus_name: str,
+        time_step: float,
+        pre_stimulus_time: float,
+        post_stimulus_time: float,
         num_trials: Optional[int] = None,
         stimulus_index: Optional[int] = None,
-        name_field: str = 'stimulus_name', index_field: str = 'stimulus_index',
-        start_field: str = 'Start', end_field: str = 'End'
+        name_field: str = 'stimulus_name',
+        index_field: str = 'stimulus_index',
+        start_field: str = 'Start',
+        end_field: str = 'End'
 ) -> Tuple[List[np.ndarray], np.ndarray]:
     '''Obtains time interval surrounding stimulus sweep onsets
 
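
The keyword-per-line split above is cosmetic, but it highlights the field that matters for VBN support: `start_field` lets a caller point `extract_trial_windows` at a stimulus table whose onset column is not named `'Start'`. Below is a simplified, self-contained sketch of the windowing idea only; the real implementation lives in `_current_source_density.py`, and the `'start_time'` column name is an assumption for illustration, not taken from this diff:

```python
import numpy as np
import pandas as pd

def trial_windows_sketch(stimulus_table: pd.DataFrame,
                         pre_stimulus_time: float,
                         post_stimulus_time: float,
                         time_step: float,
                         start_field: str = 'Start'):
    # One time window per trial, centered on each stimulus onset.
    onsets = stimulus_table[start_field].values
    rel_window = np.arange(-pre_stimulus_time,
                           post_stimulus_time + time_step,
                           time_step)
    return [onset + rel_window for onset in onsets], rel_window

# A table whose onset column is not called 'Start':
stim = pd.DataFrame({'start_time': [10.0, 20.0]})
windows, rel = trial_windows_sketch(stim, pre_stimulus_time=0.1,
                                    post_stimulus_time=0.2,
                                    time_step=0.1,
                                    start_field='start_time')
print(np.round(windows[0], 3))  # approximately [ 9.9 10.  10.1 10.2]
```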
diff --git a/allensdk/brain_observatory/ecephys/current_source_density/_filter_utils.py b/allensdk/brain_observatory/ecephys/current_source_density/_filter_utils.py
index 262422cd6..06e15394a 100644
--- a/allensdk/brain_observatory/ecephys/current_source_density/_filter_utils.py
+++ b/allensdk/brain_observatory/ecephys/current_source_density/_filter_utils.py
@@ -31,7 +31,12 @@ def select_good_channels(lfp: np.ndarray,
 
     channel_variance = np.mean(np.std(lfp, 2), 0)
     noisy_channels = np.where(channel_variance > noisy_channel_threshold)[0]
-    to_remove = np.concatenate((np.array(reference_channels), noisy_channels))
+    to_remove = np.concatenate(
+        (
+            np.array(reference_channels),
+            noisy_channels
+        )
+    ).astype(int)
     good_indices = np.delete(np.arange(0, lfp.shape[1]), to_remove)
 
     # Remove noisy or reference channels (axis=1)
diff --git a/allensdk/brain_observatory/ecephys/current_source_density/_schemas.py b/allensdk/brain_observatory/ecephys/current_source_density/_schemas.py
index 6d5c0b0bc..18465aafa 100644
--- a/allensdk/brain_observatory/ecephys/current_source_density/_schemas.py
+++ b/allensdk/brain_observatory/ecephys/current_source_density/_schemas.py
@@ -83,6 +83,10 @@ class InputParameters(ArgSchema):
     noisy_channel_threshold = Float(default=1500.0,
                                     help='Threshold for removing noisy '
                                          'channels from analysis')
+    start_field = String(
+        default='Start',
+        help='Column from which to extract start times.'
+    )
 
 
 class ProbeOutputParameters(DefaultSchema):
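
The `.astype(int)` at the end of the `select_good_channels` change is plausibly the "Bug fix in ecephys" from the changelog. One likely failure mode, assuming a probe whose `reference_channels` list is empty: `np.array([])` defaults to float64, the concatenation is promoted to float, and recent NumPy refuses float arrays as indices in `np.delete`:

```python
import numpy as np

reference_channels = []            # assumption: no reference channels given
noisy_channels = np.array([5, 7])  # indices from np.where are integer

to_remove = np.concatenate((np.array(reference_channels), noisy_channels))
print(to_remove.dtype)  # float64 -- the empty array promoted everything

# On recent NumPy this raises IndexError (non-integer index array):
# np.delete(np.arange(10), to_remove)

to_remove = to_remove.astype(int)
print(np.delete(np.arange(10), to_remove))  # [0 1 2 3 4 6 8 9]
```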
diff --git a/allensdk/internal/core/lims_utilities.py b/allensdk/internal/core/lims_utilities.py
index fbdb43aff..af293d8b2 100644
--- a/allensdk/internal/core/lims_utilities.py
+++ b/allensdk/internal/core/lims_utilities.py
@@ -1,4 +1,7 @@
-import os, platform, re, logging
+import os
+import platform
+import re
+import logging
 
 from allensdk.core.json_utilities import read_url_get
 HDF5_FILE_TYPE_ID = 306905526
@@ -11,13 +14,14 @@
 
 
 def get_well_known_files_by_type(wkfs, wkf_type_id):
-    out = [ os.path.join( wkf['storage_directory'], wkf['filename'] )
-           for wkf in wkfs
-           if wkf.get('well_known_file_type_id',None) == wkf_type_id ]
+    out = [os.path.join(wkf['storage_directory'], wkf['filename'])
+           for wkf in wkfs
+           if wkf.get('well_known_file_type_id', None) == wkf_type_id]
 
     if len(out) == 0:
-        raise IOError("Could not find well known files with type %d." % wkf_type_id)
-
+        raise IOError(
+            "Could not find well known files with type %d." % wkf_type_id)
+
     return out
 
 
@@ -26,35 +30,42 @@ def get_well_known_file_by_type(wkfs, wkf_type_id):
     nout = len(out)
 
     if nout != 1:
-        raise IOError("Expected single well known file with type %d. Got %d." % (wkf_type_id, nout))
-
+        raise IOError(
+            "Expected single well known file with type %d. Got %d." % (
+                wkf_type_id, nout))
+
     return out[0]
 
 
 def get_well_known_files_by_name(wkfs, filename):
-    out = [ os.path.join( wkf['storage_directory'], wkf['filename'] )
-           for wkf in wkfs
-           if wkf['filename'] == filename ]
+    out = [os.path.join(wkf['storage_directory'], wkf['filename'])
+           for wkf in wkfs
+           if wkf['filename'] == filename]
 
     if len(out) == 0:
-        raise IOError("Could not find well known files with name %s." % filename)
+        raise IOError(
+            "Could not find well known files with name %s." % filename)
 
     return out
 
+
 def get_well_known_file_by_name(wkfs, filename):
     out = get_well_known_files_by_name(wkfs, filename)
     nout = len(out)
 
     if nout != 1:
-        raise IOError("Expected single well known file with name %s. Got %d." % (filename, nout))
-
+        raise IOError(
+            "Expected single well known file with name %s. Got %d." % (
+                filename, nout))
+
     return out[0]
 
+
 def append_well_known_file(wkfs, path, wkf_type_id=None, content_type=None):
     record = {
         'filename': os.path.basename(path),
         'storage_directory': os.path.dirname(path)
-        }
+    }
 
     if wkf_type_id is not None:
         record['well_known_file_type_id'] = wkf_type_id
@@ -64,36 +75,53 @@ def append_well_known_file(wkfs, path, wkf_type_id=None, content_type=None):
 
     for wkf in wkfs:
         if wkf['filename'] == record['filename']:
-            logging.debug("found existing well known file record for %s, updating", path)
+            logging.debug(
+                "found existing well known file record for %s, updating", path)
             wkf.update(record)
             return
 
-    logging.debug("could not find existing well known file record for %s, appending", path)
+    logging.debug(
+        "could not find existing well known file record for %s, appending",
+        path)
 
     wkfs.append(record)
 
-def _connect(user="limsreader", host="limsdb2", database="lims2", password="limsro", port=5432):
+
+def _connect(user="limsreader", host="limsdb2", database="lims2",
+             password="limsro", port=5432):
     import pg8000
-    conn = pg8000.connect(user=user, host=host, database=database, password=password, port=port)
+    conn = pg8000.connect(user=user, host=host, database=database,
+                          password=password, port=port)
     return conn, conn.cursor()
 
+
 def _select(cursor, query):
     cursor.execute(query)
-    columns = [ d[0].decode("utf-8") for d in cursor.description ]
-    return [ dict(zip(columns, c)) for c in cursor.fetchall() ]
+    columns = [d[0].decode("utf-8") if isinstance(d[0], bytes) else d[0] for d
+               in cursor.description]
+    return [dict(zip(columns, c)) for c in cursor.fetchall()]
+
 
 def select(cursor, query):
-    raise DeprecationWarning("lims_utilities.select is deprecated. Please use lims_utilities.query instead.")
+    raise DeprecationWarning(
+        "lims_utilities.select is deprecated. Please use "
+        "lims_utilities.query instead.")
+
 
-def connect(user="limsreader", host="limsdb2", database="lims2", password="limsro", port=5432):
-    raise DeprecationWarning("lims_utilities.connect is deprecated. Please use lims_utilities.query instead.")
+def connect(user="limsreader", host="limsdb2", database="lims2",
+            password="limsro", port=5432):
+    raise DeprecationWarning(
+        "lims_utilities.connect is deprecated. Please use "
+        "lims_utilities.query instead.")
 
-def query(query, user="limsreader", host="limsdb2", database="lims2", password="limsro", port=5432):
+
+def query(query, user="limsreader", host="limsdb2", database="lims2",
+          password="limsro", port=5432):
    conn, cursor = _connect(user, host, database, password, port)
 
     # Guard against non-ascii characters in query
-    query = ''.join([i if ord(i) < 128 else ' ' for i in query])
-
+    query = ''.join([i if ord(i) < 128 else ' ' for i in query])
+
     try:
         results = _select(cursor, query)
     finally:
@@ -101,16 +129,18 @@ def query(query, user="limsreader", host="limsdb2", database="lims2", password="
         conn.close()
     return results
 
+
 def safe_system_path(file_name):
     if platform.system() == "Windows":
         return linux_to_windows(file_name)
     else:
         return convert_from_titan_linux(os.path.normpath(file_name))
 
+
 def convert_from_titan_linux(file_name):
     # Lookup table mapping project to program
-    project_to_program= {
-        "neuralcoding": "braintv",
+    project_to_program = {
+        "neuralcoding": "braintv",
         '0378': "celltypes",
         'conn': "celltypes",
         'ctyconn': "celltypes",
@@ -128,7 +158,7 @@ def convert_from_titan_linux(file_name):
         newpath = os.path.normpath(os.path.join(
             '/allen',
             'programs',
-            project_to_program.get(m.group(1),'undefined'),
+            project_to_program.get(m.group(1), 'undefined'),
             'production',
             m.group(1),
             m.group(2)
@@ -136,10 +166,11 @@ def convert_from_titan_linux(file_name):
         return newpath
     return file_name
 
+
 def linux_to_windows(file_name):
     # Lookup table mapping project to program
-    project_to_program= {
-        "neuralcoding": "braintv",
+    project_to_program = {
+        "neuralcoding": "braintv",
         '0378': "celltypes",
         'conn': "celltypes",
         'ctyconn': "celltypes",
@@ -154,14 +185,15 @@ def linux_to_windows(file_name):
     # Simple case for new world order
     m = re.match('/allen', file_name)
     if m:
-        return "\\" + file_name.replace('/','\\')
+        return "\\" + file_name.replace('/', '\\')
 
     # /data/ paths are being retained (for now)
     # this will need to be extended to map directories to
     # /allen/{programs,aibs}/workgroups/foo
     m = re.match('/data/([^/]+)/(.*)', file_name)
     if m:
-        return os.path.normpath(os.path.join('\\\\aibsdata', m.group(1), m.group(2)))
+        return os.path.normpath(
+            os.path.join('\\\\aibsdata', m.group(1), m.group(2)))
 
     # Tough intermediary state where we have old paths
     # being translated to new paths
@@ -170,7 +202,7 @@ def linux_to_windows(file_name):
     newpath = os.path.normpath(os.path.join(
         '\\\\allen',
         'programs',
-        project_to_program.get(m.group(1),'undefined'),
+        project_to_program.get(m.group(1), 'undefined'),
         'production',
         m.group(1),
         m.group(2)
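
The `_select` change is the "Bug fix for pg8000" from the changelog: older pg8000 releases report column names in `cursor.description` as `bytes`, newer ones as `str`, so the unconditional `.decode("utf-8")` started raising `AttributeError`. A stand-alone illustration of the guarded decode (stub description tuples, no database required):

```python
def column_names(description):
    # Mirrors the guarded decode now used in _select.
    return [d[0].decode("utf-8") if isinstance(d[0], bytes) else d[0]
            for d in description]

old_style = [(b"id", None), (b"name", None)]  # older pg8000: bytes names
new_style = [("id", None), ("name", None)]    # newer pg8000: str names

print(column_names(old_style))  # ['id', 'name']
print(column_names(new_style))  # ['id', 'name']
```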
diff --git a/doc_requirements.txt b/doc_requirements.txt
index 2105798a1..91b229b66 100644
--- a/doc_requirements.txt
+++ b/doc_requirements.txt
@@ -34,3 +34,4 @@ seaborn<1.0.0
 aiohttp==3.7.4
 nest_asyncio==1.2.0
 docutils<0.18
+markupsafe==2.0.1
diff --git a/doc_template/index.rst b/doc_template/index.rst
index 82e4afcd9..b06fa3930 100644
--- a/doc_template/index.rst
+++ b/doc_template/index.rst
@@ -118,6 +118,12 @@ The Allen SDK provides Python code for accessing experimental metadata along wit
 See the `mouse connectivity section `_ for more details.
 
+What's new - 2.13.4
+----------------------------------------------------------------------
+- Bug fix in ecephys
+- Added support for VBN source density jobs.
+- Bug fix for pg8000
+
 What's new - 2.13.3
 ----------------------------------------------------------------------
 - Add ability to extract running speed from multi-stimulus experiments
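
Two closing notes. First, the `markupsafe==2.0.1` pin is the usual guard against markupsafe 2.1 removing `soft_unicode`, which broke documentation builds on older Jinja2; that motivation is inferred here, not stated in the diff. Second, to tie the `lims_utilities` cleanup together, a hypothetical usage sketch of the public helpers touched above; the table and column names are invented, and the module's default credentials only resolve inside the Allen Institute network:

```python
from allensdk.internal.core.lims_utilities import query, safe_system_path

# query() opens a connection with the module defaults, strips non-ASCII
# characters from the statement, runs it, and returns a list of dicts
# keyed by column name (via the fixed _select).
rows = query("SELECT id, storage_directory FROM ecephys_sessions LIMIT 5")

for row in rows:
    # safe_system_path() rewrites LIMS storage paths for the current
    # platform (UNC paths on Windows, normalized /allen paths elsewhere).
    print(row["id"], safe_system_path(row["storage_directory"]))
```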