
Commit

Merge pull request #87 from catalystneuro/debug_and_consistency
Debugs
CodyCBakerPhD authored Sep 24, 2024
2 parents 2ed4bc7 + b53e60d commit e27eaeb
Showing 7 changed files with 314 additions and 12 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -24,7 +24,7 @@ dependencies = [
     "neuroconv",
     "spikeinterface",
     "probeinterface",
-    "ndx-pose>=0.1.1",
+    "ndx-pose==0.1.1",
     "ndx-ibl==0.1.0",
     "ONE-api",
     "ibllib",
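
Note: the pin changed from ">=0.1.1" to "==0.1.1", locking ndx-pose to the exact release this conversion code was written against. A minimal sketch of an optional runtime guard (the importlib.metadata check is an illustration, not part of this commit):

from importlib.metadata import version

# Hypothetical guard: fail fast if the installed ndx-pose drifts from the pin in pyproject.toml.
assert version("ndx-pose") == "0.1.1", "ndx-pose must be exactly 0.1.1"
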
23 changes: 14 additions & 9 deletions src/ibl_to_nwb/_scripts/convert_brainwide_map_processed_only.py
@@ -1,14 +1,9 @@
-import os
-
-os.environ["JUPYTER_PLATFORM_DIRS"] = "1"  # Annoying
-
-import os
 from pathlib import Path
 
 from one.api import ONE
 
-from ibl_to_nwb import (
-    BrainwideMapConverter,
+from ibl_to_nwb.converters import BrainwideMapConverter
+from ibl_to_nwb.datainterfaces import (
     BrainwideMapTrialsInterface,
     IblPoseEstimationInterface,
     IblSortingInterface,
@@ -17,6 +12,7 @@
     RoiMotionEnergyInterface,
     WheelInterface,
 )
+from ibl_to_nwb.testing import check_written_nwbfile_for_consistency
 
 session_id = "d32876dd-8303-4720-8e7e-20678dc2fd71"

@@ -25,6 +21,7 @@
 revision = None
 
 base_path = Path("E:/IBL")
+base_path.mkdir(exist_ok=True)
 nwbfiles_folder_path = base_path / "nwbfiles"
 nwbfiles_folder_path.mkdir(exist_ok=True)

@@ -51,7 +48,12 @@
     camera_name = pose_estimation_file.replace("alf/_ibl_", "").replace(".dlc.pqt", "")
     data_interfaces.append(
         IblPoseEstimationInterface(
-            one=ibl_client, session=session_id, camera_name=camera_name, include_video=False, revision=revision
+            one=ibl_client,
+            session=session_id,
+            camera_name=camera_name,
+            include_video=False,
+            include_pose=True,
+            revision=revision,
         )
     )

@@ -77,10 +79,13 @@
 subject_id = metadata["Subject"]["subject_id"]
 
 subject_folder_path = nwbfiles_folder_path / f"sub-{subject_id}"
-nwbfile_path = nwbfiles_folder_path / f"sub-{subject_id}_ses-{session_id}_desc-processed_behavior+ecephys.nwb"
+subject_folder_path.mkdir(exist_ok=True)
+nwbfile_path = subject_folder_path / f"sub-{subject_id}_ses-{session_id}_desc-processed_behavior+ecephys.nwb"
 
 session_converter.run_conversion(
     nwbfile_path=nwbfile_path,
     metadata=metadata,
     overwrite=True,
 )
+
+check_written_nwbfile_for_consistency(one=ibl_client, nwbfile_path=nwbfile_path)
3 changes: 2 additions & 1 deletion src/ibl_to_nwb/converters/_brainwide_map_converter.py
@@ -9,7 +9,8 @@ class BrainwideMapConverter(IblConverter):
     def get_metadata(self) -> dict:
         metadata = super().get_metadata()
 
-        experiment_metadata = load_dict_from_file(file_path=Path(__file__).parent / "brainwide_map_general.yml")
+        brainwide_map_metadata_file_path = Path(__file__).parent.parent / "_metadata" / "brainwide_map_general.yml"
+        experiment_metadata = load_dict_from_file(file_path=brainwide_map_metadata_file_path)
         metadata = dict_deep_update(metadata, experiment_metadata)
 
         return metadata
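
Note: the metadata YAML now lives in a shared _metadata directory two levels above the converter module, and its contents are merged over the auto-generated metadata. A minimal, self-contained sketch of how neuroconv's dict_deep_update behaves (the dictionaries are illustrative):

from neuroconv.utils import dict_deep_update

generated = {"NWBFile": {"session_description": "auto-generated", "lab": "IBL"}}
overrides = {"NWBFile": {"session_description": "Brainwide map release"}}
# Deep merge: the overlapping key is replaced while sibling keys such as "lab" survive.
merged = dict_deep_update(generated, overrides)
assert merged["NWBFile"] == {"session_description": "Brainwide map release", "lab": "IBL"}
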
2 changes: 1 addition & 1 deletion src/ibl_to_nwb/converters/_iblconverter.py
@@ -144,7 +144,7 @@ def run_conversion(
         )
 
         if backend_configuration is None:
-            backend_configuration = self.get_default_backend_configuration(nwbfile=nwbfile_out, backend=backend)
+            backend_configuration = self.get_default_backend_configuration(nwbfile=nwbfile_out, backend="hdf5")
 
         configure_backend(nwbfile=nwbfile_out, backend_configuration=backend_configuration)

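
Note: with this change the default configuration is always built for the HDF5 backend, regardless of the backend argument passed to run_conversion. A minimal sketch of the same neuroconv flow in isolation (the in-memory NWBFile is illustrative):

from neuroconv.tools.nwb_helpers import configure_backend, get_default_backend_configuration
from pynwb.testing.mock.file import mock_NWBFile

nwbfile = mock_NWBFile()
# Collects per-dataset chunking and compression settings for the HDF5 backend.
backend_configuration = get_default_backend_configuration(nwbfile=nwbfile, backend="hdf5")
configure_backend(nwbfile=nwbfile, backend_configuration=backend_configuration)
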
2 changes: 2 additions & 0 deletions src/ibl_to_nwb/datainterfaces/__init__.py
@@ -6,8 +6,10 @@
 from ._pupil_tracking import PupilTrackingInterface
 from ._roi_motion_energy import RoiMotionEnergyInterface
 from ._wheel_movement import WheelInterface
+from ._brainwide_map_trials import BrainwideMapTrialsInterface
 
 __all__ = [
+    "BrainwideMapTrialsInterface",
     "IblPoseEstimationInterface",
     "IblSortingExtractor",
     "IblSortingInterface",
1 change: 1 addition & 0 deletions src/ibl_to_nwb/testing/__init__.py
@@ -0,0 +1 @@
+from ._consistency_checks import check_written_nwbfile_for_consistency
293 changes: 293 additions & 0 deletions src/ibl_to_nwb/testing/_consistency_checks.py
@@ -0,0 +1,293 @@
+from pathlib import Path
+
+from numpy.testing import assert_array_equal
+from one.api import ONE
+from pandas.testing import assert_frame_equal
+from pynwb import NWBHDF5IO, NWBFile
+
+
+def check_written_nwbfile_for_consistency(*, one: ONE, nwbfile_path: Path):
+    """
+    Check the processed-only NWB file for consistency with the equivalent calls to the ONE API.
+
+    Parameters
+    ----------
+    one : ONE
+        Initialized ONE client.
+    nwbfile_path : Path
+        Path to the NWB file.
+    """
+    with NWBHDF5IO(path=nwbfile_path, mode="r") as io:
+        nwbfile = io.read()
+        eid = nwbfile.session_id
+
+        _check_wheel_data(eid=eid, nwbfile=nwbfile, one=one)
+        # TODO: fill in the rest
+
+
+def _check_wheel_data(*, eid: str, one: ONE, nwbfile: NWBFile):
+    processing_module = nwbfile.processing["behavior"]
+    wheel_position_series = processing_module.data_interfaces["CompassDirection"].spatial_series["WheelPositionSeries"]
+    wheel_movement_table = nwbfile.processing["behavior"].data_interfaces["WheelMovementIntervals"][:]
+
+    # wheel position
+    data_from_ONE = one.load_dataset(id=eid, dataset="_ibl_wheel.position", collection="alf")
+    data_from_NWB = wheel_position_series.data[:]
+    assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+    # wheel timestamps
+    data_from_ONE = one.load_dataset(id=eid, dataset="_ibl_wheel.timestamps", collection="alf")
+    data_from_NWB = wheel_position_series.timestamps[:]
+    assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+    # wheel movement intervals (loaded from ONE as a plain array, so compare arrays, not frames)
+    data_from_ONE = one.load_dataset(id=eid, dataset="_ibl_wheelMoves.intervals", collection="alf")
+    data_from_NWB = wheel_movement_table[["start_time", "stop_time"]].values
+    assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+    # peak amplitude of wheel movement
+    data_from_ONE = one.load_dataset(id=eid, dataset="_ibl_wheelMoves.peakAmplitude", collection="alf")
+    data_from_NWB = wheel_movement_table["peak_amplitude"].values
+    assert_array_equal(x=data_from_ONE, y=data_from_NWB)
+
+
+# def test_LickInterface(
+#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+# ):
+#     """read-after-write test for the datainterface `LickInterface`.
+#     TODO DOCME
+#     Args:
+#         nwbfile (NWBFile): nwbfile object.
+#         one (ONE): ONE object.
+#         eid (str): experiment uuid / equivalent to session_id
+#     """
+#     table = nwbfile.processing["behavior"].data_interfaces["LickTimes"][:]
+#     data_nwb = table["lick_time"].values
+#     data_one = one.load_dataset(eid, "licks.times")
+#     check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#
+# def test_RoiMotionEnergyInterface(
+#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+# ):
+#     """read-after-write test for the datainterface `RoiMotionEnergyInterface`.
+#     TODO DOCME
+#     Args:
+#         nwbfile (NWBFile): nwbfile object.
+#         one (ONE): ONE object.
+#         eid (str): experiment uuid / equivalent to session_id
+#     """
+#     camera_views = ["body", "left", "right"]
+#
+#     for view in camera_views:
+#         # data
+#         data_nwb = nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].data[:]
+#         data_one = one.load_dataset(eid, "%sCamera.ROIMotionEnergy" % view, collection="alf")
+#         check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#         # timestamps
+#         data_nwb = (
+#             nwbfile.processing["behavior"].data_interfaces["%sCameraMotionEnergy" % view.capitalize()].timestamps[:]
+#         )
+#         data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
+#         check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#
+# def test_IblPoseEstimationInterface(
+#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+# ):
+#     """read-after-write test for the datainterface `IblPoseEstimationInterface`.
+#     TODO DOCME
+#     Args:
+#         nwbfile (NWBFile): nwbfile object.
+#         one (ONE): ONE object.
+#         eid (str): experiment uuid / equivalent to session_id
+#     """
+#
+#     camera_views = ["body", "left", "right"]
+#
+#     for view in camera_views:
+#         nodes = nwbfile.processing["behavior"].data_interfaces["PoseEstimation%sCamera" % view.capitalize()].nodes[:]
+#
+#         for node in nodes:
+#             # x
+#             data_nwb = (
+#                 nwbfile.processing["behavior"]
+#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+#                 .pose_estimation_series[node]
+#                 .data[:][:, 0]
+#             )
+#             data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_x" % node].values
+#             check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#             # y
+#             data_nwb = (
+#                 nwbfile.processing["behavior"]
+#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+#                 .pose_estimation_series[node]
+#                 .data[:][:, 1]
+#             )
+#             data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")["%s_y" % node].values
+#             check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#             # confidence
+#             data_nwb = (
+#                 nwbfile.processing["behavior"]
+#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+#                 .pose_estimation_series[node]
+#                 .confidence[:]
+#             )
+#             data_one = one.load_dataset(eid, "_ibl_%sCamera.dlc.pqt" % view, collection="alf")[
+#                 "%s_likelihood" % node
+#             ].values
+#             check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#             # timestamps
+#             data_nwb = (
+#                 nwbfile.processing["behavior"]
+#                 .data_interfaces["PoseEstimation%sCamera" % view.capitalize()]
+#                 .pose_estimation_series[node]
+#                 .timestamps[:]
+#             )
+#             data_one = one.load_dataset(eid, "_ibl_%sCamera.times" % view, collection="alf")
+#             check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#
+# def test_BrainwideMapTrialsInterface(
+#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+# ):
+#     """read-after-write test for the datainterface `BrainwideMapTrialsInterface`.
+#     TODO DOCME
+#     Args:
+#         nwbfile (NWBFile): nwbfile object.
+#         one (ONE): ONE object.
+#         eid (str): experiment uuid / equivalent to session_id
+#     """
+#
+#     data_nwb = nwbfile.trials[:]
+#     data_one = one.load_dataset(eid, "_ibl_trials.table", collection="alf")
+#
+#     naming_map = {
+#         "start_time": "intervals_0",
+#         "stop_time": "intervals_1",
+#         "choice": "choice",
+#         "feedback_type": "feedbackType",
+#         "reward_volume": "rewardVolume",
+#         "contrast_left": "contrastLeft",
+#         "contrast_right": "contrastRight",
+#         "probability_left": "probabilityLeft",
+#         "feedback_time": "feedback_times",
+#         "response_time": "response_times",
+#         # 'stim_off_time': '',
+#         "stim_on_time": "stimOn_times",
+#         "go_cue_time": "goCue_times",
+#         "first_movement_time": "firstMovement_times",
+#     }
+#     naming_map = {v: k for k, v in naming_map.items()}
+#
+#     check_tables(data_one, data_nwb, naming_map=naming_map)
+#
+#
+# def test_PupilTrackingInterface(
+#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+# ):
+#     """read-after-write test for the datainterface `PupilTrackingInterface`.
+#     TODO DOCME
+#     Args:
+#         nwbfile (NWBFile): nwbfile object.
+#         one (ONE): ONE object.
+#         eid (str): experiment uuid / equivalent to session_id
+#     """
+#
+#     camera_views = ["left", "right"]
+#     for view in camera_views:
+#         # raw
+#         data_nwb = (
+#             nwbfile.processing["behavior"]
+#             .data_interfaces["%sPupilTracking" % view.capitalize()]
+#             .time_series["%sRawPupilDiameter" % view.capitalize()]
+#             .data[:]
+#         )
+#         data_one = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
+#             "pupilDiameter_raw"
+#         ].values
+#
+#         check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#         # smooth
+#         data_nwb = (
+#             nwbfile.processing["behavior"]
+#             .data_interfaces["%sPupilTracking" % view.capitalize()]
+#             .time_series["%sSmoothedPupilDiameter" % view.capitalize()]
+#             .data[:]
+#         )
+#         data_one = one.load_dataset(eid, "_ibl_%sCamera.features.pqt" % view, collection="alf")[
+#             "pupilDiameter_smooth"
+#         ].values
+#
+#         check_arrays(data_nwb, data_one, full_check=full_check)
+#
+#
+# def test_IblSortingInterface(
+#     nwbfile: NWBFile, one: ONE, eid: str, full_check: bool = False, verbose: bool = False, revision: str = None
+# ):
+#     """_summary_
+#     Args:
+#         nwbfile (_type_): _description_
+#         one (_type_): _description_
+#         eid (_type_): _description_
+#         full_check (bool, optional): _description_. Defaults to False.
+#         revision (_type_, optional): _description_. Defaults to None.
+#     Returns:
+#         _type_: _description_
+#     """
+#
+#     units_table = nwbfile.units[:]
+#     probe_names = units_table["probe_name"].unique()
+#
+#     if full_check:
+#         inds = units_table.index
+#     else:
+#         inds = units_table.sample(20).index
+#
+#     spike_times = {}
+#     spike_clusters = {}
+#     cluster_uuids = {}
+#
+#     # for fast spike extraction
+#     def get_spikes_for_cluster(spike_clusters, spike_times, cluster):
+#         # requires that spike_times and spike_clusters are sorted
+#         start_ix, stop_ix = np.searchsorted(spike_clusters, [cluster, cluster + 1])
+#         return np.sort(spike_times[start_ix:stop_ix])
+#
+#     # get and prep data once
+#     for probe_name in probe_names:
+#
+#         # include revision TODO FIXME this will likely change - check back in with Miles
+#         if revision is not None:
+#             collection = f"alf/{probe_name}/pykilosort/{revision}"
+#         else:
+#             collection = f"alf/{probe_name}/pykilosort"
+#
+#         spike_times[probe_name] = one.load_dataset(eid, "spikes.times", collection=collection)
+#         spike_clusters[probe_name] = one.load_dataset(eid, "spikes.clusters", collection=collection)
+#         cluster_uuids[probe_name] = one.load_dataset(eid, "clusters.uuids", collection=collection)
+#
+#         # pre-sort for fast access
+#         sort_ix = np.argsort(spike_clusters[probe_name])
+#         spike_clusters[probe_name] = spike_clusters[probe_name][sort_ix]
+#         spike_times[probe_name] = spike_times[probe_name][sort_ix]
+#
+#     for ix in inds:
+#         probe_name = units_table.loc[ix, "probe_name"]
+#         uuid = units_table.loc[ix, "uuid"]
+#         nwb_spike_times = units_table.loc[ix, "spike_times"]
+#
+#         cluster_id = np.where(cluster_uuids[probe_name] == uuid)[0][0]
+#         one_spike_times = get_spikes_for_cluster(spike_clusters[probe_name], spike_times[probe_name], cluster_id)
+#
+#         # more verbose but slower for more than ~20 checks
+#         # one_spike_times = spike_times[probe_name][spike_clusters[probe_name] == cluster_id]
+#
+#         # testing
+#         testing.assert_array_less(np.max((one_spike_times - nwb_spike_times) * 30000), 1)

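Note: _check_wheel_data is the template for the "TODO: fill in the rest" above. A hypothetical sketch of one more check, adapted from the commented-out test_LickInterface (the function name and the LickTimes table layout follow that legacy test, not a finished API):

def _check_lick_data(*, eid: str, one: ONE, nwbfile: NWBFile):
    # Lick events are stored as a table in the behavior processing module.
    lick_table = nwbfile.processing["behavior"].data_interfaces["LickTimes"][:]
    data_from_NWB = lick_table["lick_time"].values
    data_from_ONE = one.load_dataset(eid, "licks.times", collection="alf")
    assert_array_equal(x=data_from_ONE, y=data_from_NWB)
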