Skip to content

Commit

Permalink
Merge branch 'main' into splintered_single_file_improvements
Browse files Browse the repository at this point in the history
  • Loading branch information
CodyCBakerPhD authored Jul 17, 2024
2 parents 06b111b + c846375 commit 864ce7f
Show file tree
Hide file tree
Showing 8 changed files with 242 additions and 187 deletions.
56 changes: 42 additions & 14 deletions src/leifer_lab_to_nwb/randi_nature_2023/convert_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import datetime
import pathlib
import warnings

import pandas
import pynwb
Expand All @@ -11,7 +12,7 @@

# STUB_TEST=True creates 'preview' files that truncate all major data blocks; useful for ensuring process runs smoothly
# STUB_TEST=False performs a full file conversion
STUB_TEST = True
STUB_TEST = False


# Define base folder of source data
Expand All @@ -29,6 +30,9 @@
# Everything below this line is automated and should not need to be changed
# *************************************************************************

# Suppress false-positive warning
warnings.filterwarnings(action="ignore", message="The linked table for DynamicTableRegion*", category=UserWarning)

NWB_OUTPUT_FOLDER_PATH.mkdir(exist_ok=True)

# Parse session start time from the pumpprobe path
Expand All @@ -40,9 +44,6 @@
PUMPPROBE_FOLDER_PATH = str(PUMPPROBE_FOLDER_PATH)
MULTICOLOR_FOLDER_PATH = str(MULTICOLOR_FOLDER_PATH)

# Initialize interfaces
data_interfaces = list()

source_data = {
"PumpProbeImagingInterfaceGreen": {"pumpprobe_folder_path": PUMPPROBE_FOLDER_PATH, "channel_name": "Green"},
"PumpProbeImagingInterfaceRed": {"pumpprobe_folder_path": PUMPPROBE_FOLDER_PATH, "channel_name": "Red"},
Expand All @@ -54,32 +55,59 @@
"ExtraOphysMetadataInterface": {"pumpprobe_folder_path": PUMPPROBE_FOLDER_PATH},
}

# Initialize converter
converter = RandiNature2023Converter(source_data=source_data)

metadata = converter.get_metadata()

metadata["NWBFile"]["session_start_time"] = session_start_time

# TODO: these are placeholders that would be read in from a logbook read+lookup
# TODO: these are all placeholders that would be read in from the YAML logbook read+lookup
metadata["NWBFile"][
"experiment_description"
] = """
To measure signal propagation, we activated each single neuron, one at a time, through two-photon stimulation,
while simultaneously recording the calcium activity of the population at cellular resolution using spinning disk
confocal microscopy. We recorded activity from 113 wild-type (WT)-background animals, each for up to 40min, while
stimulating a mostly randomly selected sequence of neurons one by one every 30s. We spatially restricted our
two-photon activation in three dimensions to be the size of a typical C. elegans neuron, to minimize off-target
activation of neighbouring neurons. Animals were immobilized but awake, and pharyngeal pumping was visible during
recordings.
"""
metadata["NWBFile"]["institution"] = "Princeton University"
metadata["NWBFile"]["lab"] = "Leifer Lab"
metadata["NWBFile"]["experimenter"] = ["Randi, Francesco"]
metadata["NWBFile"]["keywords"] = ["C. elegans", "optogenetics", "functional connectivity"]

subject_id = session_start_time.strftime("%y%m%d")
metadata["Subject"]["subject_id"] = subject_id
metadata["Subject"]["species"] = "C. elegans"
metadata["Subject"]["species"] = "Caenorhabditis elegans"
metadata["Subject"]["strain"] = "AKS471.2.d"
metadata["Subject"]["genotype"] = "WT"
metadata["Subject"]["sex"] = "XX"
metadata["Subject"]["age"] = "P1D"
# metadata["Subject"]["growth_stage_time"] = pandas.Timedelta(hours=2, minutes=30).isoformat() # TODO: request
metadata["Subject"]["growth_stage"] = "YA"
metadata["Subject"]["growth_stage"] = "L4"
metadata["Subject"]["cultivation_temp"] = 20.0

conversion_options = {
"PumpProbeImagingInterfaceGreen": {"stub_test": True},
"PumpProbeImagingInterfaceRed": {"stub_test": True},
"PumpProbeSegmentationInterfaceGreed": {"stub_test": True},
"PumpProbeSegmentationInterfaceRed": {"stub_test": True},
"NeuroPALImagingInterface": {"stub_test": True},
"PumpProbeImagingInterfaceGreen": {"stub_test": STUB_TEST},
"PumpProbeImagingInterfaceRed": {"stub_test": STUB_TEST},
"PumpProbeSegmentationInterfaceGreed": {"stub_test": STUB_TEST},
"PumpProbeSegmentationInterfaceRed": {"stub_test": STUB_TEST},
"NeuroPALImagingInterface": {"stub_test": STUB_TEST},
}

nwbfile_path = NWB_OUTPUT_FOLDER_PATH / f"sub-{subject_id}_ses-{session_string}.nwb"
if STUB_TEST:
stub_folder_path = NWB_OUTPUT_FOLDER_PATH / "stubs"
stub_folder_path.mkdir(exist_ok=True)
nwbfile_path = stub_folder_path / f"{session_string}_stub.nwb"
else:
# Name and nest the file in a DANDI compliant way
subject_folder_path = NWB_OUTPUT_FOLDER_PATH / f"sub-{subject_id}"
subject_folder_path.mkdir(exist_ok=True)
dandi_session_string = session_string.replace("_", "-")
nwbfile_path = subject_folder_path / f"sub-{subject_id}_ses-{dandi_session_string}.nwb"

converter.run_conversion(
nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True, conversion_options=conversion_options
)
Original file line number Diff line number Diff line change
Expand Up @@ -115,8 +115,10 @@ def add_to_nwbfile(
chunk_shape = (1, 1, self.data_shape[-2], self.data_shape[-1])

# Best we can do is limit the number of depths that are written by stub
# TODO: add ndx-microscopy support to NeuroConv BackendConfiguration to avoid need for H5DataIO
imaging_data = self.data if not stub_test else self.data[:stub_depths, :, :, :]
data_iterator = neuroconv.tools.hdmf.SliceableDataChunkIterator(data=imaging_data, chunk_shape=chunk_shape)
data_iterator = pynwb.H5DataIO(data_iterator, compression="gzip")

depth_per_frame_in_um = self.brains_info["zOfFrame"][0]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def add_to_nwbfile(
)
nwbfile.add_lab_meta_data(lab_meta_data=imaging_space)
else:
imaging_space = nwbfile.lab_meta_data["PlanarImagingSpace"]
imaging_space = nwbfile.lab_meta_data["NeuroPALImagingSpace"]

plane_segmentation = ndx_microscopy.MicroscopyPlaneSegmentation(
name="NeuroPALPlaneSegmentation",
Expand All @@ -81,7 +81,7 @@ def add_to_nwbfile(
number_of_rois = self.brains_info["nInVolume"][0]
for neuropal_roi_id in range(number_of_rois):
coordinate_info = self.brains_info["coordZYX"][neuropal_roi_id]
coordinates = (coordinate_info[1], coordinate_info[2], coordinate_info[0], 1.0)
coordinates = (coordinate_info[2], coordinate_info[1], coordinate_info[0], 1.0)

plane_segmentation.add_row(
id=neuropal_roi_id,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import pathlib
from typing import Union

import ndx_microscopy
import ndx_patterned_ogen
import neuroconv
import numpy
Expand Down Expand Up @@ -36,22 +37,16 @@ def add_to_nwbfile(
nwbfile: pynwb.NWBFile,
metadata: Union[dict, None] = None,
) -> None:
assert "Microscope" in nwbfile.devices, (
"The `Microscope` must be added before this interface! Make sure the call to "
"`.run_conversion` for this interface occurs after the `PumpProbeSegmentationInterface`."
)
microscope = nwbfile.devices["Microscope"]

assert (
"ImageSegmentation" in nwbfile.processing["ophys"].data_interfaces
and "PlaneSegmentation" in nwbfile.processing["ophys"]["ImageSegmentation"].plane_segmentations
), (
"The `PlaneSegmentation` must be added before this interface! Make sure the call to "
"`.run_conversion` for this interface occurs after the `PumpProbeSegmentationInterface`."
)
image_segmentation = nwbfile.processing["ophys"]["ImageSegmentation"]
plane_segmentation = image_segmentation.plane_segmentations["PlaneSegmentation"]
imaging_plane = plane_segmentation.imaging_plane
# if "Microscope" not in nwbfile.devices:
# microscope = ndx_microscopy.Microscope(name="Microscope")
# nwbfile.add_device(devices=microscope)
# else:
# microscope = nwbfile.devices["Microscope"]

# TODO: reusing the Microscope device creates an invalid file
# NWB team has been notified about the issue, until then, we need to create a dummy device
ogen_device = pynwb.ophys.Device(name="OptogeneticDevice", description="")
nwbfile.add_device(ogen_device)

light_source = ndx_patterned_ogen.LightSource(
name="AmplifiedLaser",
Expand All @@ -74,7 +69,8 @@ def add_to_nwbfile(
excitation_lambda=850.0, # nm
effector="GUR-3/PRDX-2",
location="whole brain",
device=microscope,
# device=microscope,
device=ogen_device,
light_source=light_source,
)
nwbfile.add_ogen_site(site)
Expand All @@ -94,29 +90,50 @@ def add_to_nwbfile(
),
# Calculated manually from the 'source data' of Supplementary Figure 2a
# https://www.nature.com/articles/s41586-023-06683-4#MOESM10
lateral_point_spread_function_in_um="(-0.245, 0.059) ± (0.396, 0.264)",
axial_point_spread_function_in_um="0.444 ± 0.536",
# via https://github.com/catalystneuro/leifer_lab_to_nwb/issues/5#issuecomment-2195497434
lateral_point_spread_function_in_um="(-0.246, 2.21) ± (0.045, 1.727)",
axial_point_spread_function_in_um="0.540 ± 1.984",
)
nwbfile.add_lab_meta_data(temporal_focusing)

# Assume all targets are unique; if retargeting of the same location is ever enabled, it would be nice
# to refactor this to make proper reuse of target locations.
optical_channel = pynwb.ophys.OpticalChannel( # TODO: I really wish I didn't need this...
name="DummyOpticalChannel",
description="A dummy optical channel for ndx-patterned-ogen metadata.",
emission_lambda=numpy.nan,
)
imaging_plane = nwbfile.create_imaging_plane(
name="TargetImagingPlane",
description="The targeted plane.",
indicator="",
location="whole brain",
excitation_lambda=numpy.nan,
# device=microscope,
device=ogen_device,
optical_channel=optical_channel,
)
targeted_plane_segmentation = pynwb.ophys.PlaneSegmentation(
name="TargetPlaneSegmentation",
description="Table for storing the target centroids, defined by a one-voxel mask.",
imaging_plane=imaging_plane,
)
targeted_plane_segmentation.add_column(name="depth_in_um", description="Targeted depth in micrometers.")
for target_x_index, target_y_index, depth_in_mm in zip(
for target_x_index, target_y_index, depth_in_um in zip(
self.optogenetic_stimulus_table["optogTargetX"],
self.optogenetic_stimulus_table["optogTargetY"],
self.optogenetic_stimulus_table["optogTargetZ"],
):
targeted_plane_segmentation.add_roi(
pixel_mask=[(int(target_x_index), int(target_y_index), 1.0)], depth_in_um=depth_in_mm * 1e3
pixel_mask=[(int(target_x_index), int(target_y_index), 1.0)], depth_in_um=depth_in_um
)

image_segmentation = pynwb.ophys.ImageSegmentation(name="TargetedImageSegmentation")
image_segmentation.add_plane_segmentation(targeted_plane_segmentation)

ophys_module = neuroconv.tools.nwb_helpers.get_module(nwbfile=nwbfile, name="ophys")
ophys_module.add(image_segmentation)

# Hardcoded duration from the methods section of paper
# TODO: may have to adjust this for unc-31 mutant strain subjects
stimulus_duration_in_s = 500.0 / 1e3
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,7 @@ def add_to_nwbfile(
metadata: dict | None = None,
stub_test: bool = False,
stub_frames: int = 70,
display_progress: bool = True,
) -> None:
# TODO: enhance all metadata
if "Microscope" not in nwbfile.devices:
Expand All @@ -123,13 +124,13 @@ def add_to_nwbfile(
else:
light_source = nwbfile.devices["MicroscopyLightSource"]

if "PlanarImagingSpace" not in nwbfile.lab_meta_data:
if "PumpProbeImagingSpace" not in nwbfile.lab_meta_data:
imaging_space = ndx_microscopy.PlanarImagingSpace(
name="PlanarImagingSpace", description="", microscope=microscope
name="PumpProbeImagingSpace", description="", microscope=microscope
)
nwbfile.add_lab_meta_data(lab_meta_data=imaging_space)
else:
imaging_space = nwbfile.lab_meta_data["PlanarImagingSpace"]
imaging_space = nwbfile.lab_meta_data["PumpProbeImagingSpace"]

optical_channel = ndx_microscopy.MicroscopyOpticalChannel(name=self.channel_name, description="", indicator="")
nwbfile.add_lab_meta_data(lab_meta_data=optical_channel)
Expand All @@ -143,8 +144,12 @@ def add_to_nwbfile(
num_frames_per_chunk = int(chunk_size_bytes / frame_size_bytes)
chunk_shape = (max(min(num_frames_per_chunk, num_frames), 1), x, y)

# TODO: add ndx-microscopy support to NeuroConv BackendConfiguration to avoid need for H5DataIO
imaging_data = self.imaging_data_for_channel if not stub_test else self.imaging_data_for_channel[:stub_frames]
data_iterator = neuroconv.tools.hdmf.SliceableDataChunkIterator(data=imaging_data, chunk_shape=chunk_shape)
data_iterator = neuroconv.tools.hdmf.SliceableDataChunkIterator(
data=imaging_data, chunk_shape=chunk_shape, display_progress=display_progress
)
data_iterator = pynwb.H5DataIO(data_iterator, compression="gzip")

timestamps = self.timestamps if not stub_test else self.timestamps[:stub_frames]

Expand Down

This file was deleted.

Loading

0 comments on commit 864ce7f

Please sign in to comment.