From 7dd54ba046e4836ccf34382deab91629f316ba57 Mon Sep 17 00:00:00 2001 From: kushalkolar Date: Thu, 15 Aug 2024 20:57:21 -0400 Subject: [PATCH] remove non-core algo stuff --- bin/caiman_gui.py | 666 --------- caiman/source_extraction/cnmf/estimates.py | 213 +-- caiman/source_extraction/cnmf/merging.py | 4 - caiman/source_extraction/cnmf/utilities.py | 126 -- caiman/source_extraction/volpy/volpy_gui.py | 377 ----- caiman/utils/visualization.py | 1487 ------------------- requirements.txt | 12 +- 7 files changed, 3 insertions(+), 2882 deletions(-) delete mode 100755 bin/caiman_gui.py delete mode 100644 caiman/source_extraction/volpy/volpy_gui.py delete mode 100644 caiman/utils/visualization.py diff --git a/bin/caiman_gui.py b/bin/caiman_gui.py deleted file mode 100755 index 0a02eb459..000000000 --- a/bin/caiman_gui.py +++ /dev/null @@ -1,666 +0,0 @@ -#!/usr/bin/env python - -import cv2 -from datetime import datetime -from dateutil.tz import tzlocal -import numpy as np -import pyqtgraph as pg -import scipy -import os -from pyqtgraph import FileDialog, QtWidgets -# from pyqtgraph.Qt import QtGui -from pyqtgraph.parametertree import Parameter, ParameterTree -from scipy.sparse import csc_matrix - -import caiman -from caiman.source_extraction.cnmf.cnmf import load_CNMF -from caiman.source_extraction.cnmf.params import CNMFParams - -# Always start by initializing Qt (only once per application) -app = QtWidgets.QApplication([]) - -try: - cv2.setNumThreads(1) -except: - print('OpenCV is naturally single threaded') - -try: - if __IPYTHON__: - print(1) - # this is used for debugging purposes only. allows to reload classes - # when changed - get_ipython().magic('load_ext autoreload') - get_ipython().magic('autoreload 2') -except NameError: - print('Not launched under iPython') - - -def make_color_img(img, gain=255, min_max=None, out_type=np.uint8): - if min_max is None: - min_ = img.min() - max_ = img.max() - else: - min_, max_ = min_max - - img = (img-min_)/(max_-min_+np.finfo(float).eps)*gain - img = img.astype(out_type) - img = np.dstack([img]*3) - return img - - -F = FileDialog() - -# load object saved by CNMF -fpath = F.getOpenFileName(caption='Load CNMF Object', - filter='HDF5 (*.h5 *.hdf5);;NWB (*.nwb)')[0] -if fpath == '': - raise Exception("You must provide a filename") -cnm_obj = load_CNMF(fpath) - -# movie -if not os.path.exists(cnm_obj.mmap_file): - M = FileDialog() - cnm_obj.mmap_file = M.getOpenFileName(caption='Load memory mapped file', - filter='*.mmap')[0] -else: - M = FileDialog() - d, f = os.path.split(cnm_obj.mmap_file) - cnm_obj.mmap_file = M.getOpenFileName(caption='Load memory mapped file', - directory=d, filter=f + ';;*.mmap')[0] - -if fpath[-3:] == 'nwb': - mov = caiman.load(cnm_obj.mmap_file, - var_name_hdf5='acquisition/TwoPhotonSeries') -else: - mov = caiman.load(cnm_obj.mmap_file) - -estimates = cnm_obj.estimates -params_obj = cnm_obj.params -dims = estimates.dims #original dimension -estimates.rotation = False # flag for rotation -min_mov = np.min(mov) -max_mov = np.max(mov) - -if estimates.b is None: - estimates.b = estimates.b0[:, None] - estimates.f = np.ones((1, estimates.C.shape[1])) - - class combodemo(QtWidgets.QWidget): - def __init__(self, parent = None): - super(combodemo, self).__init__(parent) - layout = QtWidgets.QHBoxLayout() - self.cb = QtWidgets.QComboBox() - self.cb.addItems(["Constant background", "Full background (slow, memory intensive)"]) - self.cb.currentIndexChanged.connect(self.selectionchange) - layout.addWidget(self.cb) - self.setLayout(layout) - 
self.setWindowTitle("Handle background for 1p data") - - def selectionchange(self,i): - if i==0: - estimates.b = estimates.b0[:, None] - estimates.f = np.ones((1, estimates.C.shape[1])) - elif i==1: - estimates.b = estimates.compute_background(mov.reshape(len(mov), -1).T) - estimates.f = np.eye(len(mov), dtype='int8') - - cb = combodemo() - cb.show() - -if not hasattr(estimates, 'Cn'): - estimates.Cn = caiman.local_correlations(mov, swap_dim=False) -#Cn = estimates.Cn - -# We rotate our components 90 degrees right because of incompatibility of pyqtgraph and pyplot -def rotate90(img, right=None, vector=None, sparse=False): - # rotate the img 90 degrees - # we first transpose the img then flip axis - # If right is true, then rotate 90 degrees right, otherwise, rotate left - # If vector is True, then we first reshape the spatial 1D vector to 2D then rotate - # If vector is False, then we directly rotate the matrix - global dims - a = int(right) - if vector == False: - img = img.transpose([1,0]) - img = np.flip(img, axis=a) - elif vector == True: - if sparse == False: - img = img.reshape((dims[1-a], dims[a], img.shape[1]), order='F') - img = img.transpose([1,0,2]) - img = np.flip(img, axis=a) - img = img.reshape((dims[0]*dims[1], img.shape[2]), order='F') - else: - img = img.toarray() - img = img.reshape((dims[1-a], dims[a], img.shape[1]), order='F') - img = img.transpose([1,0,2]) - img = np.flip(img, axis=a) - img = img.reshape((dims[0]*dims[1], img.shape[2]), order='F') - img = csc_matrix(img) - return img - -estimates.Cn = rotate90(estimates.Cn, right=True, vector=False) -estimates.A = rotate90(estimates.A, right=True, vector=True, sparse=True) -estimates.b = rotate90(estimates.b, right=True, vector=True) -estimates.dims = (estimates.dims[1], estimates.dims[0]) -estimates.rotation = True # rotation flag becomes true after rotation - -min_mov_denoise = np.min(estimates.A)*estimates.C.min() -max_mov_denoise = np.max(estimates.A)*estimates.C.max() -background_num = -1 -neuron_selected = False -nr_index = 0 - -min_background = np.min(estimates.b, axis=0)*np.min(estimates.f, axis=1) -max_background = np.max(estimates.b, axis=0)*np.max(estimates.f, axis=1) - -accepted_empty = True # check if accepted list exist and empty -if hasattr(estimates, 'accepted_list'): - accepted_empty = (len(estimates.accepted_list)==0) - -if (not hasattr(estimates, 'accepted_list')) or accepted_empty: - # if estimates.discarded_components.A.shape[-1] > 0: - # estimates.restore_discarded_components() - estimates.accepted_list = np.array([], dtype=int) - estimates.rejected_list = np.array([], dtype=int) - -estimates.img_components = estimates.A.toarray().reshape((estimates.dims[0], estimates.dims[1], -1), order='F').transpose([2,0,1]) -estimates.cms = np.array([scipy.ndimage.center_of_mass(comp) for comp in estimates.img_components]) -estimates.idx_components = np.arange(estimates.nr) -estimates.idx_components_bad = np.array([]) -estimates.background_image = make_color_img(estimates.Cn) -# Generate image data -estimates.img_components /= estimates.img_components.max(axis=(1, 2))[:, None, None] -estimates.img_components *= 255 -estimates.img_components = estimates.img_components.astype(np.uint8) - - -def draw_contours_overall(md): - if md == "reset": - draw_contours() - elif md == "neurons": - if neuron_selected is True: - # if a specific neuron has been selected, only one contour - # should be changed while thrshcomp_line is changing - if nr_index == 0: - # if user does not start to move through the frames - 
draw_contours_update(estimates.background_image, img) - draw_contours_update(comp2_scaled, img2) - else: - draw_contours_update(raw_mov_scaled, img) - draw_contours_update(frame_denoise_scaled, img2) - else: - # if no specific neuron has been selected, redraw all the contours - draw_contours() - else: - # md is "background": - return - - -def draw_contours(): - global thrshcomp_line, estimates, img - bkgr_contours = estimates.background_image.copy() - - if len(estimates.idx_components) > 0: - contours = [list(cv2.findContours(cv2.threshold(img, int(thrshcomp_line.value()), 255, 0)[1], cv2.RETR_TREE, - cv2.CHAIN_APPROX_SIMPLE)[0]) for img in estimates.img_components[estimates.idx_components]] - SNRs = np.array(estimates.r_values) - iidd = np.array(estimates.idx_components) - - idx1 = np.where(SNRs[iidd] < 0.1)[0] - idx2 = np.where((SNRs[iidd] >= 0.1) & - (SNRs[iidd] < 0.25))[0] - idx3 = np.where((SNRs[iidd] >= 0.25) & - (SNRs[iidd] < 0.5))[0] - idx4 = np.where((SNRs[iidd] >= 0.5) & - (SNRs[iidd] < 0.75))[0] - idx5 = np.where((SNRs[iidd] >= 0.75) & - (SNRs[iidd] < 0.9))[0] - idx6 = np.where(SNRs[iidd] >= 0.9)[0] - - cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idx1], []), -1, (255, 0, 0), 1) - cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idx2], []), -1, (0, 255, 0), 1) - cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idx3], []), -1, (0, 0, 255), 1) - cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idx4], []), -1, (255, 255, 0), 1) - cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idx5], []), -1, (255, 0, 255), 1) - cv2.drawContours(bkgr_contours, sum([contours[jj] for jj in idx6], []), -1, (0, 255, 255), 1) - - img.setImage(bkgr_contours, autoLevels=False) - -pg.setConfigOptions(imageAxisOrder='col-major') - -def draw_contours_update(cf, im): - global thrshcomp_line, estimates - curFrame = cf.copy() - - if len(estimates.idx_components) > 0: - contours = [cv2.findContours(cv2.threshold(img, int(thrshcomp_line.value()), 255, 0)[1], cv2.RETR_TREE, - cv2.CHAIN_APPROX_SIMPLE)[0] for img in estimates.img_components[estimates.idx_components]] - SNRs = np.array(estimates.r_values) - iidd = np.array(estimates.idx_components) - - idx1 = np.where(SNRs[iidd] < 0.1)[0] - idx2 = np.where((SNRs[iidd] >= 0.1) & - (SNRs[iidd] < 0.25))[0] - idx3 = np.where((SNRs[iidd] >= 0.25) & - (SNRs[iidd] < 0.5))[0] - idx4 = np.where((SNRs[iidd] >= 0.5) & - (SNRs[iidd] < 0.75))[0] - idx5 = np.where((SNRs[iidd] >= 0.75) & - (SNRs[iidd] < 0.9))[0] - idx6 = np.where(SNRs[iidd] >= 0.9)[0] - - if min_dist_comp in idx1: - cv2.drawContours(curFrame, contours[min_dist_comp], -1, (255, 0, 0), 1) - if min_dist_comp in idx2: - cv2.drawContours(curFrame, contours[min_dist_comp], -1, (0, 255, 0), 1) - if min_dist_comp in idx3: - cv2.drawContours(curFrame, contours[min_dist_comp], -1, (0, 0, 255), 1) - if min_dist_comp in idx4: - cv2.drawContours(curFrame, contours[min_dist_comp], -1, (255, 255, 0), 1) - if min_dist_comp in idx5: - cv2.drawContours(curFrame, contours[min_dist_comp], -1, (255, 0, 255), 1) - if min_dist_comp in idx6: - cv2.drawContours(curFrame, contours[min_dist_comp], -1, (0, 255, 255), 1) - - im.setImage(curFrame, autoLevels=False) - - -# %% - -# Define a top-level widget to hold everything -w = QtWidgets.QWidget() - -# Create some widgets to be placed inside -btn = QtWidgets.QPushButton('press me') -text = QtWidgets.QLineEdit('enter text') -win = pg.GraphicsLayoutWidget() -win.setMaximumWidth(300) -win.setMinimumWidth(200) -hist = 
pg.HistogramLUTItem()  # Contrast/color control
-win.addItem(hist)
-p1 = pg.PlotWidget()
-p2 = pg.PlotWidget()
-p3 = pg.PlotWidget()
-t = ParameterTree()
-t_action = ParameterTree()
-action_layout = QtWidgets.QGridLayout()
-
-# Create a grid layout to manage the widgets' size and position
-layout = QtWidgets.QGridLayout()
-w.setLayout(layout)
-
-# A plot area (ViewBox + axes) for displaying the image
-# p1 = win.addPlot(title="Image here")
-# Item for displaying image data
-img = pg.ImageItem()
-p1.addItem(img)
-
-img2 = pg.ImageItem()
-p3.addItem(img2)
-
-hist.setImageItem(img)
-
-# Draggable line for setting isocurve level
-thrshcomp_line = pg.InfiniteLine(angle=0, movable=True, pen='g')
-hist.vb.addItem(thrshcomp_line)
-hist.vb.setMouseEnabled(y=False)  # makes user interaction a little easier
-thrshcomp_line.setValue(100)
-thrshcomp_line.setZValue(1000)  # bring iso line above contrast controls
-
-
-## Add widgets to the layout in their proper positions
-layout.addWidget(win, 1, 0)       # histogram goes in bottom-left
-layout.addWidget(p3, 0, 2)        # component zoom view goes in upper-right
-
-layout.addWidget(t, 0, 0)         # quality parameter tree goes in upper-left
-layout.addWidget(t_action, 1, 2)  # action parameter tree goes in bottom-right
-layout.addWidget(p1, 0, 1)        # main image view goes in upper-middle
-layout.addWidget(p2, 1, 1)        # trace plot goes in bottom-middle
-
-
-# enable only horizontal zoom for the traces component
-p2.setMouseEnabled(x=True, y=False)
-
-
-draw_contours()
-
-hist.setLevels(estimates.background_image.min(), estimates.background_image.max())
-
-
-# Another plot area for displaying ROI data
-#win.nextRow()
-#p2 = win.addPlot(colspan=2)
-p2.setMaximumHeight(250)
-#win.resize(800, 800)
-#win.show()
-
-
-# set position and scale of image
-# img.scale(1, 1)
-# img.translate(-50, 0)
-
-# zoom to fit image
-p1.autoRange()
-
-
-mode = "reset"
-p2.setTitle(f"mode: {mode}")
-
-thrshcomp_line.sigDragged.connect(lambda: draw_contours_overall(mode))
-
-
-def imageHoverEvent(event):
-    # Show the position, pixel, and value under the mouse cursor.
-    global x, y, i, j, val
-    pos = event.pos()
-    i, j = pos.y(), pos.x()
-    i = int(np.clip(i, 0, estimates.background_image.shape[0] - 1))
-    j = int(np.clip(j, 0, estimates.background_image.shape[1] - 1))
-    val = estimates.background_image[i, j, 0]
-    ppos = img.mapToParent(pos)
-    x, y = ppos.x(), ppos.y()
-
-
-# Monkey-patch the image to use our custom hover function.
-# This is generally discouraged (you should subclass ImageItem instead),
-# but it works for a very simple use like this.
-img.hoverEvent = imageHoverEvent
-
-
-def mouseClickEvent(event):
-    global mode
-    global x, y, i, j, val
-
-    pos = img.mapFromScene(event.pos())
-    x = int(pos.x())
-    y = int(pos.y())
-
-    if x < 0 or x > estimates.dims[0] or y < 0 or y > estimates.dims[1]:
-        # if the user clicks outside of the movie, jump out of the function
-        return
-
-    i, j = pos.y(), pos.x()
-    i = int(np.clip(i, 0, estimates.background_image.shape[0] - 1))
-    j = int(np.clip(j, 0, estimates.background_image.shape[1] - 1))
-    val = estimates.background_image[i, j, 0]
-
-    if mode == "neurons":
-        show_neurons_clicked()
-
-p1.mousePressEvent = mouseClickEvent
-
-
-# A general rule in Qt is that if you override one mouse event handler, you must override all of them.
-def release(event):
-    pass
-
-p1.mouseReleaseEvent = release
-
-def move(event):
-    pass
-
-p1.mouseMoveEvent = move
-
-
-## PARAMS
-params = [{'name': 'min_cnn_thr', 'type': 'float', 'value': 0.99, 'limits': (0, 1), 'step': 0.01},
-          {'name': 'cnn_lowest', 'type': 'float', 'value': 0.1, 'limits': (0, 1), 'step': 0.01},
-          {'name': 'rval_thr', 'type': 'float', 'value': 0.85, 'limits': (-1, 1), 'step': 0.01},
-          {'name': 'rval_lowest', 'type': 'float', 'value': -1, 'limits': (-1, 1), 'step': 0.01},
-          {'name': 'min_SNR', 'type': 'float', 'value': 2, 'limits': (0, 20), 'step': 0.1},
-          {'name': 'SNR_lowest', 'type': 'float', 'value': 0, 'limits': (0, 20), 'step': 0.1},
-          {'name': 'RESET', 'type': 'action'},
-          {'name': 'SHOW BACKGROUND', 'type': 'action'},
-          {'name': 'SHOW NEURONS', 'type': 'action'}
-          ]
-
-## Create tree of Parameter objects
-pars = Parameter.create(name='params', type='group', children=params)
-
-
-params_action = [{'name': 'Filter components', 'type': 'bool', 'value': True, 'tip': "Filter components"},
-                 {'name': 'View components', 'type': 'list', 'values': ['All', 'Accepted',
-                                                                        'Rejected', 'Unassigned'], 'value': 'All'},
-                 {'name': 'ADD GROUP', 'type': 'action'},
-                 {'name': 'REMOVE GROUP', 'type': 'action'},
-                 {'name': 'ADD SINGLE', 'type': 'action'},
-                 {'name': 'REMOVE SINGLE', 'type': 'action'},
-                 {'name': 'SAVE OBJECT', 'type': 'action'}
-                 ]
-
-
-pars_action = Parameter.create(name='params_action', type='group', children=params_action)
-
-t_action.setParameters(pars_action, showTop=False)
-t_action.setWindowTitle('Parameter Action')
-
-
-def reset_button():
-    global mode
-    mode = "reset"
-    p2.setTitle(f"mode: {mode}")
-    # clear the upper right image
-    zeros = np.zeros(estimates.dims, order='F')
-    img2.setImage(make_color_img(zeros), autoLevels=False)
-    draw_contours()
-
-
-pars.param('RESET').sigActivated.connect(reset_button)
-
-
-def show_background_button():
-    global bg_vline, min_background, max_background, background_num
-    global mode, background_first_frame_scaled
-    # clear the upper right image
-    zeros = np.zeros(estimates.dims, order='F')
-    img2.setImage(make_color_img(zeros), autoLevels=False)
-
-    background_num = (background_num + 1) % estimates.f.shape[0]
-    mode = "background"
-    p2.setTitle(f"mode: {mode} {background_num}")
-
-    # display the first frame of the background
-    background_first_frame = estimates.b[:, background_num].reshape(estimates.dims, order='F')
-    min_background_first_frame = np.min(background_first_frame)
-    max_background_first_frame = np.max(background_first_frame)
-    background_first_frame_scaled = make_color_img(background_first_frame,
-                                                   min_max=(min_background_first_frame, max_background_first_frame))
-    img.setImage(background_first_frame_scaled, autoLevels=False)
-
-    # draw the trace and the infinite line
-    trace_background = estimates.f[background_num]
-    p2.plot(trace_background, clear=True)
-    bg_vline = pg.InfiniteLine(angle=90, movable=True)
-    p2.addItem(bg_vline, ignoreBounds=True)
-    bg_vline.setValue(0)
-    bg_vline.sigPositionChanged.connect(show_background_update)
-
-
-def show_background_update():
-    global bg_index, min_background, max_background, background_scaled
-    bg_index = int(bg_vline.value())
-    if bg_index > -1 and bg_index < estimates.f.shape[-1]:
-        # upper left component scrolls through the frames of the background
-        background = estimates.b[:, background_num].dot(estimates.f[background_num, bg_index]).reshape(estimates.dims, order='F')
-        background_scaled = make_color_img(background, min_max=(min_background[background_num],
-                                                                max_background[background_num]))
-        img.setImage(background_scaled, autoLevels=False)
-
-
-pars.param('SHOW BACKGROUND').sigActivated.connect(show_background_button)
-
-
-def show_neurons_button():
-    global mode, neuron_selected
-    mode = "neurons"
-    neuron_selected = False
-    p2.setTitle(f"mode: {mode}")
-    # clear the upper right image
-    zeros = np.zeros(estimates.dims, order='F')
-    img2.setImage(make_color_img(zeros), autoLevels=False)
-
-
-def show_neurons_clicked():
-    global nr_vline, nr_index
-    global x, y, i, j, val, min_dist_comp, contour_single, neuron_selected, comp2_scaled
-    neuron_selected = True
-    distances = np.sum(((x, y) - estimates.cms[estimates.idx_components])**2, axis=1)**0.5
-    min_dist_comp = np.argmin(distances)
-    contour_all = [cv2.threshold(img, int(thrshcomp_line.value()), 255, 0)[1] for img in estimates.img_components[estimates.idx_components]]
-    contour_single = contour_all[min_dist_comp]
-
-    # draw the traces (lower left component)
-    estimates.components_to_plot = estimates.idx_components[min_dist_comp]
-    p2.plot(estimates.C[estimates.components_to_plot] + estimates.YrA[estimates.components_to_plot], clear=True)
-
-    # plot img (upper left component)
-    img.setImage(estimates.background_image, autoLevels=False)
-    draw_contours_update(estimates.background_image, img)
-    # plot img2 (upper right component)
-    comp2 = np.multiply(estimates.Cn, contour_single > 0)
-    comp2_scaled = make_color_img(comp2, min_max=(np.min(comp2), np.max(comp2)))
-    img2.setImage(comp2_scaled, autoLevels=False)
-    draw_contours_update(comp2_scaled, img2)
-    # set title for the upper two components
-    p3.setTitle("pos: (%0.1f, %0.1f) component: %d value: %g dist:%f" % (x, y, estimates.components_to_plot,
-                                                                         val, distances[min_dist_comp]))
-    p1.setTitle("pos: (%0.1f, %0.1f) component: %d value: %g dist:%f" % (x, y, estimates.components_to_plot,
-                                                                         val, distances[min_dist_comp]))
-    # draw the infinite line
-    nr_vline = pg.InfiniteLine(angle=90, movable=True)
-    p2.addItem(nr_vline, ignoreBounds=True)
-    nr_vline.setValue(0)
-    nr_vline.sigPositionChanged.connect(show_neurons_update)
-    nr_index = 0
-
-
-def show_neurons_update():
-    global nr_index, frame_denoise_scaled, estimates, raw_mov_scaled
-    global min_mov, max_mov, min_mov_denoise, max_mov_denoise
-    if neuron_selected is False:
-        return
-    nr_index = int(nr_vline.value())
-    if nr_index > 0 and nr_index < mov[:, 0, 0].shape[0]:
-        # upper left component scrolls through the raw movie
-        raw_mov = rotate90(mov[nr_index, :, :], right=True, vector=False)
-        raw_mov_scaled = make_color_img(raw_mov, min_max=(min_mov, max_mov))
-        img.setImage(raw_mov_scaled, autoLevels=False)
-        draw_contours_update(raw_mov_scaled, img)
-        # upper right component scrolls through the denoised movie
-        frame_denoise = estimates.A[:, estimates.idx_components].dot(estimates.C[estimates.idx_components, nr_index]).reshape(estimates.dims, order='F')
-        frame_denoise_scaled = make_color_img(frame_denoise, min_max=(min_mov_denoise, max_mov_denoise))
-        img2.setImage(frame_denoise_scaled, autoLevels=False)
-        draw_contours_update(frame_denoise_scaled, img2)
-
-
-pars.param('SHOW NEURONS').sigActivated.connect(show_neurons_button)
-
-
-def add_group():
-    estimates.accepted_list = np.union1d(estimates.accepted_list, estimates.idx_components)
-    estimates.rejected_list = np.setdiff1d(estimates.rejected_list, estimates.idx_components)
-    change(None, None)
-
-pars_action.param('ADD GROUP').sigActivated.connect(add_group)
-
-def remove_group():
-    estimates.rejected_list =
np.union1d(estimates.rejected_list,estimates.idx_components) - estimates.accepted_list = np.setdiff1d(estimates.accepted_list,estimates.idx_components) - change(None, None) - -pars_action.param('REMOVE GROUP').sigActivated.connect(remove_group) - - -def add_single(): - estimates.accepted_list = np.union1d(estimates.accepted_list,estimates.components_to_plot) - estimates.rejected_list = np.setdiff1d(estimates.rejected_list,estimates.components_to_plot) - change(None, None) - -pars_action.param('ADD SINGLE').sigActivated.connect(add_single) - - -def remove_single(): - estimates.rejected_list = np.union1d(estimates.rejected_list,estimates.components_to_plot) - estimates.accepted_list = np.setdiff1d(estimates.accepted_list,estimates.components_to_plot) - change(None, None) - -pars_action.param('REMOVE SINGLE').sigActivated.connect(remove_single) - - -def save_object(): - global estimates, cnm_obj, rotation_flag - print('Saving') - ffll = F.getSaveFileName(filter='HDF5 (*.hdf5);;NWB (*.nwb)') - print(ffll[0]) - # rotate back - if estimates.rotation == True: - estimates.Cn = rotate90(estimates.Cn, right=False, vector=False) - estimates.A = rotate90(estimates.A, right=False, vector=True, sparse=True) - estimates.b = rotate90(estimates.b, right=False, vector=True) - estimates.dims = (estimates.dims[1], estimates.dims[0]) - estimates.rotation = False - cnm_obj.estimates = estimates - if os.path.splitext(ffll[0])[1] == '.hdf5': - cnm_obj.save(ffll[0]) - elif os.path.splitext(ffll[0])[1] == '.nwb': - from pynwb import NWBHDF5IO - with NWBHDF5IO(fpath, 'r') as io: - nwb = io.read() - raw_data_file = nwb.acquisition['TwoPhotonSeries'].external_file[0] - cnm_obj.estimates.save_NWB(ffll[0], imaging_rate=cnm_obj.params.data['fr'], session_start_time=datetime.now(tzlocal()), - raw_data_file=raw_data_file) - - -pars_action.param('SAVE OBJECT').sigActivated.connect(save_object) - - -def action_pars_activated(param, changes): - change(None, None) - -pars_action.sigTreeStateChanged.connect(action_pars_activated) - - -# If anything changes in the tree, print a message -def change(param, changes): - global estimates, pars, pars_action - set_par = pars.getValues() - if pars_action.param('Filter components').value(): - for keyy in set_par.keys(): - params_obj.quality.update({keyy: set_par[keyy][0]}) - else: - params_obj.quality.update({'cnn_lowest': .1, - 'min_cnn_thr': 0.99, - 'rval_thr': 0.85, - 'rval_lowest': -1, - 'min_SNR': 2, - 'SNR_lowest': 0}) - estimates.filter_components(mov, params_obj, dview=None, - select_mode=pars_action.param('View components').value()) - if mode == "background": - return - else: - draw_contours() - -pars.sigTreeStateChanged.connect(change) - -change(None, None) # set params to default -t.setParameters(pars, showTop=False) -t.setWindowTitle('Parameter Quality') - -# END PARAMS - -# Display the widget as a new window -w.show() - -# Start the Qt event loop -app.exec_() - -# Rotate back -if estimates.rotation == True: - estimates.Cn = rotate90(estimates.Cn, right=False, vector=False) - estimates.A = rotate90(estimates.A, right=False, vector=True, sparse=True) - estimates.b = rotate90(estimates.b, right=False, vector=True) - estimates.dims = (estimates.dims[1], estimates.dims[0]) - cnm_obj.estimates = estimates - estimates.rotation = False diff --git a/caiman/source_extraction/cnmf/estimates.py b/caiman/source_extraction/cnmf/estimates.py index d32a1ce23..a06250881 100644 --- a/caiman/source_extraction/cnmf/estimates.py +++ b/caiman/source_extraction/cnmf/estimates.py @@ -1,19 +1,10 @@ 
#!/usr/bin/env python -import bokeh import cv2 import logging import matplotlib.pyplot as plt import numpy as np -import os -from pynwb import NWBHDF5IO, TimeSeries, NWBFile -from pynwb.base import Images -from pynwb.image import GrayscaleImage -from pynwb.ophys import ImageSegmentation, Fluorescence, OpticalChannel, ImageSeries -from pynwb.device import Device import scipy.sparse -import time -import uuid import caiman from caiman.base.rois import detect_duplicates_and_subsets, nf_match_neurons_in_binary_masks, nf_masks_to_neurof_dict @@ -24,6 +15,7 @@ from caiman.source_extraction.cnmf.temporal import constrained_foopsi_parallel from caiman.source_extraction.cnmf.utilities import detrend_df_f, decimation_matrix + class Estimates(object): """ Class for storing and reusing the analysis results and performing basic @@ -1499,209 +1491,6 @@ def masks_2_neurofinder(self, dataset_name): bin_masks = self.A_thr.reshape([self.dims[0], self.dims[1], -1], order='F').transpose([2, 0, 1]) return nf_masks_to_neurof_dict(bin_masks, dataset_name) - def save_NWB(self, - filename, - imaging_plane_name=None, - imaging_series_name=None, - sess_desc='CaImAn Results', - exp_desc=None, - identifier=None, - imaging_rate=30., - starting_time=0., - session_start_time=None, - excitation_lambda=488.0, - imaging_plane_description='some imaging plane description', - emission_lambda=520.0, - indicator='OGB-1', - location='brain', - raw_data_file=None): - """writes NWB file - - Args: - filename: str - - imaging_plane_name: str, optional - - imaging_series_name: str, optional - - sess_desc: str, optional - - exp_desc: str, optional - - identifier: str, optional - - imaging_rate: float, optional - default: 30 (Hz) - - starting_time: float, optional - default: 0.0 (seconds) - - location: str, optional - - session_start_time: datetime.datetime, optional - Only required for new files - - excitation_lambda: float - - imaging_plane_description: str - - emission_lambda: float - - indicator: str - - location: str - """ - logger = logging.getLogger("caiman") - - if identifier is None: - identifier = uuid.uuid1().hex - - if '.nwb' != os.path.splitext(filename)[-1].lower(): - raise Exception("Wrong filename") - - if not os.path.isfile(filename): # if the file doesn't exist create new and add the original data path - print('filename does not exist. 
Creating new NWB file with only estimates output') - - nwbfile = NWBFile(sess_desc, identifier, session_start_time, experiment_description=exp_desc) - device = Device('imaging_device') - nwbfile.add_device(device) - optical_channel = OpticalChannel('OpticalChannel', - 'main optical channel', - emission_lambda=emission_lambda) - nwbfile.create_imaging_plane(name='ImagingPlane', - optical_channel=optical_channel, - description=imaging_plane_description, - device=device, - excitation_lambda=excitation_lambda, - imaging_rate=imaging_rate, - indicator=indicator, - location=location) - if raw_data_file: - nwbfile.add_acquisition(ImageSeries(name='TwoPhotonSeries', - external_file=[raw_data_file], - format='external', - rate=imaging_rate, - starting_frame=[0])) - with NWBHDF5IO(filename, 'w') as io: - io.write(nwbfile) - - time.sleep(4) # ensure the file is fully closed before opening again in append mode - logger.info('Saving the results in the NWB file...') - - with NWBHDF5IO(filename, 'r+') as io: - nwbfile = io.read() - # Add processing results - - # Create the module as 'ophys' unless it is taken and append 'ophysX' instead - ophysmodules = [s[5:] for s in list(nwbfile.modules) if s.startswith('ophys')] - if any('' in s for s in ophysmodules): - if any([s for s in ophysmodules if s.isdigit()]): - nummodules = max([int(s) for s in ophysmodules if s.isdigit()])+1 - print('ophys module previously created, writing to ophys'+str(nummodules)+' instead') - mod = nwbfile.create_processing_module('ophys'+str(nummodules), 'contains caiman estimates for ' - 'the main imaging plane') - else: - print('ophys module previously created, writing to ophys1 instead') - mod = nwbfile.create_processing_module('ophys1', 'contains caiman estimates for the main ' - 'imaging plane') - else: - mod = nwbfile.create_processing_module('ophys', 'contains caiman estimates for the main imaging plane') - - img_seg = ImageSegmentation() - mod.add(img_seg) - fl = Fluorescence() - mod.add_data_interface(fl) -# mot_crct = MotionCorrection() -# mod.add_data_interface(mot_crct) - - # Add the ROI-related stuff - if imaging_plane_name is not None: - imaging_plane = nwbfile.imaging_planes[imaging_plane_name] - else: - if len(nwbfile.imaging_planes) == 1: - imaging_plane = list(nwbfile.imaging_planes.values())[0] - else: - raise Exception('There is more than one imaging plane in the file, you need to specify the name' - ' via the "imaging_plane_name" parameter') - - if imaging_series_name is not None: - image_series = nwbfile.acquisition[imaging_series_name] - else: - if not len(nwbfile.acquisition): - image_series = None - elif len(nwbfile.acquisition) == 1: - image_series = list(nwbfile.acquisition.values())[0] - else: - raise Exception('There is more than one imaging plane in the file, you need to specify the name' - ' via the "imaging_series_name" parameter') - - ps = img_seg.create_plane_segmentation( - name='PlaneSegmentation', - description='CNMF_ROIs', - imaging_plane=imaging_plane, - reference_images=image_series) - - ps.add_column('r', 'description of r values') - ps.add_column('snr', 'signal to noise ratio') - ps.add_column('accepted', 'in accepted list') - ps.add_column('rejected', 'in rejected list') - if self.cnn_preds is not None: - ps.add_column('cnn', 'description of CNN') - if self.idx_components is not None: - ps.add_column('keep', 'in idx_components') - - # Add ROIs - for i in range(self.A.shape[-1]): - add_roi_kwargs = dict(image_mask=self.A.T[i].T.toarray().reshape(self.dims), - r=self.r_values[i], 
                                      snr=self.SNR_comp[i], accepted=False, rejected=False)
-                if hasattr(self, 'accepted_list'):
-                    add_roi_kwargs.update(accepted=i in self.accepted_list)
-                if hasattr(self, 'rejected_list'):
-                    add_roi_kwargs.update(rejected=i in self.rejected_list)
-                if self.cnn_preds is not None:
-                    add_roi_kwargs.update(cnn=self.cnn_preds[i])
-                if self.idx_components is not None:
-                    add_roi_kwargs.update(keep=i in self.idx_components)
-
-                ps.add_roi(**add_roi_kwargs)
-
-            for bg in self.b.T:  # Backgrounds
-                add_bg_roi_kwargs = dict(image_mask=bg.reshape(self.dims), r=np.nan, snr=np.nan, accepted=False,
-                                         rejected=False)
-                if 'keep' in ps.colnames:
-                    add_bg_roi_kwargs.update(keep=False)
-                if 'cnn' in ps.colnames:
-                    add_bg_roi_kwargs.update(cnn=np.nan)
-                ps.add_roi(**add_bg_roi_kwargs)
-
-            # Add Traces
-            n_rois = self.A.shape[-1]
-            n_bg = len(self.f)
-            rt_region_roi = ps.create_roi_table_region(
-                'ROIs', region=list(range(n_rois)))
-
-            rt_region_bg = ps.create_roi_table_region(
-                'Background', region=list(range(n_rois, n_rois + n_bg)))
-
-            timestamps = np.arange(self.f.shape[1]) / imaging_rate + starting_time
-
-            # Neurons
-            fl.create_roi_response_series(name='RoiResponseSeries', data=self.C.T, rois=rt_region_roi, unit='lumens',
-                                          timestamps=timestamps)
-            # Background
-            fl.create_roi_response_series(name='Background_Fluorescence_Response', data=self.f.T, rois=rt_region_bg,
-                                          unit='lumens', timestamps=timestamps)
-
-            mod.add(TimeSeries(name='residuals', description='residuals', data=self.YrA.T, timestamps=timestamps,
-                               unit='NA'))
-            if hasattr(self, 'Cn'):
-                images = Images('summary_images')
-                images.add_image(GrayscaleImage(name='local_correlations', data=self.Cn))
-
-            # Add MotionCorrection
-            # create_corrected_image_stack(corrected, original, xy_translation, name='CorrectedImageStack')
-            io.write(nwbfile)
 
 
 def compare_components(estimate_gt, estimate_cmp, Cn=None, thresh_cost=.8, min_dist=10, print_assignment=False,
                        labels=['GT', 'CMP'], plot_results=False):
diff --git a/caiman/source_extraction/cnmf/merging.py b/caiman/source_extraction/cnmf/merging.py
index a01244472..b2e3c670b 100644
--- a/caiman/source_extraction/cnmf/merging.py
+++ b/caiman/source_extraction/cnmf/merging.py
@@ -10,10 +10,6 @@
 from scipy.sparse import csgraph, csc_matrix, lil_matrix, csr_matrix
 
 from caiman.source_extraction.cnmf.deconvolution import constrained_foopsi
-from caiman.source_extraction.cnmf.spatial import update_spatial_components, threshold_components
-from caiman.source_extraction.cnmf.temporal import update_temporal_components
-from caiman.source_extraction.cnmf.utilities import update_order_greedy
-
 
 def merge_components(Y, A, b, C, R, f, S, sn_pix, temporal_params,
diff --git a/caiman/source_extraction/cnmf/utilities.py b/caiman/source_extraction/cnmf/utilities.py
index 1a935ab67..5db5ce4e5 100644
--- a/caiman/source_extraction/cnmf/utilities.py
+++ b/caiman/source_extraction/cnmf/utilities.py
@@ -11,9 +11,6 @@
 import cv2
 import logging
 import numpy as np
-import os
-import pathlib
-import matplotlib.pyplot as plt
 import scipy
 from scipy.sparse import spdiags, issparse, csc_matrix, csr_matrix
 import scipy.ndimage as ndi
@@ -700,129 +697,6 @@ def detrend_df_f_auto(A, b, C, f, dims=None, YrA=None, use_annulus = True,
     return F_df
 
 
-def manually_refine_components(Y, xxx_todo_changeme, A, C, Cn, thr=0.9, display_numbers=True,
-                               max_number=None, cmap=None, **kwargs):
-    """Plots contours of spatial components against a background image
-    and allows the user to interactively add new components by clicking with the mouse
-
-    Args:
-        Y: ndarray
-            movie in 2D
-
-        (dx,dy): tuple
-            dimensions of the square used to identify neurons (should be set to the value of gSiz)
-
-        A: np.ndarray or sparse matrix
-            Matrix of Spatial components (d x K)
-
-        Cn: np.ndarray (2D)
-            Background image (e.g. mean, correlation)
-
-        thr: scalar between 0 and 1
-            Energy threshold for computing contours (default 0.9)
-
-        display_numbers: bool
-            Display number of ROIs if checked (default True)
-
-        max_number: int
-            Display the number for only the first max_number components (default None, display all numbers)
-
-        cmap: string
-            User-specified colormap (default None, i.e. the default colormap)
-
-    Returns:
-        A: np.ndarray
-            matrix A of estimated spatial component contributions
-
-        C: np.ndarray
-            array of estimated calcium traces
-    """
-    (dx, dy) = xxx_todo_changeme
-    if issparse(A):
-        A = np.array(A.todense())
-    else:
-        A = np.array(A)
-
-    d1, d2 = np.shape(Cn)
-    d, nr = np.shape(A)
-    if max_number is None:
-        max_number = nr
-
-    x, y = np.mgrid[0:d1:1, 0:d2:1]
-
-    plt.imshow(Cn, interpolation=None, cmap=cmap)
-    cm = caiman.base.rois.com(A, d1, d2)
-
-    Bmat = np.zeros((np.minimum(nr, max_number), d1, d2))
-    for i in range(np.minimum(nr, max_number)):
-        indx = np.argsort(A[:, i], axis=None)[::-1]
-        cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
-        cumEn /= cumEn[-1]
-        Bvec = np.zeros(d)
-        Bvec[indx] = cumEn
-        Bmat[i] = np.reshape(Bvec, np.shape(Cn), order='F')
-
-    T = np.shape(Y)[-1]
-
-    plt.close()
-    fig = plt.figure()
-    ax = plt.gca()
-    ax.imshow(Cn, interpolation=None, cmap=cmap,
-              vmin=np.percentile(Cn[~np.isnan(Cn)], 1), vmax=np.percentile(Cn[~np.isnan(Cn)], 99))
-    for i in range(np.minimum(nr, max_number)):
-        plt.contour(y, x, Bmat[i], [thr])
-
-    if display_numbers:
-        for i in range(np.minimum(nr, max_number)):
-            ax.text(cm[i, 1], cm[i, 0], str(i + 1))
-
-    A3 = np.reshape(A, (d1, d2, nr), order='F')
-    while True:
-        pts = fig.ginput(1, timeout=0)
-
-        if pts != []:
-            print(pts)
-            xx, yy = np.round(pts[0]).astype(int)
-            coords_y = np.array(list(range(yy - dy, yy + dy + 1)))
-            coords_x = np.array(list(range(xx - dx, xx + dx + 1)))
-            coords_y = coords_y[(coords_y >= 0) & (coords_y < d1)]
-            coords_x = coords_x[(coords_x >= 0) & (coords_x < d2)]
-            a3_tiny = A3[coords_y[0]:coords_y[-1] + 1, coords_x[0]:coords_x[-1] + 1, :]
-            y3_tiny = Y[coords_y[0]:coords_y[-1] + 1, coords_x[0]:coords_x[-1] + 1, :]
-
-            dy_sz, dx_sz = np.shape(a3_tiny)[:-1]
-            y2_tiny = np.reshape(y3_tiny, (dx_sz * dy_sz, T), order='F')
-            a2_tiny = np.reshape(a3_tiny, (dx_sz * dy_sz, nr), order='F')
-            y2_res = y2_tiny - a2_tiny.dot(C)
-            y3_res = np.reshape(y2_res, (dy_sz, dx_sz, T), order='F')
-            a__, c__, center__, b_in__, f_in__ = caiman.source_extraction.cnmf.initialization.greedyROI(
-                y3_res, nr=1, gSig=[dx_sz // 2, dy_sz // 2], gSiz=[dx_sz, dy_sz])
-
-            a_f = np.zeros((d, 1))
-            idxs = np.meshgrid(coords_y, coords_x)
-            a_f[np.ravel_multi_index(idxs, (d1, d2), order='F').flatten()] = a__
-
-            A = np.concatenate([A, a_f], axis=1)
-            C = np.concatenate([C, c__], axis=0)
-            indx = np.argsort(a_f, axis=None)[::-1]
-            cumEn = np.cumsum(a_f.flatten()[indx]**2)
-            cumEn /= cumEn[-1]
-            Bvec = np.zeros(d)
-            Bvec[indx] = cumEn
-            bmat = np.reshape(Bvec, np.shape(Cn), order='F')
-            plt.contour(y, x, bmat, [thr])
-            plt.pause(.01)
-
-        elif pts == []:
-            break
-
-        nr += 1
-        A3 = np.reshape(A, (d1, d2, nr), order='F')
-
-    return A, C
 
 
 def app_vertex_cover(A):
     """ Finds an approximate vertex cover for a symmetric graph with adjacency matrix A.
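For reference, the cumulative-energy ('nrg') thresholding that the removed manually_refine_components uses for contouring (and that get_contours in the visualization module removed below also implements) can be sketched standalone. A minimal illustration with invented names (energy_mask, the toy Gaussian footprint), not code from the repository:

    import numpy as np

    def energy_mask(a: np.ndarray, thr: float = 0.9) -> np.ndarray:
        # order pixels from brightest to dimmest
        indx = np.argsort(a, axis=None)[::-1]
        # normalized cumulative energy, accumulated in brightness order
        cumEn = np.cumsum(a.flatten()[indx] ** 2)
        cumEn /= cumEn[-1]
        # scatter the cumulative values back to pixel positions
        Bvec = np.ones(a.size)
        Bvec[indx] = cumEn
        # True on the smallest set of pixels holding a fraction thr of the energy
        return Bvec < thr

    # toy 2D Gaussian footprint on a 21x21 patch
    yy, xx = np.mgrid[-10:11, -10:11]
    a = np.exp(-(xx**2 + yy**2) / 8).ravel()
    mask = energy_mask(a, thr=0.9).reshape(21, 21)
    print(mask.sum(), "of", mask.size, "pixels hold 90% of the energy")

Contouring this boolean mask (e.g. at level thr on Bvec) gives the component outlines drawn by the GUIs in this patch.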
diff --git a/caiman/source_extraction/volpy/volpy_gui.py b/caiman/source_extraction/volpy/volpy_gui.py deleted file mode 100644 index ae333dfc1..000000000 --- a/caiman/source_extraction/volpy/volpy_gui.py +++ /dev/null @@ -1,377 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -VolPy GUI interface is used to correct outputs of Mask R-CNN or annotate new datasets. -VolPy GUI uses summary images and ROIs as the input. It outputs binary masks for the trace denoising -and spike extraction step of VolPy. -@author: @caichangjia -""" -import cv2 -import h5py -import numpy as np -import os -from pathlib import Path -import pyqtgraph as pg -from pyqtgraph import FileDialog -from pyqtgraph.Qt import QtGui -from pyqtgraph.parametertree import Parameter, ParameterTree -import PyQt5 -from PyQt5 import QtWidgets -from PyQt5.QtWidgets import QApplication, QShortcut -import random -from skimage.draw import polygon -import sys - -import caiman as cm -from caiman.external.cell_magic_wand import cell_magic_wand_single_point - -os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = os.fspath( - Path(PyQt5.__file__).resolve().parent / "Qt5" / "plugins") - - -def mouseClickEvent(event): - global mode, x, y, i, j, pts, roi, cur_image - if mode == "POLYGON": - pos = img.mapFromScene(event.pos()) - x = int(pos.x()) - y = int(pos.y()) - j, i = pos.y(), pos.x() - i = int(np.clip(i, 0, dims[0] - 1)) - j = int(np.clip(j, 0, dims[1] - 1)) - p1.plot(x=[i], y=[j], symbol='o', pen=None, symbolBrush='y', symbolSize=5)#, symbolBrush=pg.intColor(i,6,maxValue=128)) - pts.append([i, j]) - elif mode == "CELL MAGIC WAND": - if pars_action.param('DISPLAY').value() == 'SPATIAL FOOTPRINTS': - overlay(all_ROIs) - pts = [] - pos = img.mapFromScene(event.pos()) - p1.clear() - p1.addItem(img) - try: - x = int(pos.x()) - y = int(pos.y()) - j, i = pos.y(), pos.x() - i = int(np.clip(i, 0, dims[0] - 1)) - j = int(np.clip(j, 0, dims[1] - 1)) - p1.plot(x=[i], y=[j], symbol='o', pen=None, symbolBrush='y', symbolSize=5)#, symbolBrush=pg.intColor(i,6,maxValue=128)) - min_radius = pars_action.param('MAGIC WAND PARAMS').child('MIN RADIUS').value() - max_radius = pars_action.param('MAGIC WAND PARAMS').child('MAX RADIUS').value() - roughness = pars_action.param('MAGIC WAND PARAMS').child('ROUGHNESS').value() - roi, edge = cell_magic_wand_single_point(adjust_contrast(cur_img.copy().T, hist.getLevels()[0], hist.getLevels()[1]), (j, i), - min_radius=min_radius, max_radius=max_radius, - roughness=roughness, zoom_factor=1) - ROI8 = np.uint8(roi * 255) - contours = cv2.findContours(cv2.threshold(ROI8, 100, 255, 0)[1], cv2.RETR_TREE, - cv2.CHAIN_APPROX_NONE)[0][0][:,0,:] - pts = contours.tolist() - pp = pts.copy() - pp.append(pp[0]) - p1.plot(x=np.array(pp)[:,0], y=np.array(pp)[:,1], pen=pen) - roi = roi * 1. 
-        except:
-            pass
-
-    elif mode == 'CHOOSE NEURONS':
-        pos = img.mapFromScene(event.pos())
-        p1.clear()
-        p1.addItem(img)
-        x = int(pos.x())
-        y = int(pos.y())
-        j, i = pos.y(), pos.x()
-        i = int(np.clip(i, 0, dims[0] - 1))
-        j = int(np.clip(j, 0, dims[1] - 1))
-        p1.plot(x=[i], y=[j], symbol='o', pen=None, symbolBrush='y', symbolSize=5)  #, symbolBrush=pg.intColor(i,6,maxValue=128))
-
-        try:
-            loc = np.array(list(all_centers.values()))
-            dis = np.square(loc - np.array([i, j])).sum(1)
-            min_idx = np.where(dis == dis.min())[0][0]
-            #list(all_centers.keys())[min_idx]
-            neuron_list.setCurrentRow(min_idx)
-            show_neuron()
-        except:
-            pass
-
-def release(event):
-    pass
-
-def move(event):
-    pass
-
-def add():
-    global mode, pts, all_pts, all_centers, img, p1, neuron_list, neuron_idx, roi
-    if mode == "POLYGON":
-        roi = np.zeros((dims[1], dims[0]))
-        ff = np.array(polygon(np.array(pts)[:, 0], np.array(pts)[:, 1]))
-        roi[ff[1], [ff[0]]] = 1
-
-        if len(pts) > 2:
-            flag = True
-            while flag:
-                r1 = random.randrange(1, 10**4)
-                r2 = random.randrange(1, 10**4)
-                neuron_idx = str(r1) + '-' + str(r2)
-                if neuron_idx not in all_pts.keys():
-                    flag = False
-            neuron_list.addItem(neuron_idx)
-            all_pts[neuron_idx] = pts
-            all_centers[neuron_idx] = np.array(pts).mean(0)
-            roi = roi.astype(np.float32)
-            all_ROIs[neuron_idx] = roi
-
-            pp = pts.copy()
-            pp.append(pp[0])
-            p1.plot(x=np.array(pp)[:, 0], y=np.array(pp)[:, 1], pen=pen)
-        pts = []
-
-    show_all()
-
-def remove():
-    try:
-        item = neuron_list.currentItem()
-        del all_pts[str(item.text())]
-        del all_centers[str(item.text())]
-        del all_ROIs[str(item.text())]
-        neuron_list.takeItem(neuron_list.row(item))
-        show_all()
-    except:
-        pass
-
-def show_all():
-    global all_pts, pen, all_ROIs, neuron_list, img_overlay
-    p1.clear()
-    p1.addItem(img)
-    if pars_action.param('DISPLAY').value() == 'CONTOUR':
-        for pp in list(all_pts.values()):
-            pp.append(pp[0])
-            p1.plot(x=np.array(pp)[:, 0], y=np.array(pp)[:, 1], pen=pen)
-    else:
-        overlay(all_ROIs)
-
-def clear():
-    p1.clear()
-    p1.addItem(img)
-
-def show_neuron():
-    global all_pts
-    item = neuron_list.currentItem()
-    pp = all_pts[str(item.text())]
-    pp.append(pp[0])
-    p1.clear()
-    p1.addItem(img)
-    if pars_action.param('DISPLAY').value() == 'SPATIAL FOOTPRINTS':
-        overlay({str(item.text()): all_ROIs[str(item.text())]})
-    else:
-        p1.plot(x=np.array(pp)[:, 0], y=np.array(pp)[:, 1], pen=pen)
-
-def load():
-    global summary_images, dims, cur_img, p1
-    fpath = F.getOpenFileName(caption='Load Summary Images',
-                              filter='TIFF (*.tif);;HDF5 (*.h5 *.hdf5)')[0]
-    summary_images = cm.load(fpath)
-    summary_images = summary_images.transpose([0, 2, 1])
-    summary_images = np.flip(summary_images, axis=2)
-    cur_img = summary_images[0]
-    dims = summary_images[0].shape
-    #p1.resize(dims[0], dims[1])
-    img.setImage(cur_img)
-    p1.setAspectLocked()
-
-def load_rois():
-    global summary_images, neuron_list, all_pts, all_centers, all_ROIs, dims
-    fpath = F.getOpenFileName(caption='Load ROIS',
-                              filter='HDF5 (*.h5 *.hdf5);;TIFF (*.tif)')[0]
-    with h5py.File(fpath, "r") as f:
-        l_ROIs = np.array(list(f[list(f.keys())[0]]))
-    l_ROIs = np.flip(l_ROIs, axis=1)
-    if (l_ROIs.shape[2], l_ROIs.shape[1]) != dims:
-        print(dims)
-        print(l_ROIs.shape[1:])
-        raise ValueError('Dimensions of movie and ROIs do not match')
-
-    for roi in l_ROIs:
-        flag = True
-        while flag:
-            r1 = random.randrange(1, 10**4)
-            r2 = random.randrange(1, 10**4)
-            neuron_idx = str(r1) + '-' + str(r2)
-            if neuron_idx not in all_pts.keys():
-                flag = False
-        neuron_list.addItem(neuron_idx)
-        ROI8 = np.uint8(roi * 255)
-        contours = cv2.findContours(cv2.threshold(ROI8, 100, 255, 0)[1], cv2.RETR_TREE,
-                                    cv2.CHAIN_APPROX_NONE)[0][0][:, 0, :]
-        pts = contours.tolist()
-        all_pts[neuron_idx] = pts
-        all_centers[neuron_idx] = np.array(pts).mean(0)
-        all_ROIs[neuron_idx] = roi
-    show_all()
-
-def save():
-    global all_ROIs, save_ROIs, summary_images
-    print('Saving')
-    ffll = F.getSaveFileName(filter='HDF5 (*.hdf5)')
-    print(ffll[0])
-    save_ROIs = np.array(list(all_ROIs.values())).copy()
-    save_ROIs = np.flip(save_ROIs, axis=1)
-
-    if os.path.splitext(ffll[0])[1] == '.hdf5':
-        cm.movie(save_ROIs).save(ffll[0])
-    summary_images = summary_images.transpose([0, 2, 1])
-    summary_images = np.flip(summary_images, axis=1)
-
-def change(param, changes):
-    global mode, cur_img
-    print("tree changes:")
-    for param, change, data in changes:
-        if pars_action.childPath(param)[0] == 'MODE':
-            if data == 'None':
-                mode = 'POLYGON'
-            else:
-                mode = data
-        elif pars_action.childPath(param)[0] == 'IMAGES':
-            if data == 'CORR':
-                cur_img = summary_images[-1]
-            else:
-                cur_img = summary_images[0]
-            img.setImage(cur_img)
-        print(param)
-        print(change)
-        print(data)
-
-def down():
-    global all_pts
-    try:
-        neuron_list.setCurrentRow(neuron_list.currentRow() + 1)
-        item = neuron_list.currentItem()
-        pp = all_pts[str(item.text())]
-        pp.append(pp[0])
-        p1.clear()
-        p1.addItem(img)
-        if pars_action.param('DISPLAY').value() == 'SPATIAL FOOTPRINTS':
-            overlay({str(item.text()): all_ROIs[str(item.text())]})
-        else:
-            p1.plot(x=np.array(pp)[:, 0], y=np.array(pp)[:, 1], pen=pen)
-    except:
-        pass
-
-def up():
-    global all_pts
-    try:
-        neuron_list.setCurrentRow(neuron_list.currentRow() - 1)
-        item = neuron_list.currentItem()
-        pp = all_pts[str(item.text())]
-        pp.append(pp[0])
-        p1.clear()
-        p1.addItem(img)
-        if pars_action.param('DISPLAY').value() == 'SPATIAL FOOTPRINTS':
-            overlay({str(item.text()): all_ROIs[str(item.text())]})
-        else:
-            p1.plot(x=np.array(pp)[:, 0], y=np.array(pp)[:, 1], pen=pen)
-    except:
-        pass
-
-def adjust_contrast(img, min_value, max_value):
-    img[img < min_value] = min_value
-    img[img > max_value] = max_value
-    return img
-
-def overlay(all_ROIs):
-    if len(list(all_ROIs.values())) > 0:
-        img_rois = np.array(list(all_ROIs.values()))
-        img_rois = img_rois.transpose([0, 2, 1])
-        img_rois = img_rois.sum(0)
-        img_rois[img_rois > 1] = 1
-        b, g, r = cv2.split(cv2.cvtColor(img_rois, cv2.COLOR_GRAY2BGR))
-        g[g > 0] = 0
-        r[r > 0] = 0
-        img_rois = cv2.merge([b, g, r])
-        img_overlay = pg.ImageItem()
-        img_overlay.setImage(img_rois)
-        p1.addItem(img_overlay)
-        img_overlay.setZValue(10)  # make sure this image is on top
-        img_overlay.setOpacity(0.2)
-
-if __name__ == "__main__":
-    ## Always start by initializing Qt (only once per application)
-    app = QApplication(sys.argv)
-
-    ## Define a top-level widget to hold everything
-    w = QtWidgets.QWidget()
-
-    ## Create some widgets to be placed inside
-    hist = pg.HistogramLUTItem()  # Contrast/color control
-    win = pg.GraphicsLayoutWidget()
-    win.setMaximumWidth(300)
-    win.setMinimumWidth(200)
-    win.addItem(hist)
-    p1 = pg.PlotWidget()
-    neuron_action = ParameterTree()
-    neuron_list = QtWidgets.QListWidget()
-
-    ## Create a grid layout to manage the widgets' size and position
-    layout = QtWidgets.QGridLayout()
-    w.setLayout(layout)
-
-    ## Add widgets to the layout in their proper positions
-    layout.addWidget(win, 0, 1)
-    layout.addWidget(p1, 0, 2)
-    layout.addWidget(neuron_action, 0, 3)
-    layout.addWidget(neuron_list, 0, 4)
-    img = pg.ImageItem()
-    p1.addItem(img)
-    hist.setImageItem(img)
-
-    #
Add actions - params_action = [{'name': 'LOAD DATA', 'type':'action'}, - {'name': 'LOAD ROIS', 'type':'action'}, - {'name': 'SAVE', 'type':'action'}, - {'name': 'ADD', 'type': 'action'}, - {'name': 'REMOVE', 'type': 'action'}, - {'name': 'SHOW ALL', 'type': 'action'}, - {'name': 'CLEAR', 'type': 'action'}, - {'name': 'IMAGES', 'type': 'list', 'values': ['MEAN','CORR']}, - {'name': 'DISPLAY', 'type': 'list', 'values': ['CONTOUR','SPATIAL FOOTPRINTS']}, - {'name': 'MODE', 'type': 'list', 'values': ['POLYGON','CELL MAGIC WAND', 'CHOOSE NEURONS']}, - {'name': 'MAGIC WAND PARAMS', 'type': 'group', 'children': [{'name': 'MIN RADIUS', 'type': 'int', 'value': 4}, - {'name': 'MAX RADIUS', 'type': 'int', 'value': 10}, - {'name': 'ROUGHNESS', 'type': 'int', 'value': 1}]}] - - pars_action = Parameter.create(name='params_action', type='group', children=params_action) - neuron_action.setParameters(pars_action, showTop=False) - neuron_action.setWindowTitle('Parameter Action') - mode = pars_action.getValues()['MODE'][0] - - # Add event - p1.mousePressEvent = mouseClickEvent - p1.mouseReleaseEvent = release - p1.mouseMoveEvent = move - pars_action.param('ADD').sigActivated.connect(add) - pars_action.param('REMOVE').sigActivated.connect(remove) - pars_action.param('SHOW ALL').sigActivated.connect(show_all) - pars_action.param('CLEAR').sigActivated.connect(clear) - pars_action.param('LOAD DATA').sigActivated.connect(load) - pars_action.param('LOAD ROIS').sigActivated.connect(load_rois) - pars_action.param('SAVE').sigActivated.connect(save) - pars_action.sigTreeStateChanged.connect(change) - shortcut_down = QShortcut(QtGui.QKeySequence("down"), w) - shortcut_down.activated.connect(down) - shortcut_up = QShortcut(QtGui.QKeySequence("up"), w) - shortcut_up.activated.connect(up) - neuron_list.itemClicked.connect(show_neuron) - - # Create dictionary for saving - F = FileDialog() - all_pts = {} - all_centers = {} - all_ROIs = {} - pts = [] - pen = pg.mkPen(color=(255, 255, 0), width=4)#, style=Qt.DashDotLine) - - ## Display the widget as a new window - w.show() - - ## Start the Qt event loop - app.exec_() - - - diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py deleted file mode 100644 index 9f9a6f2e1..000000000 --- a/caiman/utils/visualization.py +++ /dev/null @@ -1,1487 +0,0 @@ -#!/usr/bin/env python - - -""" List of plotting functions to visualize what's happening in the code """ - -import base64 -import cv2 -import functools as fct -import holoviews -from IPython.display import HTML -from math import sqrt, ceil -import matplotlib -import matplotlib.patches -import matplotlib.pyplot as plt -import matplotlib.widgets -import numpy as np -from numpy.typing import ArrayLike -from scipy.ndimage import center_of_mass, median_filter -from scipy.sparse import issparse, spdiags, coo_matrix, csc_matrix -from skimage.measure import find_contours -import sys -from tempfile import NamedTemporaryFile -from typing import Any, Optional -from warnings import warn - -import caiman.base.rois -import caiman.summary_images - -try: - cv2.setNumThreads(0) -except: - pass - -# FIXME Look into converting this into a standard import -try: - import bokeh - import bokeh.plotting as bpl - from bokeh.models import CustomJS, ColumnDataSource, Range1d, LabelSet -except: - print("Bokeh could not be loaded. 
Either it is not installed or you are not running within a notebook") - - -def view_patches(Yr, A, C, b, f, d1, d2, YrA=None, secs=1): - """view spatial and temporal components (secs=0 interactive) - - Args: - Yr: np.ndarray - movie in format pixels (d) x frames (T) - - A: sparse matrix - matrix of spatial components (d x K) - - C: np.ndarray - matrix of temporal components (K x T) - - b: np.ndarray - spatial background (vector of length d) - - f: np.ndarray - temporal background (vector of length T) - - d1,d2: np.ndarray - frame dimensions - - YrA: np.ndarray - ROI filtered residual as it is given from update_temporal_components - If not given, then it is computed (K x T) - - secs: float - number of seconds in between component scrolling. secs=0 means interactive (click to scroll) - - imgs: np.ndarray - background image for contour plotting. Default is the image of all spatial components (d1 x d2) - - """ - - plt.ion() - nr, T = C.shape - nb = f.shape[0] - A2 = A.copy() - A2.data **= 2 - nA2 = np.sqrt(np.array(A2.sum(axis=0))).squeeze() - if YrA is None: - Y_r = np.array(A.T * np.matrix(Yr) - (A.T * np.matrix(b[:, np.newaxis])) * np.matrix( - f[np.newaxis]) - (A.T.dot(A)) * np.matrix(C) + C) - else: - Y_r = YrA + C - - A = A.todense() - bkgrnd = np.reshape(b, (d1, d2) + (nb,), order='F') - fig = plt.figure() - thismanager = plt.get_current_fig_manager() - thismanager.toolbar.pan() - print('In order to scroll components you need to click on the plot') - sys.stdout.flush() - for i in range(nr + 1): - if i < nr: - ax1 = fig.add_subplot(2, 1, 1) - plt.imshow(np.reshape(np.array(A[:, i]) / nA2[i], - (d1, d2), order='F'), interpolation='None') - ax1.set_title('Spatial component ' + str(i + 1)) - ax2 = fig.add_subplot(2, 1, 2) - plt.plot(np.arange(T), np.squeeze( - np.array(Y_r[i, :])), 'c', linewidth=3) - plt.plot(np.arange(T), np.squeeze( - np.array(C[i, :])), 'r', linewidth=2) - ax2.set_title('Temporal component ' + str(i + 1)) - ax2.legend(labels=['Filtered raw data', 'Inferred trace']) - - if secs > 0: - plt.pause(secs) - else: - plt.waitforbuttonpress() - - fig.delaxes(ax2) - else: - ax1 = fig.add_subplot(2, 1, 1) - plt.imshow(bkgrnd[:, :, i - nr], interpolation='None') - ax1.set_title('Spatial background ' + str(i - nr + 1)) - ax2 = fig.add_subplot(2, 1, 2) - plt.plot(np.arange(T), np.squeeze(np.array(f[i - nr, :]))) - ax2.set_title('Temporal background ' + str(i - nr + 1)) - - -def nb_view_patches(Yr, A, C, b, f, d1, d2, YrA=None, image_neurons=None, thr=0.99, - denoised_color=None, cmap='jet', r_values=None, SNR=None, cnn_preds=None): - """ - Interactive plotting utility for ipython notebook - - Args: - Yr: np.ndarray - movie - - A,C,b,f: np.ndarrays - outputs of matrix factorization algorithm - - d1,d2: floats - dimensions of movie (x and y) - - YrA: np.ndarray - ROI filtered residual as it is given from update_temporal_components - If not given, then it is computed (K x T) - - image_neurons: np.ndarray - image to be overlaid to neurons (for instance the average) - - thr: double - threshold regulating the extent of the displayed patches - - denoised_color: string or None - color name (e.g. 'red') or hex color code (e.g. '#F0027F') - - cmap: string - name of colormap (e.g. 
'viridis') used to plot image_neurons - """ - - colormap = matplotlib.colormaps.get_cmap(cmap) - grayp = [matplotlib.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))] - nr, T = C.shape - nA2 = np.ravel(np.power(A, 2).sum(0)) if isinstance(A, np.ndarray) else np.ravel(A.power(2).sum(0)) - b = np.squeeze(b) - f = np.squeeze(f) - if YrA is None: - Y_r = np.array(spdiags(1 / nA2, 0, nr, nr) * - (A.T * np.matrix(Yr) - - (A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) - - A.T.dot(A) * np.matrix(C)) + C) - else: - Y_r = C + YrA - - x = np.arange(T) - if image_neurons is None: - image_neurons = A.mean(1).reshape((d1, d2), order='F') - - coors = get_contours(A, (d1, d2), thr) - cc1 = [cor['coordinates'][:, 0] for cor in coors] - cc2 = [cor['coordinates'][:, 1] for cor in coors] - c1 = cc1[0] - c2 = cc2[0] - - # split sources up, such that Bokeh does not warn - # "ColumnDataSource's columns must be of the same length" - source = ColumnDataSource(data=dict(x=x, y=Y_r[0] / 100, y2=C[0] / 100)) - source_ = ColumnDataSource(data=dict(z=Y_r / 100, z2=C / 100)) - source2 = ColumnDataSource(data=dict(c1=c1, c2=c2)) - source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2)) - - code=""" - var data = source.data - var data_ = source_.data - var f = cb_obj.value - 1 - var x = data['x'] - var y = data['y'] - var y2 = data['y2'] - - for (var i = 0; i < x.length; i++) { - y[i] = data_['z'][i+f*x.length] - y2[i] = data_['z2'][i+f*x.length] - } - - var data2_ = source2_.data; - var data2 = source2.data; - var c1 = data2['c1']; - var c2 = data2['c2']; - var cc1 = data2_['cc1']; - var cc2 = data2_['cc2']; - - for (var i = 0; i < c1.length; i++) { - c1[i] = cc1[f][i] - c2[i] = cc2[f][i] - } - source2.change.emit(); - source.change.emit(); - """ - - if r_values is not None: - code += """ - var mets = metrics.data['mets'] - mets[1] = metrics_.data['R'][f].toFixed(3) - mets[2] = metrics_.data['SNR'][f].toFixed(3) - metrics.change.emit(); - """ - metrics = ColumnDataSource(data=dict(y=(3, 2, 1, 0), - mets=('', "% 7.3f" % r_values[0], "% 7.3f" % SNR[0], - "N/A" if np.sum(cnn_preds) in (0, None) else "% 7.3f" % cnn_preds[0]), - keys=("Evaluation Metrics", "Spatial corr:", "SNR:", "CNN:"))) - if np.sum(cnn_preds) in (0, None): - metrics_ = ColumnDataSource(data=dict(R=r_values, SNR=SNR)) - else: - metrics_ = ColumnDataSource(data=dict(R=r_values, SNR=SNR, CNN=cnn_preds)) - code += """ - mets[3] = metrics_.data['CNN'][f].toFixed(3) - """ - labels = LabelSet(x=0, y='y', text='keys', source=metrics) - labels2 = LabelSet(x=10, y='y', text='mets', source=metrics, text_align="right") - plot2 = bpl.figure(width=200, height=100, toolbar_location = None) - plot2.axis.visible = False - plot2.grid.visible = False - plot2.tools.visible = False - plot2.line([0,10], [0,4], line_alpha=0) - plot2.add_layout(labels) - plot2.add_layout(labels2) - else: - metrics, metrics_ = None, None - - callback = CustomJS(args=dict(source=source, source_=source_, source2=source2, - source2_=source2_, metrics=metrics, metrics_=metrics_), code=code) - - plot = bpl.figure(width=600, height=300) - plot.line('x', 'y', source=source, line_width=1, line_alpha=0.6) - if denoised_color is not None: - plot.line('x', 'y2', source=source, line_width=1, - line_alpha=0.6, color=denoised_color) - - xr = Range1d(start=0, end=image_neurons.shape[1]) - yr = Range1d(start=image_neurons.shape[0], end=0) - plot1 = bpl.figure(x_range=xr, y_range=yr, - width=int(min(1, d2/d1)*300), - height=int(min(1, d1/d2)*300)) - - plot1.image(image=[image_neurons], 
x=0, - y=0, dw=d2, dh=d1, palette=grayp) - plot1.patch('c1', 'c2', alpha=0.6, color='purple', - line_width=2, source=source2) - - if Y_r.shape[0] > 1: - slider = bokeh.models.Slider(start=1, end=Y_r.shape[0], value=1, step=1, - title="Neuron Number") - slider.js_on_change('value', callback) - bpl.show(bokeh.layouts.layout([[slider], [bokeh.layouts.row( - plot1 if r_values is None else bokeh.layouts.column(plot1, plot2), plot)]])) - else: - bpl.show(bokeh.layouts.row(plot1 if r_values is None else - bokeh.layouts.column(plot1, plot2), plot)) - - return Y_r - - -def hv_view_patches(Yr, A, C, b, f, d1, d2, YrA=None, image_neurons=None, denoised_color=None, - cmap='viridis', r_values=None, SNR=None, cnn_preds=None): - """ - Interactive plotting utility for ipython notebook - - Args: - Yr: np.ndarray - movie - - A,C,b,f: np.ndarrays - outputs of matrix factorization algorithm - - d1,d2: floats - dimensions of movie (x and y) - - YrA: np.ndarray - ROI filtered residual as it is given from update_temporal_components - If not given, then it is computed (K x T) - - image_neurons: np.ndarray - image to be overlaid to neurons (for instance the average) - - denoised_color: string or None - color name (e.g. 'red') or hex color code (e.g. '#F0027F') - - cmap: string - name of colormap (e.g. 'viridis') used to plot image_neurons - - r_values: np.ndarray - space correlation values - - SNR: np.ndarray - peak-SNR over the length of a transient for each component - - cnn_preds: np.ndarray - prediction values from the CNN classifier - """ - - nr, T = C.shape - nA2 = np.ravel(np.power(A, 2).sum(0)) if isinstance(A, np.ndarray) else np.ravel(A.power(2).sum(0)) - b = np.squeeze(b) - f = np.squeeze(f) - if YrA is None: - Y_r = np.array( - spdiags(1 / nA2, 0, nr, nr) * - (A.T * np.matrix(Yr) - - (A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) - - A.T.dot(A) * np.matrix(C)) + C) - else: - Y_r = C + YrA - if image_neurons is None: - image_neurons = A.mean(1).reshape((d1, d2), order='F') - smp = matplotlib.cm.ScalarMappable(cmap=cmap) - im_rgb = smp.to_rgba(image_neurons)[:, :, :3] - im_hsv = matplotlib.colors.rgb_to_hsv(im_rgb) - - def norm(a, rg=(0, 1)): - a_norm = (a - a.min()) / (a.max() - a.min()) - return a_norm * (rg[1] - rg[0]) + rg[0] - - Ad = np.asarray(A.todense()).reshape((d1, d2, -1), order='F') - - def plot_unit(uid, scl): - if r_values is not None: - metrics = 'Evaluation Metrics\nSpatial corr:% 7.3f\nSNR:% 18.3f\nCNN:' % ( - r_values[uid], SNR[uid]) - metrics += ' '*15+'N/A' if np.sum(cnn_preds) in (0, None) else '% 18.3f' % cnn_preds[uid] - else: - metrics = '' - trace = ( - holoviews.Curve(Y_r[uid, :], kdims='frame #').opts(framewise=True) - * (holoviews.Curve(C[uid, :], kdims='frame #') - .opts(color=denoised_color, framewise=True)) - * holoviews.Text(.03*Y_r.shape[1], Y_r[uid].max(), metrics, halign='left', valign='top', fontsize=10) - ).opts(aspect=3, frame_height=200) - A_scl = norm(Ad[:, :, uid], (scl, 1)) - im_hsv_scl = im_hsv.copy() - im_hsv_scl[:, :, 2] = im_hsv[:, :, 2] * A_scl - im_u = (holoviews.HSV(im_hsv_scl, kdims=['height', 'width']) - .opts(aspect='equal', frame_height=200)) - return holoviews.Layout([im_u] + [trace]).cols(1) #im_u + trace - - if nr==1: - return (holoviews.DynamicMap(lambda scl: plot_unit(0, scl), kdims=['scale']) - .redim.range(scale=(0.0, 1.0))) - else: - return (holoviews.DynamicMap(plot_unit, kdims=['unit_id', 'scale']) - .redim.range(unit_id=(0, nr-1), scale=(0.0, 1.0))) - - -def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, 
slice_dim: Optional[int] = None):
-    """Gets contour of spatial components and returns their coordinates
-
-    Args:
-        A:   np.ndarray or sparse matrix
-             Matrix of Spatial components (d x K)
-
-        dims: tuple of ints
-              Spatial dimensions of movie
-
-        thr: scalar between 0 and 1
-             Energy threshold for computing contours (default 0.9)
-
-        thr_method: string
-            Method of thresholding:
-                'max' sets to zero pixels that have value less than a fraction of the max value
-                'nrg' keeps the pixels that contribute up to a specified fraction of the energy
-
-        swap_dim: bool
-            If False (default), each column of A should be reshaped in F-order to recover the mask;
-            this is correct if the dimensions have not been reordered from (y, x[, z]).
-            If True, each column should be reshaped in C-order; this is correct for dims = ([z, ]x, y).
-
-        slice_dim: int or None
-            Which dimension to slice along if we have 3D data. (i.e., get contours on each plane along this axis).
-            The default (None) is 0 if swap_dim is True, else -1.
-
-    Returns:
-        Coor: list of coordinates with center of mass and
-              contour plot coordinates (per layer) for each component
-    """
-
-    if 'csc_matrix' not in str(type(A)):
-        A = csc_matrix(A)
-    d, nr = np.shape(A)
-
-    coordinates = []
-
-    # get the center of mass of each neuron (patch)
-    cm = caiman.base.rois.com(A, *dims, order='C' if swap_dim else 'F')
-
-    # for each patch
-    for i in range(nr):
-        pars: dict = dict()
-        # cumulative sum of the energy of the i-th component, with pixels ordered from highest to lowest energy
-        patch_data = A.data[A.indptr[i]:A.indptr[i + 1]]
-        indx = np.argsort(patch_data)[::-1]
-        if thr_method == 'nrg':
-            cumEn = np.cumsum(patch_data[indx]**2)
-            if len(cumEn) == 0:
-                pars = dict(
-                    coordinates=np.array([]),
-                    CoM=np.array([np.NaN, np.NaN]),
-                    neuron_id=i + 1,
-                )
-                coordinates.append(pars)
-                continue
-            else:
-                # we work with normalized values
-                cumEn /= cumEn[-1]
-                Bvec = np.ones(d)
-                # we put it in a similar matrix
-                Bvec[A.indices[A.indptr[i]:A.indptr[i + 1]][indx]] = cumEn
-        else:
-            if thr_method != 'max':
-                warn("Unknown threshold method. Choosing max")
-            Bvec = np.zeros(d)
-            Bvec[A.indices[A.indptr[i]:A.indptr[i + 1]]] = patch_data / patch_data.max()
-
-        if swap_dim:
-            Bmat = np.reshape(Bvec, dims, order='C')
-        else:
-            Bmat = np.reshape(Bvec, dims, order='F')
-
-        def get_slice_coords(B: np.ndarray) -> np.ndarray:
-            """Get contour coordinates for a 2D slice"""
-            d1, d2 = B.shape
-            vertices = find_contours(B.T, thr)
-            # this fix is necessary for having disjoint figures and borders plotted correctly
-            v = np.atleast_2d([np.nan, np.nan])
-            for _, vtx in enumerate(vertices):
-                num_close_coords = np.sum(np.isclose(vtx[0, :], vtx[-1, :]))
-                if num_close_coords < 2:
-                    if num_close_coords == 0:
-                        # contour ends on two different borders: close it through the nearest corner
-                        newpt = np.round(np.mean(vtx[[0, -1], :], axis=0) / [d2, d1]) * [d2, d1]
-                        vtx = np.concatenate((newpt[np.newaxis, :], vtx, newpt[np.newaxis, :]), axis=0)
-                    else:
-                        # contour starts and ends on the same border: close the polygon directly
-                        vtx = np.concatenate((vtx, vtx[0, np.newaxis]), axis=0)
-                v = np.concatenate(
-                    (v, vtx, np.atleast_2d([np.nan, np.nan])), axis=0)
-            return v
-
-        if len(dims) == 2:
-            pars['coordinates'] = get_slice_coords(Bmat)
-        else:
-            # make a list of the contour coordinates for each 2D slice
-            pars['coordinates'] = []
-            if slice_dim is None:
-                slice_dim = 0 if swap_dim else -1
-            for s in range(dims[slice_dim]):
-                B = Bmat.take(s, axis=slice_dim)
-                pars['coordinates'].append(get_slice_coords(B))
-
-        pars['CoM'] = np.squeeze(cm[i, :])
-        pars['neuron_id'] = i + 1
-        coordinates.append(pars)
-    return coordinates
-
-
-def nb_view_patches3d(Y_r, A, C, dims, image_type='mean', Yr=None,
-                      max_projection=False, axis=0, thr=0.9, denoised_color=None, cmap='jet'):
-    """
-    Interactive plotting utility for ipython notebook
-
-    Args:
-        Y_r: np.ndarray
-            residual of each trace
-
-        A,C: np.ndarrays
-            outputs of matrix factorization algorithm
-
-        dims: tuple of ints
-            dimensions of movie (x, y and z)
-
-        image_type: 'mean', 'max' or 'corr'
-            image to be overlaid to neurons
-            (average of shapes, maximum of shapes or nearest neighbor correlation of raw data)
-
-        Yr: np.ndarray
-            movie, only required if image_type=='corr' to calculate correlation image
-
-        max_projection: boolean
-            plot max projection along specified axis if True, plot layers if False
-
-        axis: int (0, 1 or 2)
-            axis along which max projection is performed or layers are shown
-
-        thr: scalar between 0 and 1
-            Energy threshold for computing contours
-
-        denoised_color: string or None
-            color name (e.g. 'red') or hex color code (e.g. '#F0027F')
-
-        cmap: string
-            name of colormap (e.g. 
'viridis') used to plot image_neurons - - Raises: - ValueError "image_type must be 'mean', 'max' or 'corr'" - - """ - - bokeh.io.curdoc().clear() # prune old orphaned models, otherwise filesize blows up - d = A.shape[0] - order = list(range(4)) - order.insert(0, order.pop(axis)) - Y_r = Y_r + C - index_permut = np.reshape(np.arange(d), dims, order='F').transpose( - order[:-1]).reshape(d, order='F') - A = csc_matrix(A)[index_permut, :] - dims = tuple(np.array(dims)[order[:3]]) - d1, d2, d3 = dims - colormap = matplotlib.colormaps.get_cmap(cmap) - grayp = [matplotlib.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))] - nr, T = C.shape - - x = np.arange(T) - - source = ColumnDataSource(data=dict(x=x, y=Y_r[0] / 100, y2=C[0] / 100)) - source_ = ColumnDataSource(data=dict(z=Y_r / 100, z2=C / 100)) - sourceN = ColumnDataSource(data=dict(N=[nr], nan=np.array([np.nan]))) - - if max_projection: - if image_type == 'corr': - tmp = [(caiman.summary_images.local_correlations( - Yr[index_permut].reshape(dims + (-1,), order='F'))[:, ::-1]).max(i) - for i in range(3)] - - elif image_type == 'mean': - tmp = [(np.array(A.mean(axis=1)).reshape(dims, order='F')[:, ::-1]).max(i) - for i in range(3)] - - elif image_type == 'max': - tmp = [(A.max(axis=1).toarray().reshape(dims, order='F')[:, ::-1]).max(i) - for i in range(3)] - - else: - raise ValueError("image_type must be 'mean', 'max' or 'corr'") - - image_neurons = np.nan * \ - np.ones((int(1.05 * (d1 + d2)), int(1.05 * (d1 + d3)))) - - image_neurons[:d2, -d3:] = tmp[0][::-1] - image_neurons[:d2, :d1] = tmp[2].T[::-1] - image_neurons[-d1:, -d3:] = tmp[1] - offset1 = image_neurons.shape[1] - d3 - offset2 = image_neurons.shape[0] - d1 - - proj_ = [coo_matrix([A[:, nnrr].toarray().reshape(dims, order='F').max( - i).reshape(-1, order='F') for nnrr in range(A.shape[1])]) for i in range(3)] - proj_ = [pproj_.T for pproj_ in proj_] - - coors = [get_contours(proj_[i], tmp[i].shape, thr=thr) - for i in range(3)] - - plt.close() - K = np.max([[len(cor['coordinates']) for cor in cc] for cc in coors]) - cc1 = np.nan * np.zeros(np.shape(coors) + (K,)) - cc2 = np.nan * np.zeros(np.shape(coors) + (K,)) - for i, cor in enumerate(coors[0]): - cc1[0, i, :len(cor['coordinates']) - ] = cor['coordinates'][:, 0] + offset1 - cc2[0, i, :len(cor['coordinates'])] = cor['coordinates'][:, 1] - for i, cor in enumerate(coors[2]): - cc1[1, i, :len(cor['coordinates'])] = cor['coordinates'][:, 1] - cc2[1, i, :len(cor['coordinates'])] = cor['coordinates'][:, 0] - for i, cor in enumerate(coors[1]): - cc1[2, i, :len(cor['coordinates']) - ] = cor['coordinates'][:, 0] + offset1 - cc2[2, i, :len(cor['coordinates']) - ] = cor['coordinates'][:, 1] + offset2 - - c1x = cc1[0][0] - c2x = cc2[0][0] - c1y = cc1[1][0] - c2y = cc2[1][0] - c1z = cc1[2][0] - c2z = cc2[2][0] - source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2)) - source2 = ColumnDataSource(data=dict(c1x=c1x, c1y=c1y, c1z=c1z, - c2x=c2x, c2y=c2y, c2z=c2z)) - callback = CustomJS(args=dict(source=source, source_=source_, sourceN=sourceN, - source2=source2, source2_=source2_), code=""" - var data = source.data; - var data_ = source_.data; - var f = cb_obj.value - 1 - var x = data['x'] - var y = data['y'] - var y2 = data['y2'] - for (var i = 0; i < x.length; i++) { - y[i] = data_['z'][i+f*x.length] - y2[i] = data_['z2'][i+f*x.length] - } - - var data2_ = source2_.data; - var data2 = source2.data; - var c1x = data2['c1x']; - var c2x = data2['c2x']; - var c1y = data2['c1y']; - var c2y = data2['c2y']; - var c1z = data2['c1z']; - var 
c2z = data2['c2z']; - var cc1 = data2_['cc1']; - var cc2 = data2_['cc2']; - var N = sourceN.data['N'][0]; - for (i = 0; i < c1x.length; i++) { - c1x[i] = cc1[f*c1x.length + i] - c2x[i] = cc2[f*c1x.length + i] - } - for (i = 0; i < c1x.length; i++) { - c1y[i] = cc1[N*c1y.length + f*c1y.length + i] - c2y[i] = cc2[N*c1y.length + f*c1y.length + i] - } - for (i = 0; i < c1x.length; i++) { - c1z[i] = cc1[2*N*c1z.length + f*c1z.length + i] - c2z[i] = cc2[2*N*c1z.length + f*c1z.length + i] - } - source2.change.emit(); - source.change.emit(); - """) - else: - - if image_type == 'corr': - image_neurons = caiman.summary_images.local_correlations( - Yr[index_permut].reshape(dims + (-1,), order='F')) - - elif image_type == 'mean': - image_neurons = np.array(A.mean(axis=1)).reshape(dims, order='F') - - elif image_type == 'max': - image_neurons = A.max(axis=1).toarray().reshape(dims, order='F') - - else: - raise ValueError('image_type must be mean, max or corr') - - cmap = bokeh.models.mappers.LinearColorMapper([matplotlib.colors.rgb2hex(m) - for m in colormap(np.arange(colormap.N))]) - cmap.high = image_neurons.max() - coors = get_contours(A, dims, thr=thr) - plt.close() - cc1 = [[(l[:, 0]) for l in n['coordinates']] for n in coors] - cc2 = [[(l[:, 1]) for l in n['coordinates']] for n in coors] - length = np.ravel([list(map(len, cc)) for cc in cc1]) - idx = np.cumsum(np.concatenate([[0], length[:-1]])) - cc1 = np.concatenate(list(map(np.concatenate, cc1))) - cc2 = np.concatenate(list(map(np.concatenate, cc2))) - # pick initial layer in which first neuron lies - linit = int(round(coors[0]['CoM'][0])) - K = length.max() - c1 = np.nan * np.zeros(K) - c2 = np.nan * np.zeros(K) - c1[:length[linit]] = cc1[idx[linit]:idx[linit] + length[linit]] - c2[:length[linit]] = cc2[idx[linit]:idx[linit] + length[linit]] - source2 = ColumnDataSource(data=dict(c1=c1, c2=c2)) - source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2)) - source2_idx = ColumnDataSource(data=dict(idx=idx, length=length)) - source3 = ColumnDataSource( - data=dict(image=[image_neurons[linit]], im=[image_neurons], - x=[0], y=[0], dw=[d3], dh=[d2])) - callback = CustomJS(args=dict(source=source, source_=source_, sourceN=sourceN, - source2=source2, source2_=source2_, source2_idx=source2_idx), - code=""" - var data = source.data; - var data_ = source_.data; - var f = slider_neuron.value-1; - var l = slider_layer.value-1; - var x = data['x'] - var y = data['y'] - var y2 = data['y2'] - for (var i = 0; i < x.length; i++) { - y[i] = data_['z'][i+f*x.length] - y2[i] = data_['z2'][i+f*x.length] - } - - var data2 = source2.data; - var data2_ = source2_.data; - var data2_idx = source2_idx.data; - var idx = data2_idx['idx']; - var c1 = data2['c1']; - var c2 = data2['c2']; - var nz = idx.length / sourceN.data['N'][0]; - var nan = sourceN.data['nan'][0]; - for (i = 0; i < c1.length; i++) { - c1[i] = nan; - c2[i] = nan; - } - for (i = 0; i < data2_idx['length'][l+f*nz]; i++) { - c1[i] = data2_['cc1'][idx[l+f*nz] + i]; - c2[i] = data2_['cc2'][idx[l+f*nz] + i]; - } - source2.change.emit(); - source.change.emit(); - """) - - callback_layer = CustomJS(args=dict(source=source3, sourceN=sourceN, source2=source2, - source2_=source2_, source2_idx=source2_idx), code=""" - var f = slider_neuron.value-1; - var l = slider_layer.value-1; - var dh = source.data['dh'][0]; - var dw = source.data['dw'][0]; - var image = source.data['image'][0]; - var images = source.data['im'][0]; - for (var i = 0; i < dw; i++) { - for (var j = 0; j < dh; j++){ - image[i*dh+j] = images[l*dh*dw + 
i*dh + j];
-                }
-            }
-
-            var data2 = source2.data;
-            var data2_ = source2_.data;
-            var data2_idx = source2_idx.data;
-            var idx = data2_idx['idx']
-            var c1 = data2['c1'];
-            var c2 = data2['c2'];
-            var nz = idx.length / sourceN.data['N'][0];
-            var nan = sourceN.data['nan'][0];
-            for (i = 0; i < c1.length; i++) {
-                c1[i] = nan;
-                c2[i] = nan;
-            }
-            for (i = 0; i < data2_idx['length'][l+f*nz]; i++) {
-                c1[i] = data2_['cc1'][idx[l+f*nz] + i];
-                c2[i] = data2_['cc2'][idx[l+f*nz] + i];
-            }
-            source.change.emit()
-            source2.change.emit();
-        """)
-
-    plot = bpl.figure(width=600, height=300)
-    plot.line('x', 'y', source=source, line_width=1, line_alpha=0.6)
-    if denoised_color is not None:
-        plot.line('x', 'y2', source=source, line_width=1,
-                  line_alpha=0.6, color=denoised_color)
-    slider = bokeh.models.Slider(start=1, end=Y_r.shape[0], value=1, step=1,
-                                 title="Neuron Number")
-    slider.js_on_change('value', callback)
-    xr = Range1d(start=0, end=image_neurons.shape[1] if max_projection else d3)
-    yr = Range1d(start=image_neurons.shape[0] if max_projection else d2, end=0)
-    plot1 = bpl.figure(x_range=xr, y_range=yr, width=300, height=300)
-
-    if max_projection:
-        plot1.image(image=[image_neurons], x=0, y=0,
-                    dw=image_neurons.shape[1], dh=image_neurons.shape[0], palette=grayp)
-        plot1.patch('c1x', 'c2x', alpha=0.6, color='purple',
-                    line_width=2, source=source2)
-        plot1.patch('c1y', 'c2y', alpha=0.6, color='purple',
-                    line_width=2, source=source2)
-        plot1.patch('c1z', 'c2z', alpha=0.6, color='purple',
-                    line_width=2, source=source2)
-        layout = bokeh.layouts.layout([[slider], [bokeh.layouts.row(plot1, plot)]],
-                                      sizing_mode="scale_width")
-    else:
-        slider_layer = bokeh.models.Slider(start=1, end=d1, value=linit + 1, step=1,
-                                           title="Layer")
-        slider_layer.js_on_change('value', callback_layer)
-        callback.args['slider_neuron'] = slider
-        callback.args['slider_layer'] = slider_layer
-        callback_layer.args['slider_neuron'] = slider
-        callback_layer.args['slider_layer'] = slider_layer
-        plot1.image(image='image', x='x', y='y', dw='dw', dh='dh',
-                    color_mapper=cmap, source=source3)
-        plot1.patch('c1', 'c2', alpha=0.6, color='purple',
-                    line_width=2, source=source2)
-        layout = bokeh.layouts.layout([[slider], [slider_layer], [bokeh.layouts.row(plot1, plot)]],
-                                      sizing_mode="scale_width")
-    if Y_r.shape[0] == 1:
-        layout = bokeh.layouts.row(plot1, plot)
-    bpl.show(layout)
-
-    return Y_r
-
-
-def nb_imshow(image, cmap='jet'):
-    """
-    Interactive equivalent of imshow for ipython notebook
-    """
-    colormap = matplotlib.colormaps.get_cmap(cmap)  # choose any matplotlib colormap here
-    grayp = [matplotlib.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))]
-    xr = Range1d(start=0, end=image.shape[1])
-    yr = Range1d(start=image.shape[0], end=0)
-    p = bpl.figure(x_range=xr, y_range=yr)
-
-    p.image(image=[image], x=0, y=0,
-            dw=image.shape[1], dh=image.shape[0], palette=grayp)
-
-    return p
-
-
-def nb_plot_contour(image, A, d1, d2, thr=None, thr_method='max', maxthr=0.2, nrgthr=0.9,
-                    face_color=None, line_color='red', alpha=0.4, line_width=2,
-                    coordinates=None, show=True, cmap='viridis', **kwargs):
-    """Interactive equivalent of plot_contours for ipython notebook
-
-    Args:
-        A: np.ndarray or sparse matrix
-           Matrix of Spatial components (d x K)
-
-        image: np.ndarray (2D)
-            Background image (e.g. mean, correlation)
-
-        d1,d2: ints
-            dimensions of image
-
-        thr: scalar between 0 and 1
-            Energy threshold for computing contours
-            Kept for backwards compatibility. If not None then thr_method = 'nrg', and nrgthr = thr
-
-        thr_method: [optional] string
-            Method of thresholding:
-                'max' sets to zero pixels that have value less than a fraction of the max value
-                'nrg' keeps the pixels that contribute up to a specified fraction of the energy
-
-        maxthr: [optional] scalar
-            Threshold of max value
-
-        nrgthr: [optional] scalar
-            Threshold of energy
-
-        display_number: Boolean
-            Display number of ROIs if checked (default True)
-
-        max_number: int
-            Display the number for only the first max_number components (default None, display all numbers)
-
-        cmap: string
-            User specifies the colormap (default None, default colormap)
-    """
-
-    p = nb_imshow(image, cmap=cmap)
-    p.width = 600
-    p.height = 600 * d1 // d2
-    center = caiman.base.rois.com(A, d1, d2)
-    p.circle(center[:, 1], center[:, 0], size=10, color="black",
-             fill_color=None, line_width=2, alpha=1)
-    if coordinates is None:
-        coors = get_contours(A, np.shape(image), thr, thr_method)
-    else:
-        coors = coordinates
-    cc1 = [np.clip(cor['coordinates'][1:-1, 0], 0, d2) for cor in coors]
-    cc2 = [np.clip(cor['coordinates'][1:-1, 1], 0, d1) for cor in coors]
-
-    p.patches(cc1, cc2, alpha=1, color=face_color,
-              line_color=line_color, line_width=2, **kwargs)
-    if show:
-        bpl.show(p)
-    return p
-
-
-def playMatrix(mov, gain=1.0, frate=.033):
-    for frame in mov:
-        if gain != 1:
-            cv2.imshow('frame', frame * gain)
-        else:
-            cv2.imshow('frame', frame)
-
-        if cv2.waitKey(int(frate * 1000)) & 0xFF == ord('q'):
-            cv2.destroyAllWindows()
-            break
-    cv2.destroyAllWindows()
-
-
-def matrixMontage(spcomps, *args, **kwargs):
-    numcomps, _, _ = spcomps.shape
-    rowcols = int(np.ceil(np.sqrt(numcomps)))
-    for k, comp in enumerate(spcomps):
-        plt.subplot(rowcols, rowcols, k + 1)
-        plt.imshow(comp, *args, **kwargs)
-        plt.axis('off')
-
-
-VIDEO_TAG = """<video controls>
- <source src="data:video/x-m4v;base64,{0}" type="video/mp4">
- Your browser does not support the video tag.
-</video>"""
-
-
-def anim_to_html(anim, fps=20):
-    # embed the animation as a base64-encoded mp4 inside the VIDEO_TAG template
-    if not hasattr(anim, '_encoded_video'):
-        with NamedTemporaryFile(suffix='.mp4') as f:
-            anim.save(f.name, fps=fps, extra_args=['-vcodec', 'libx264'])
-            video = open(f.name, "rb").read()
-        anim._encoded_video = base64.b64encode(video)
-
-    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))
-
-
-def display_animation(anim, fps=20):
-    plt.close(anim._fig)
-    return HTML(anim_to_html(anim, fps=fps))
-
-
-def view_patches_bar(Yr, A, C, b, f, d1, d2, YrA=None, img=None,
-                     r_values=None, SNR=None, cnn_preds=None):
-    """view spatial and temporal components interactively
-
-    Args:
-        Yr: np.ndarray
-            movie in format pixels (d) x frames (T)
-
-        A: sparse matrix
-           matrix of spatial components (d x K)
-
-        C: np.ndarray
-           matrix of temporal components (K x T)
-
-        b: np.ndarray
-           spatial background (vector of length d)
-
-        f: np.ndarray
-           temporal background (vector of length T)
-
-        d1,d2: ints
-           frame dimensions
-
-        YrA: np.ndarray
-            ROI filtered residual as it is given from update_temporal_components
-            If not given, then it is computed (K x T)
-
-        img: np.ndarray
-            background image for contour plotting. 
Default is the image of all spatial components (d1 x d2) - - r_values: np.ndarray - space correlation values - - SNR: np.ndarray - peak-SNR over the length of a transient for each component - - cnn_preds: np.ndarray - prediction values from the CNN classifier - """ - - plt.ion() - if 'csc_matrix' not in str(type(A)): - A = csc_matrix(A) - - nr, T = C.shape - nb = 0 if f is None else f.shape[0] - nA2 = np.sqrt(np.array(A.power(2).sum(axis=0))).squeeze() - - if YrA is None: - Y_r = spdiags(1 / nA2, 0, nr, nr) * (A.T.dot(Yr) - - (A.T.dot(b)).dot(f) - (A.T.dot(A)).dot(C)) + C - else: - Y_r = YrA + C - - if img is None: - img = np.reshape(np.array(A.mean(axis=1)), (d1, d2), order='F') - - fig = plt.figure(figsize=(10, 10)) - - axcomp = plt.axes([0.05, 0.05, 0.9, 0.03]) - - ax1 = plt.axes([0.05, 0.55, 0.4, 0.4]) - ax3 = plt.axes([0.55, 0.55, 0.4, 0.4]) - ax2 = plt.axes([0.05, 0.1, 0.9, 0.4]) - - s_comp = matplotlib.widgets.Slider(axcomp, 'Component', 0, nr + nb - 1, valinit=0) - vmax = np.percentile(img, 95) - - def update(val): - i = int(np.round(s_comp.val)) - print(('Component:' + str(i))) - - if i < nr: - - ax1.cla() - imgtmp = np.reshape(A[:, i].toarray(), (d1, d2), order='F') - ax1.imshow(imgtmp, interpolation='None', cmap=matplotlib.cm.gray, vmax=np.max(imgtmp)*0.5) - ax1.set_title('Spatial component ' + str(i + 1)) - ax1.axis('off') - - ax2.cla() - ax2.plot(np.arange(T), Y_r[i], 'c', linewidth=3) - ax2.plot(np.arange(T), C[i], 'r', linewidth=2) - ax2.set_title('Temporal component ' + str(i + 1)) - ax2.legend(labels=['Filtered raw data', 'Inferred trace'], loc=1) - if r_values is not None: - metrics = 'Evaluation Metrics\nSpatial corr:% 7.3f\nSNR:% 18.3f\nCNN:' % ( - r_values[i], SNR[i]) - metrics += ' '*15+'N/A' if np.sum(cnn_preds) in (0, None) else '% 18.3f' % cnn_preds[i] - ax2.text(0.02, 0.97, metrics, ha='left', va='top', transform=ax2.transAxes, - bbox=dict(edgecolor='k', facecolor='w', alpha=.5)) - - ax3.cla() - ax3.imshow(img, interpolation='None', cmap=matplotlib.cm.gray, vmax=vmax) - imgtmp2 = imgtmp.copy() - imgtmp2[imgtmp2 == 0] = np.nan - ax3.imshow(imgtmp2, interpolation='None', - alpha=0.5, cmap=matplotlib.cm.hot) - ax3.axis('off') - else: - ax1.cla() - bkgrnd = np.reshape(b[:, i - nr], (d1, d2), order='F') - ax1.imshow(bkgrnd, interpolation='None') - ax1.set_title('Spatial background ' + str(i + 1 - nr)) - ax1.axis('off') - - ax2.cla() - ax2.plot(np.arange(T), np.squeeze(np.array(f[i - nr, :]))) - ax2.set_title('Temporal background ' + str(i + 1 - nr)) - - def arrow_key_image_control(event): - - if event.key == 'left': - new_val = np.round(s_comp.val - 1) - if new_val < 0: - new_val = 0 - s_comp.set_val(new_val) - - elif event.key == 'right': - new_val = np.round(s_comp.val + 1) - if new_val > nr + nb: - new_val = nr + nb - s_comp.set_val(new_val) - else: - pass - - s_comp.on_changed(update) - s_comp.set_val(0) - fig.canvas.mpl_connect('key_release_event', arrow_key_image_control) - plt.show() - -def plot_contours(A, Cn, thr=None, thr_method='max', maxthr=0.2, nrgthr=0.9, display_numbers=True, max_number=None, - cmap=None, swap_dim=False, colors='w', vmin=None, vmax=None, coordinates=None, - contour_args={}, number_args={}, **kwargs): - """Plots contour of spatial components against a background image and returns their coordinates - - Args: - A: np.ndarray or sparse matrix - Matrix of Spatial components (d x K) - - Cn: np.ndarray (2D) - Background image (e.g. 
mean, correlation) - - thr_method: [optional] string - Method of thresholding: - 'max' sets to zero pixels that have value less than a fraction of the max value - 'nrg' keeps the pixels that contribute up to a specified fraction of the energy - - maxthr: [optional] scalar - Threshold of max value - - nrgthr: [optional] scalar - Threshold of energy - - thr: scalar between 0 and 1 - Energy threshold for computing contours (default 0.9) - Kept for backwards compatibility. If not None then thr_method = 'nrg', and nrgthr = thr - - display_number: Boolean - Display number of ROIs if checked (default True) - - max_number: int - Display the number for only the first max_number components (default None, display all numbers) - - cmap: string - User specifies the colormap (default None, default colormap) - - Returns: - coordinates: list of coordinates with center of mass, contour plot coordinates and bounding box for each component - """ - - if swap_dim: - Cn = Cn.T - print('Swapping dim') - - if thr is None: - try: - thr = {'nrg': nrgthr, 'max': maxthr}[thr_method] - except KeyError: - thr = maxthr - else: - thr_method = 'nrg' - - - for key in ['c', 'colors', 'line_color']: - if key in kwargs.keys(): - color = kwargs[key] - kwargs.pop(key) - - ax = plt.gca() - if vmax is None and vmin is None: - plt.imshow(Cn, interpolation=None, cmap=cmap, - vmin=np.percentile(Cn[~np.isnan(Cn)], 1), - vmax=np.percentile(Cn[~np.isnan(Cn)], 99)) - else: - plt.imshow(Cn, interpolation=None, cmap=cmap, vmin=vmin, vmax=vmax) - - if coordinates is None: - coordinates = get_contours(A, np.shape(Cn), thr, thr_method, swap_dim) - for c in coordinates: - v = c['coordinates'] - c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])), - np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))] - plt.plot(*v.T, c=colors, **contour_args) - - if display_numbers: - nr = A.shape[1] - if max_number is None: - max_number = nr - for i, c in zip(range(np.minimum(nr, max_number)), coordinates): - ax.text(c['CoM'][1], c['CoM'][0], str(i + 1), color=colors, **number_args) - return coordinates - -def plot_shapes(Ab, dims, num_comps=15, size=(15, 15), comps_per_row=None, - cmap='viridis', smoother=lambda s: median_filter(s, 3)): - - def GetBox(centers, R, dims): - D = len(R) - box = np.zeros((D, 2), dtype=int) - for dd in range(D): - box[dd, 0] = max((centers[dd] - R[dd], 0)) - box[dd, 1] = min((centers[dd] + R[dd] + 1, dims[dd])) - return box - - nx = int(sqrt(num_comps) * 1.3) if comps_per_row is None else comps_per_row - ny = int(ceil(num_comps / float(nx))) - plt.figure(figsize=(nx, ny)) - for i, a in enumerate(Ab.T[:num_comps]): - ax = plt.subplot(ny, nx, i + 1) - s = a.toarray().reshape(dims, order='F') - box = GetBox(np.array(center_of_mass(s), dtype=np.int16), size, dims) - plt.imshow(smoother(s[list(map(lambda a: slice(*a), box))]), - cmap=cmap, interpolation='nearest') - ax.axis('off') - plt.subplots_adjust(0, 0, 1, 1, .06, .06) - - -def nb_inspect_correlation_pnr(corr, pnr, cmap='jet', num_bins=100): - """ - inspect correlation and pnr images to infer the min_corr, min_pnr for cnmfe - - Args: - corr: ndarray - correlation image created with caiman.summary_images.correlation_pnr - - pnr: ndarray - peak-to-noise image created with caiman.summary_images.correlation_pnr - - cmap: string - colormap used for plotting corr and pnr images - For valid colormaps see https://holoviews.org/user_guide/Colormaps.html - - num_bins: int - number of bins to use for plotting histogram of corr/pnr values - - Returns: - Holoviews 
plot layout (Side effect: generates plots in notebook) - """ - - hv_corr = holoviews.Image(corr, - vdims='corr', - label='correlation').opts(cmap=cmap) - hv_pnr = holoviews.Image(pnr, - vdims='pnr', - label='pnr').opts(cmap=cmap) - - def hist(im, rx, ry, num_bins=num_bins): - obj = im.select(x=rx, y=ry) if rx and ry else im - return holoviews.operation.histogram(obj, num_bins=num_bins) - - str_corr = (holoviews.streams.RangeXY(source=hv_corr).rename(x_range='rx', y_range='ry')) - str_pnr = (holoviews.streams.RangeXY(source=hv_pnr).rename(x_range='rx', y_range='ry')) - - hist_corr = holoviews.DynamicMap( - fct.partial(hist, im=hv_corr), streams=[str_corr]) - hist_pnr = holoviews.DynamicMap( - fct.partial(hist, im=hv_pnr), streams=[str_pnr]) - - hv_layout = (hv_corr << hist_corr) + (hv_pnr << hist_pnr) - - return hv_layout - - -def inspect_correlation_pnr(correlation_image_pnr, pnr_image): - """ - inspect correlation and pnr images to infer the min_corr, min_pnr - - Args: - correlation_image_pnr: ndarray - correlation image created with caiman.summary_images.correlation_pnr - - pnr_image: ndarray - peak-to-noise image created with caiman.summary_images.correlation_pnr - """ - - fig = plt.figure(figsize=(10, 4)) - plt.axes([0.05, 0.2, 0.4, 0.7]) - im_cn = plt.imshow(correlation_image_pnr, cmap='jet') - plt.title('correlation image') - plt.colorbar() - plt.axes([0.5, 0.2, 0.4, 0.7]) - im_pnr = plt.imshow(pnr_image, cmap='jet') - plt.title('PNR') - plt.colorbar() - - s_cn_max = matplotlib.widgets.Slider(plt.axes([0.05, 0.01, 0.35, 0.03]), 'vmax', - correlation_image_pnr.min(), correlation_image_pnr.max(), valinit=correlation_image_pnr.max()) - s_cn_min = matplotlib.widgets.Slider(plt.axes([0.05, 0.07, 0.35, 0.03]), 'vmin', - correlation_image_pnr.min(), correlation_image_pnr.max(), valinit=correlation_image_pnr.min()) - s_pnr_max = matplotlib.widgets.Slider(plt.axes([0.5, 0.01, 0.35, 0.03]), 'vmax', - pnr_image.min(), pnr_image.max(), valinit=pnr_image.max()) - s_pnr_min = matplotlib.widgets.Slider(plt.axes([0.5, 0.07, 0.35, 0.03]), 'vmin', - pnr_image.min(), pnr_image.max(), valinit=pnr_image.min()) - - def update(val): - im_cn.set_clim([s_cn_min.val, s_cn_max.val]) - im_pnr.set_clim([s_pnr_min.val, s_pnr_max.val]) - fig.canvas.draw_idle() - - s_cn_max.on_changed(update) - s_cn_min.on_changed(update) - s_pnr_max.on_changed(update) - s_pnr_min.on_changed(update) - - -def get_rectangle_coords(im_dims: ArrayLike, - stride: int, - overlap: int) -> tuple[np.ndarray, np.ndarray]: - """ - Extract rectangle (patch) coordinates: a helper function used by view_quilt(). - - Given dimensions of summary image (rows x columns), stride between patches, and overlap - between patches, returns row coordinates of the patches in patch_rows, and column - coordinates patches in patch_cols. This is meant to be used by view_quilt(). - - Args: - im_dims: array-like - dimension of image (num_rows, num_cols) - stride: int - stride between patches in pixels - overlap: int - overlap between patches in pixels - - Returns: - patch_rows: ndarray - num_patch_rows x 2 array, where row i contains onset and offset row pixels for patch row i - patch_cols: ndarray - num_patch_cols x 2 array, where row j contains onset and offset column pixels for patch column j - - Note: - Currently assumes square patches so takes in a single number for stride/overlap. 
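    Example (illustrative only; the 512x512 image size and the stride/overlap
    values below are hypothetical):
        patch_rows, patch_cols = get_rectangle_coords((512, 512), 48, 16)
        # each patch spans stride + overlap = 64 pixels;
        # patch_rows[i] holds the [onset, offset] rows of patch row i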
- """ - patch_width = overlap + stride - - patch_onset_rows = np.array(list(range(0, im_dims[0] - patch_width, stride)) + [im_dims[0] - patch_width]) - patch_offset_rows = patch_onset_rows + patch_width - patch_offset_rows[patch_offset_rows > im_dims[0]-1] = im_dims[0]-1 - patch_rows = np.column_stack((patch_onset_rows, patch_offset_rows)) - - patch_onset_cols = np.array(list(range(0, im_dims[1] - patch_width, stride)) + [im_dims[1] - patch_width]) - patch_offset_cols = patch_onset_cols + patch_width - patch_offset_cols[patch_offset_cols > im_dims[1]-1] = im_dims[1]-1 - patch_cols = np.column_stack((patch_onset_cols, patch_offset_cols)) - - return patch_rows, patch_cols - - -def rect_draw(row_minmax: ArrayLike, - col_minmax: ArrayLike, - color: Optional[str]='white', - alpha: Optional[float]=0.2, - ax: Optional[Any]=None) -> tuple[Any, Any]: - """ - Draw a single rectangle on given axes object. - - Args: - row_minmax: array-like - [row_min, row_max] -- upper and lower bounds for rectangle rows (ints) - col_minmax: array-like - [col_min, col_max] -- upper and lower bounds for rectangle columns (ints) - color: matplotlib color - Any acceptable matplotlib color spec (r,g,b), string, etc., default 'white' - alpha : float - opaqueness level (0. to 1., where 1 is opaque), default 0.2 - ax : pyplot.Axes object - axes object upon which rectangle will be drawn, default None - - Returns: - ax: pyplot.Axes object - rect: matplotlib Rectangle object - """ - if ax is None: - ax = plt.gca() - - box_origin = (col_minmax[0], row_minmax[0]) - box_height = row_minmax[1] - row_minmax[0] - box_width = col_minmax[1] - col_minmax[0] - - rect = matplotlib.patches.Rectangle(box_origin, - width=box_width, - height=box_height, - color=color, - alpha=alpha) - ax.add_patch(rect) - - return ax, rect - - -def view_quilt(template_image: np.ndarray, - stride: int, - overlap: int, - color: Optional[Any]='white', - alpha: Optional[float]=0.2, - vmin: Optional[float]=None, - vmax: Optional[float]=None, - figsize: Optional[tuple[float,float]]=(6.,6.), - ax: Optional[Any]=None) -> Any: - """ - Plot patches on template image given stride and overlap parameters on template image. - This can be useful for checking motion correction and cnmf spatial parameters. - It ends up looking like a quilt pattern. - - Args: - template_image: ndarray - row x col summary image upon which to draw patches (e.g., correlation image) - stride (int) stride between patches in pixels - overlap (int) overlap between patches in pixels - color: matplotlib color - Any acceptable matplotlib color (r,g,b), string, etc., default 'white' - alpha: patch transparency (0. to 1.: higher is more opaque), default 0.2 - vmin: vmin for plotting underlying template image, default None - vmax: vmax for plotting underlying template image, default None - figsize: fig size in inches (width, height). Only used if ax is None, default (6.,6.) - ax (pyplot.Axes object): axes object in case user wants to plot quilt on pre-existing axes, default None - - Returns: - ax: pyplot.Axes object - - Example: - # Uses cnm object (cnm) and correlation image (corr_image) as template: - patch_width = 2*cnm.params.patch['rf'] + 1 - patch_overlap = cnm.params.patch['stride'] + 1 - patch_stride = patch_width - patch_overlap - ax = view_quilt(corr_image, patch_stride, patch_overlap, vmin=0.0, vmax=0.6); - - Note: - Currently assumes square patches so takes in a single number for stride/overlap. 
- """ - im_dims = template_image.shape - patch_rows, patch_cols = get_rectangle_coords(im_dims, stride, overlap) - - if ax is None: - f, ax = plt.subplots(figsize=figsize) - - ax.imshow(template_image, cmap='gray', vmin=vmin, vmax=vmax) - for patch_row in patch_rows: - for patch_col in patch_cols: - ax, _ = rect_draw(patch_row, - patch_col, - color=color, - alpha=alpha, - ax=ax) - - return ax - - -def create_quilt_patches(patch_rows, patch_cols): - """ - Helper function for nb_view_quilt. - Create patches given the row and column coordinates. - - Args: - patch_rows (ndarray): Array of row start and end positions for each patch. - patch_cols (ndarray): Array of column start and end positions for each patch. - - Returns: - list: A list of dictionaries, each containing the center coordinates, width, - and height of a patch. - """ - patches = [] - for row in patch_rows: - for col in patch_cols: - center_x = (col[0] + col[1]) / 2 - center_y = (row[0] + row[1]) / 2 - width = col[1] - col[0] - height = row[1] - row[0] - patches.append({'center_x': center_x, 'center_y': center_y, 'width': width, 'height': height}) - return patches - - -def nb_view_quilt(template_image: np.ndarray, - rf: int, - stride_input: int, - color: Optional[Any]='white', - alpha: Optional[float]=0.2): - """ - Bokeh implementation of view_quilt. - Plot patches on the template image given stride and overlap parameters. - - Args: - template_image (ndarray): Row x column summary image upon which to draw patches (e.g., correlation image). - rf (int): Half-size of the patches in pixels (patch width is rf*2 + 1). - stride_input (int): Amount of overlap between the patches in pixels (overlap is stride_input + 1). - color (Optional[Any]): Color of the patches, default 'white'. - alpha (Optional[float]): Patch transparency, default 0.2. 
- """ - - width = (rf*2)+1 - overlap = stride_input+1 - stride = width-overlap - - im_dims = template_image.shape - patch_rows, patch_cols = get_rectangle_coords(im_dims, stride, overlap) - patches = create_quilt_patches(patch_rows, patch_cols) - - plot = bpl.figure(x_range=(0, im_dims[1]), y_range=(im_dims[0], 0), width=600, height=600) - #plot.y_range.flipped = True - plot.image(image=[template_image], x=0, y=0, dw=im_dims[1], dh=im_dims[0], palette="Greys256") - source = ColumnDataSource(data=dict( - center_x=[patch['center_x'] for patch in patches], - center_y=[patch['center_y'] for patch in patches], - width=[patch['width'] for patch in patches], - height=[patch['height'] for patch in patches] - )) - plot.rect(x='center_x', y='center_y', width='width', height='height', source=source, color=color, alpha=alpha) - - # Create sliders - stride_slider = bokeh.models.Slider(start=1, end=100, value=rf, step=1, title="Patch half-size (rf)") - overlap_slider = bokeh.models.Slider(start=0, end=100, value=stride_input, step=1, title="Overlap (stride)") - - callback = CustomJS(args=dict(source=source, im_dims=im_dims, stride_slider=stride_slider, overlap_slider=overlap_slider), code=""" - function get_rectangle_coords(im_dims, stride, overlap) { - let patch_width = overlap + stride; - - let patch_onset_rows = Array.from({length: Math.ceil((im_dims[0] - patch_width) / stride)}, (_, i) => i * stride).concat([im_dims[0] - patch_width]); - let patch_offset_rows = patch_onset_rows.map(x => Math.min(x + patch_width, im_dims[0] - 1)); - let patch_rows = patch_onset_rows.map((x, i) => [x, patch_offset_rows[i]]); - - let patch_onset_cols = Array.from({length: Math.ceil((im_dims[1] - patch_width) / stride)}, (_, i) => i * stride).concat([im_dims[1] - patch_width]); - let patch_offset_cols = patch_onset_cols.map(x => Math.min(x + patch_width, im_dims[1] - 1)); - let patch_cols = patch_onset_cols.map((x, i) => [x, patch_offset_cols[i]]); - - return [patch_rows, patch_cols]; - } - - function create_quilt_patches(patch_rows, patch_cols) { - let patches = []; - for (let row of patch_rows) { - for (let col of patch_cols) { - let center_x = (col[0] + col[1]) / 2; - let center_y = (row[0] + row[1]) / 2; - let width = col[1] - col[0]; - let height = row[1] - row[0]; - patches.push({'center_x': center_x, 'center_y': center_y, 'width': width, 'height': height}); - } - } - return patches; - } - - let width = (stride_slider.value * 2) + 1; - let overlap = overlap_slider.value + 1; - let stride = width - overlap - - let [patch_rows, patch_cols] = get_rectangle_coords(im_dims, stride, overlap); - let patches = create_quilt_patches(patch_rows, patch_cols); - - source.data = { - center_x: patches.map(patch => patch.center_x), - center_y: patches.map(patch => patch.center_y), - width: patches.map(patch => patch.width), - height: patches.map(patch => patch.height) - }; - source.change.emit(); - """) - - stride_slider.js_on_change('value', callback) - overlap_slider.js_on_change('value', callback) - - - bpl.show(bokeh.layouts.row(plot, bokeh.layouts.column(stride_slider, overlap_slider))) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index f3a39222c..5c7c299fe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,30 +1,22 @@ -av -bokeh coverage cython h5py -holoviews ipykernel ipython ipyparallel -jupyter +jupyterlab keras matplotlib -mypy numpy -numpydoc -opencv-python +opencv-python-headless peakutils pims psutil -pynwb -pyqtgraph scikit-image scikit-learn scipy tensorflow tifffile -tk 
 tqdm
 yapf
 zarr
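
With bokeh, holoviews, pyqtgraph and the other visualization extras dropped from requirements.txt, code that still imports the deleted plotting helpers will now fail at import time. A minimal guard along the following lines (a sketch only, not part of this patch; HAS_VIZ and require_viz are invented names) lets core pipelines degrade gracefully when the extras are absent:

    # Sketch: treat the visualization packages removed above as optional extras.
    try:
        import bokeh.plotting   # no longer a core dependency after this patch
        import holoviews        # no longer a core dependency after this patch
        HAS_VIZ = True
    except ImportError:
        HAS_VIZ = False

    def require_viz() -> None:
        # fail early with a clear message instead of deep inside a plot call
        if not HAS_VIZ:
            raise ImportError("bokeh/holoviews were removed from core requirements; "
                              "install them separately to use the visualization utilities")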