
Commit 494ab09

Merge pull request #3 from pqz317/main
add scaffold for format spikes
2 parents b4e8c95 + cba9e48 commit 494ab09

File tree

1 file changed: +80 -0 lines

format_spikes.ipynb (+80 -0)

@@ -0,0 +1,80 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "from spike_tools import (\n",
+    "    general as spike_general,\n",
+    "    analysis as spike_analysis,\n",
+    ")\n",
+    "from scipy.ndimage import gaussian_filter1d\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def firing_rate(spData, channelData, bins, smoothing):\n",
+    "    bin_size = np.abs(np.diff(bins)[0])\n",
+    "    # spData is pandas dataframe with at least TrialNumber, UnitId, and SpikeTimeFromStart columns\n",
+    "    trial_unit_index = pd.MultiIndex.from_product([np.unique(spData.TrialNumber), np.unique(channelData.UnitID).astype(int), bins[:-1]], names=[\"TrialNumber\", \"UnitID\", \"TimeBins\"]).to_frame()\n",
+    "    trial_unit_index = trial_unit_index.droplevel(2).drop(columns=[\"TrialNumber\", \"UnitID\"]).reset_index()\n",
+    "    \n",
+    "    groupedData = spData.groupby([\"TrialNumber\", \"UnitID\"])\n",
+    "\n",
+    "    fr_DF = groupedData.apply(lambda x: pd.DataFrame(\\\n",
+    "        {\"SpikeCounts\": np.histogram(x.SpikeTimeFromStart/1000, bins)[0],\\\n",
+    "        \"FiringRate\": gaussian_filter1d(np.histogram(x.SpikeTimeFromStart/1000, bins)[0].astype(float)/bin_size, smoothing),\\\n",
+    "        \"TimeBins\": bins[:-1]}))\n",
+    "    #print(\"Trial\", np.unique(trial_unit_index.UnitID))\n",
+    "    #print(\"FR\", np.unique(fr_DF.droplevel(2).reset_index().UnitID))\n",
+    "    all_units_df = trial_unit_index.merge(fr_DF.droplevel(2).reset_index(), how='outer', on=[\"TrialNumber\", \"UnitID\", \"TimeBins\"])\n",
+    "    #for unit in np.unique(all_units_df.UnitID):\n",
+    "    #    unit_df = all_units_df[all_units_df.UnitID == unit]\n",
+    "    #    print(unit_df)\n",
+    "    #    print(unit, len(unit_df))\n",
+    "    all_units_df.FiringRate = all_units_df.FiringRate.fillna(0.0)\n",
+    "    all_units_df.SpikeCounts = all_units_df.SpikeCounts.fillna(0)\n",
+    "    return all_units_df"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "spike_times = pd.read_pickle(\"/data/sub-SA_sess-20180802_spike_times.pickle\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.6"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
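The committed notebook only defines firing_rate() and loads the spike-time pickle; it does not yet call the helper. A minimal usage sketch, assuming the notebook cells above have been run, and assuming hypothetical channel metadata and binning parameters (channel_data, bins, smoothing) that do not appear in this commit:

import numpy as np
import pandas as pd

# Hypothetical stand-in for channel metadata: any DataFrame with a numeric
# UnitID column should satisfy the channelData argument.
channel_data = pd.DataFrame({"UnitID": np.unique(spike_times.UnitID)})

# 50 ms bins over the first 2 s of each trial. Bin edges are in seconds,
# since the helper divides SpikeTimeFromStart (ms) by 1000, and smoothing
# is the Gaussian sigma in bins passed to gaussian_filter1d.
bins = np.arange(0, 2.05, 0.05)
frs = firing_rate(spike_times, channel_data, bins, smoothing=1)

# One row per (TrialNumber, UnitID, TimeBins) combination, with SpikeCounts
# and FiringRate columns; combinations with no spikes are filled with zeros.
print(frs.head())

The MultiIndex "scaffold" built inside firing_rate() is what guarantees those zero rows: it enumerates every trial x unit x bin combination up front, then outer-merges the per-group histograms onto it and fills the gaps with fillna.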
