Commit 9b1ca58 (1 parent: 1919a79)
Showing 109 changed files with 14,381 additions and 1,821 deletions.
Binary file modified: docs/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip (+6.13 KB, 110%)
docs/_downloads/63b412797a45350b72c090ea95e36ee8/plot_Hinss2021_classification.py (193 additions, 0 deletions)
@@ -0,0 +1,193 @@
"""
================================
Hinss2021 classification example
================================

This example shows how to use the Hinss2021 dataset
with the resting state paradigm.

In this example, we aim to determine the most effective channel selection strategy
for the :class:`moabb.datasets.Hinss2021` dataset.
The pipelines under consideration are:

- `Xdawn`
- Electrode selection based on time epochs data
- Electrode selection based on covariance matrices
"""

# License: BSD (3-clause)

import warnings

import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from pyriemann.channelselection import ElectrodeSelection
from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import Xdawn
from pyriemann.tangentspace import TangentSpace
from sklearn.base import TransformerMixin
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.pipeline import make_pipeline

from moabb import set_log_level
from moabb.datasets import Hinss2021
from moabb.evaluations import CrossSessionEvaluation
from moabb.paradigms import RestingStateToP300Adapter


# Suppressing future and runtime warnings for cleaner output
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=RuntimeWarning)

set_log_level("info")
##############################################################################
# Create util transformer
# -----------------------
#
# Let's create a scikit-learn transformer mixin that selects electrodes
# based on the covariance information.


class EpochSelectChannel(TransformerMixin):
    """Select channels based on covariance information."""

    def __init__(self, n_chan, cov_est):
        self._chs_idx = None
        self.n_chan = n_chan
        self.cov_est = cov_est

    def fit(self, X, _y=None):
        # Get the covariances of the channels for each epoch.
        covs = Covariances(estimator=self.cov_est).fit_transform(X)
        # Get the average covariance between the channels
        m = np.mean(covs, axis=0)
        n_feats, _ = m.shape
        # Select the `n_chan` channels having the maximum covariances.
        all_max = []
        for i in range(n_feats):
            for j in range(n_feats):
                if len(all_max) <= self.n_chan:
                    all_max.append(m[i, j])
                else:
                    if m[i, j] > max(all_max):
                        all_max[np.argmin(all_max)] = m[i, j]
        indices = []
        for v in all_max:
            indices.extend(np.argwhere(m == v).flatten())
        # We will keep only these channels for the transform step.
        indices = np.unique(indices)
        self._chs_idx = indices
        return self

    def transform(self, X):
        return X[:, self._chs_idx, :]

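##############################################################################
# As an optional, illustrative sanity check, we fit the transformer on random
# data of arbitrary shape and verify that only a subset of channels is kept.

rng = np.random.default_rng(42)
X_dummy = rng.standard_normal((10, 16, 128))  # 10 epochs, 16 channels, 128 samples
selector = EpochSelectChannel(n_chan=4, cov_est="lwf").fit(X_dummy)
# Because channels are taken from pairs of covariance entries, the number of
# kept channels may differ slightly from ``n_chan``.
print("Sanity check, shape after selection:", selector.transform(X_dummy).shape)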
##############################################################################
# Initialization Process
# ----------------------
#
# 1) Define the experimental paradigm object (RestingState)
# 2) Load the datasets
# 3) Select a subset of subjects and specific events for analysis

# Here we define the MNE events for the RestingState paradigm.
events = dict(easy=2, diff=3)
# The resting-state paradigm is adapted to reuse the P300 processing pipeline.
paradigm = RestingStateToP300Adapter(events=events, tmin=0, tmax=0.5)
# We define a list with the dataset(s) to use.
datasets = [Hinss2021()]

# To reduce the computation time in the example, we will only use a
# subset of the subjects.
start_subject = 1
stop_subject = 2
title = "Datasets: "
for dataset in datasets:
    title = title + " " + dataset.code
    dataset.subject_list = dataset.subject_list[start_subject:stop_subject]

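# To inspect the epochs produced by the paradigm before running the full
# evaluation, one could call ``paradigm.get_data`` (the standard MOABB
# accessor). The sketch below is left commented out because it triggers a
# dataset download:
#
# X, y, metadata = paradigm.get_data(dataset=datasets[0], subjects=datasets[0].subject_list)
# print(X.shape, len(y))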
##############################################################################
# Create Pipelines
# ----------------
#
# Pipelines must be a dict of scikit-learn pipeline transformers.

pipelines = {}

pipelines["Xdawn+Cov+TS+LDA"] = make_pipeline(
    Xdawn(nfilter=4), Covariances(estimator="lwf"), TangentSpace(), LDA()
)

pipelines["Cov+ElSel+TS+LDA"] = make_pipeline(
    Covariances(estimator="lwf"), ElectrodeSelection(nelec=8), TangentSpace(), LDA()
)

# Note that here the channel selection takes place before computing the covariances:
# it is done on the time epochs.
pipelines["ElSel+Cov+TS+LDA"] = make_pipeline(
    EpochSelectChannel(n_chan=8, cov_est="lwf"),
    Covariances(estimator="lwf"),
    TangentSpace(),
    LDA(),
)

##############################################################################
# Run evaluation
# ----------------
#
# Compare the pipelines using a cross-session evaluation.

# The evaluation is performed across the sessions of each subject.
evaluation = CrossSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    overwrite=False,
)

results = evaluation.process(pipelines)

###############################################################################
# Here, with the ElSel+Cov+TS+LDA pipeline, the computation time is reduced
# by approximately a factor of 8 compared to the Cov+ElSel+TS+LDA pipeline.

print("Averaging the session performance:")
print(results.groupby("pipeline").mean("score")[["score", "time"]])

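###############################################################################
# For a finer-grained view, the scores can also be broken down by session,
# using the ``session`` column of the standard MOABB results dataframe.

print(results.groupby(["pipeline", "session"])["score"].mean())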
##############################################################################
# Plot Results
# -------------
#
# Here, we plot the results to compare the pipelines.


fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])

sns.stripplot(
    data=results,
    y="score",
    x="pipeline",
    ax=ax,
    jitter=True,
    alpha=0.5,
    zorder=1,
    palette="Set1",
)
sns.pointplot(data=results, y="score", x="pipeline", ax=ax, palette="Set1").set(
    title=title
)

ax.set_ylabel("ROC AUC")
ax.set_ylim(0.3, 1)

plt.show()

###############################################################################
# Key Observations:
# -----------------
# - `Xdawn` is not ideal for the resting state paradigm. This is due to its
#   specific design for event-related potentials (ERPs).
# - The electrode selection strategy based on covariance matrices demonstrates
#   less variability and typically yields better performance.
# - However, this strategy is more time-consuming compared to the simpler
#   electrode selection based on time epoch data.
Binary file modified: docs/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip (+8.32 KB, 100%)
Binary file modified: docs/_downloads/97a1de59bce682890841bb846e3dd09c/auto_tutorials_jupyter.zip (+0 Bytes, 100%)
docs/_downloads/c0b6836dfec75ec67ce644b1417286cf/plot_Hinss2021_classification.ipynb (169 additions, 0 deletions)
@@ -0,0 +1,169 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Hinss2021 classification example\n\nThis example shows how to use the Hinss2021 dataset\nwith the resting state paradigm.\n\nIn this example, we aim to determine the most effective channel selection strategy\nfor the :class:`moabb.datasets.Hinss2021` dataset.\nThe pipelines under consideration are:\n\n- `Xdawn`\n- Electrode selection based on time epochs data\n- Electrode selection based on covariance matrices\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# License: BSD (3-clause)\n\nimport warnings\n\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom pyriemann.channelselection import ElectrodeSelection\nfrom pyriemann.estimation import Covariances\nfrom pyriemann.spatialfilters import Xdawn\nfrom pyriemann.tangentspace import TangentSpace\nfrom sklearn.base import TransformerMixin\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.pipeline import make_pipeline\n\nfrom moabb import set_log_level\nfrom moabb.datasets import Hinss2021\nfrom moabb.evaluations import CrossSessionEvaluation\nfrom moabb.paradigms import RestingStateToP300Adapter\n\n\n# Suppressing future and runtime warnings for cleaner output\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\n\nset_log_level(\"info\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create util transformer\n\nLet's create a scikit-learn transformer mixin that selects electrodes\nbased on the covariance information.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"class EpochSelectChannel(TransformerMixin):\n    \"\"\"Select channels based on covariance information.\"\"\"\n\n    def __init__(self, n_chan, cov_est):\n        self._chs_idx = None\n        self.n_chan = n_chan\n        self.cov_est = cov_est\n\n    def fit(self, X, _y=None):\n        # Get the covariances of the channels for each epoch.\n        covs = Covariances(estimator=self.cov_est).fit_transform(X)\n        # Get the average covariance between the channels\n        m = np.mean(covs, axis=0)\n        n_feats, _ = m.shape\n        # Select the `n_chan` channels having the maximum covariances.\n        all_max = []\n        for i in range(n_feats):\n            for j in range(n_feats):\n                if len(all_max) <= self.n_chan:\n                    all_max.append(m[i, j])\n                else:\n                    if m[i, j] > max(all_max):\n                        all_max[np.argmin(all_max)] = m[i, j]\n        indices = []\n        for v in all_max:\n            indices.extend(np.argwhere(m == v).flatten())\n        # We will keep only these channels for the transform step.\n        indices = np.unique(indices)\n        self._chs_idx = indices\n        return self\n\n    def transform(self, X):\n        return X[:, self._chs_idx, :]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialization Process\n\n1) Define the experimental paradigm object (RestingState)\n2) Load the datasets\n3) Select a subset of subjects and specific events for analysis\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Here we define the MNE events for the RestingState paradigm.\nevents = dict(easy=2, diff=3)\n# The resting-state paradigm is adapted to reuse the P300 processing pipeline.\nparadigm = RestingStateToP300Adapter(events=events, tmin=0, tmax=0.5)\n# We define a list with the dataset(s) to use.\ndatasets = [Hinss2021()]\n\n# To reduce the computation time in the example, we will only use a\n# subset of the subjects.\nstart_subject = 1\nstop_subject = 2\ntitle = \"Datasets: \"\nfor dataset in datasets:\n    title = title + \" \" + dataset.code\n    dataset.subject_list = dataset.subject_list[start_subject:stop_subject]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create Pipelines\n\nPipelines must be a dict of scikit-learn pipeline transformers.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"pipelines = {}\n\npipelines[\"Xdawn+Cov+TS+LDA\"] = make_pipeline(\n    Xdawn(nfilter=4), Covariances(estimator=\"lwf\"), TangentSpace(), LDA()\n)\n\npipelines[\"Cov+ElSel+TS+LDA\"] = make_pipeline(\n    Covariances(estimator=\"lwf\"), ElectrodeSelection(nelec=8), TangentSpace(), LDA()\n)\n\n# Note that here the channel selection takes place before computing the covariances:\n# it is done on the time epochs.\npipelines[\"ElSel+Cov+TS+LDA\"] = make_pipeline(\n    EpochSelectChannel(n_chan=8, cov_est=\"lwf\"),\n    Covariances(estimator=\"lwf\"),\n    TangentSpace(),\n    LDA(),\n)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run evaluation\n\nCompare the pipelines using a cross-session evaluation.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# The evaluation is performed across the sessions of each subject.\nevaluation = CrossSessionEvaluation(\n    paradigm=paradigm,\n    datasets=datasets,\n    overwrite=False,\n)\n\nresults = evaluation.process(pipelines)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here, with the ElSel+Cov+TS+LDA pipeline, the computation time is reduced\nby approximately a factor of 8 compared to the Cov+ElSel+TS+LDA pipeline.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"print(\"Averaging the session performance:\")\nprint(results.groupby(\"pipeline\").mean(\"score\")[[\"score\", \"time\"]])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Plot Results\n\nHere, we plot the results to compare the pipelines.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"fig, ax = plt.subplots(facecolor=\"white\", figsize=[8, 4])\n\nsns.stripplot(\n    data=results,\n    y=\"score\",\n    x=\"pipeline\",\n    ax=ax,\n    jitter=True,\n    alpha=0.5,\n    zorder=1,\n    palette=\"Set1\",\n)\nsns.pointplot(data=results, y=\"score\", x=\"pipeline\", ax=ax, palette=\"Set1\").set(\n    title=title\n)\n\nax.set_ylabel(\"ROC AUC\")\nax.set_ylim(0.3, 1)\n\nplt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Key Observations:\n- `Xdawn` is not ideal for the resting state paradigm. This is due to its specific design for event-related potentials (ERPs).\n- The electrode selection strategy based on covariance matrices demonstrates less variability and typically yields better performance.\n- However, this strategy is more time-consuming compared to the simpler electrode selection based on time epoch data.\n\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
Binary file modified: docs/_downloads/cab7a090c4183ca69dc0cd84d3b04413/auto_tutorials_python.zip (+0 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_plot_cross_session_motor_imagery_001.png (-20 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_plot_cross_session_motor_imagery_thumb.png (+12 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_plot_learning_curve_motor_imagery_001.png (+5 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_plot_learning_curve_motor_imagery_thumb.png (+121 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_plot_learning_curve_p300_external_001.png (-654 Bytes, 99%)
Binary file modified: docs/_images/sphx_glr_plot_learning_curve_p300_external_thumb.png (-233 Bytes, 99%)
Binary file modified: docs/_images/sphx_glr_tutorial_2_using_mulitple_datasets_001.png (+0 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_tutorial_2_using_mulitple_datasets_thumb.png (-6 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_tutorial_3_benchmarking_multiple_pipelines_001.png (+0 Bytes, 100%)
Binary file modified: docs/_images/sphx_glr_tutorial_3_benchmarking_multiple_pipelines_thumb.png (+7 Bytes, 100%)