Skip to content

Commit

Permalink
alpha max fix for iterative L1
Browse files Browse the repository at this point in the history
  • Loading branch information
anujanegi committed Nov 20, 2024
1 parent 4369d10 commit 515539e
Show file tree
Hide file tree
Showing 2 changed files with 106 additions and 89 deletions.
19 changes: 17 additions & 2 deletions bsi_zoo/estimators.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,7 +253,7 @@ def _gamma_map_opt(
Parameters
----------
M : array, shape=(n_sensors, n_times)
: array, shape=(n_sensors, n_times)
Observation.
G : array, shape=(n_sensors, n_sources)
Forward operator.
Expand Down Expand Up @@ -530,6 +530,14 @@ def gprime(w):

return x

def norm_l2inf(A, n_orient, copy=True):
    """Return the l2-inf mixed norm of ``A``: the largest per-group L2 norm.

    Groups are consecutive blocks of ``n_orient`` rows (one block per source
    position); the squared group norms come from ``groups_norm2``.

    Parameters
    ----------
    A : array
        Coefficient matrix, e.g. ``L.T @ y`` when computing ``alpha_max``.
    n_orient : int
        Number of orientations per source position (group size).
    copy : bool, default True
        If True, work on a copy of ``A`` so the caller's array is untouched.

    Returns
    -------
    float
        ``sqrt(max_g ||A_g||_2^2)`` over groups ``g``; ``0.0`` for empty ``A``.
    """
    # NOTE: the docstring must be the first statement of the function body —
    # in the original it followed the import and therefore never became
    # ``__doc__``.
    from math import sqrt

    if A.size == 0:
        return 0.0
    if copy:
        # Defensive copy: groups_norm2 may be applied to a caller-owned array.
        A = A.copy()
    return sqrt(np.max(groups_norm2(A, n_orient)))

def iterative_L1(L, y, alpha=0.2, n_orient=1, max_iter=1000, max_iter_reweighting=10):
"""Iterative Type-I estimator with L1 regularizer.
Expand Down Expand Up @@ -578,9 +586,16 @@ def gprime(w):
grp_norms = np.sqrt(groups_norm2(w.copy(), n_orient))
return np.repeat(grp_norms, n_orient).ravel() + eps

alpha_max = abs(L.T.dot(y)).max() / len(L)
if n_orient==1:
alpha_max = abs(L.T.dot(y)).max() / len(L)
else:
n_dip_per_pos = 3
alpha_max = norm_l2inf(np.dot(L.T, y), n_dip_per_pos)

alpha = alpha * alpha_max

# y->M
# L->gain
x = _solve_reweighted_lasso(
L, y, alpha, n_orient, weights, max_iter, max_iter_reweighting, gprime
)
Expand Down
176 changes: 89 additions & 87 deletions bsi_zoo/run_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,10 @@
from bsi_zoo.config import get_leadfield_path

n_jobs = 20
nruns = 10
spatial_cv = [False, True]
nruns = 1
# spatial_cv = [False, True]
spatial_cv = [False]

#
subjects = ["CC120166", "CC120264", "CC120313", "CC120309"]
metrics = [
Expand All @@ -38,81 +40,81 @@

for do_spatial_cv in spatial_cv:
for subject in subjects:
"""Fixed orientation parameters for the benchmark"""

orientation_type = "fixed"
data_args_I = {
# "n_sensors": [50],
"n_times": [10],
# "n_sources": [200],
"nnz": nnzs,
"cov_type": ["diag"],
"path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
"orientation_type": [orientation_type],
"alpha": alpha_SNR, # this is actually SNR
}

data_args_II = {
# "n_sensors": [50],
"n_times": [10],
# "n_sources": [200],
"nnz": nnzs,
"cov_type": ["full"],
"path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
"orientation_type": [orientation_type],
"alpha": alpha_SNR, # this is actually SNR
}

estimators = [
(fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
(eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
(iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
(iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
(gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
]

df_results = []
for estimator, data_args, estimator_args, estimator_extra_params in estimators:
benchmark = Benchmark(
estimator,
subject,
metrics,
data_args,
estimator_args,
random_state=42,
memory=memory,
n_jobs=n_jobs,
do_spatial_cv=do_spatial_cv,
estimator_extra_params=estimator_extra_params,
)
results = benchmark.run(nruns=nruns)
df_results.append(results)
# save results
data_path = Path("bsi_zoo/data/updated_alpha_grid")
data_path.mkdir(exist_ok=True)
if do_spatial_cv:
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
else:
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
results.to_pickle(data_path / FILE_NAME)


df_results = pd.concat(df_results, axis=0)

data_path = Path("bsi_zoo/data/ramen")
data_path.mkdir(exist_ok=True)
if do_spatial_cv:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
else:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
df_results.to_pickle(data_path / FILE_NAME)

print(df_results)
# """Fixed orientation parameters for the benchmark"""

# orientation_type = "fixed"
# data_args_I = {
# # "n_sensors": [50],
# "n_times": [10],
# # "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["diag"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# data_args_II = {
# # "n_sensors": [50],
# "n_times": [10],
# # "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["full"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# estimators = [
# (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
# (iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
# ]

# df_results = []
# for estimator, data_args, estimator_args, estimator_extra_params in estimators:
# benchmark = Benchmark(
# estimator,
# subject,
# metrics,
# data_args,
# estimator_args,
# random_state=42,
# memory=memory,
# n_jobs=n_jobs,
# do_spatial_cv=do_spatial_cv,
# estimator_extra_params=estimator_extra_params,
# )
# results = benchmark.run(nruns=nruns)
# df_results.append(results)
# # save results
# data_path = Path("bsi_zoo/data/updated_alpha_grid")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# results.to_pickle(data_path / FILE_NAME)


# df_results = pd.concat(df_results, axis=0)

# data_path = Path("bsi_zoo/data/ramen")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# df_results.to_pickle(data_path / FILE_NAME)

# print(df_results)

""" Free orientation parameters for the benchmark """

Expand Down Expand Up @@ -142,22 +144,22 @@
if spatial_cv:
# currently no support for type II methods
estimators = [
(fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
]
else:
estimators = [
(fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
(eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
# (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
(iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
(iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
(iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
(gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
]

Expand All @@ -178,7 +180,7 @@
results = benchmark.run(nruns=nruns)
df_results.append(results)
# save results
data_path = Path("bsi_zoo/data/free2")
data_path = Path("bsi_zoo/data/free3")
data_path.mkdir(exist_ok=True)

if do_spatial_cv:
Expand All @@ -189,7 +191,7 @@

df_results = pd.concat(df_results, axis=0)

data_path = Path("bsi_zoo/data/free2")
data_path = Path("bsi_zoo/data/free3")
data_path.mkdir(exist_ok=True)
if do_spatial_cv:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
Expand Down

0 comments on commit 515539e

Please sign in to comment.