# nimg_simuls_compute_individual_A_linearpower.py
# Simulation: regression performance vs. individual noise on the mixing
# matrix A, with a linear link between y and the source powers.
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import RidgeCV
from sklearn.dummy import DummyRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
from library.simuls import generate_covariances
from library.spfiltering import ProjIdentitySpace, ProjSPoCSpace
from library.featuring import Diag, LogDiag, NaiveVec, Riemann
import config as cfg
print('Running individual A experiment...')

# --- Simulation settings -------------------------------------------------
n_matrices = 100     # number of covariance matrices to simulate
n_channels = 5       # sensor-space dimension
n_sources = 2        # number of underlying sources
distance_A_id = 0.7  # parameter 'mu': distance from the mixing matrix A to Id
f_powers = 'linear'  # link function between y and the source powers
direction_A = None   # direction of A drawn at random
rng = 4              # random seed
sigma = 0.0          # noise level on the target y
# per-matrix (individual) noise on A, log-spaced over [10^-2.5, 1]
noises_A = np.logspace(-2.5, 0, 10)
scoring = 'neg_mean_absolute_error'
# --- Spatial filters -----------------------------------------------------
identity = ProjIdentitySpace()  # no spatial filtering: stay in sensor space
spoc = ProjSPoCSpace(n_compo=n_channels, scale='auto', reg=0, shrink=0)
# --- Featurizers: map covariance matrices to feature vectors -------------
upper = NaiveVec(method='upper')  # upper-triangular coefficients
diag = Diag()                     # raw diagonal (channel powers)
riemann = Riemann(n_fb=1, metric='riemann')  # Riemannian tangent space
# NOTE(review): a LogDiag() instance was created here but never used in any
# pipeline below; removed as dead code (the LogDiag import is left intact).
# Renamed the scaler from `sc` to `scaler`: the old name was clobbered by
# the cross-validation scores in the experiment loop further down.
scaler = StandardScaler()
# --- Regressors ----------------------------------------------------------
dummy = DummyRegressor()  # chance-level baseline
ridge = RidgeCV(alphas=np.logspace(-3, 5, 100), scoring=scoring)
# --- Models under comparison ---------------------------------------------
pipelines = {
    'dummy': make_pipeline(identity, diag, scaler, dummy),
    'upper': make_pipeline(identity, upper, scaler, ridge),
    'diag': make_pipeline(identity, diag, scaler, ridge),
    'spoc': make_pipeline(spoc, diag, scaler, ridge),
    'riemann': make_pipeline(identity, riemann, scaler, ridge)
}
# --- Run the experiment --------------------------------------------------
# results[i, j] = mean cross-validated MAE of pipeline i at noise level j.
results = np.zeros((len(pipelines), len(noises_A)))
for j, noise_A in enumerate(noises_A):
    X, y = generate_covariances(n_matrices, n_channels, n_sources,
                                sigma=sigma, distance_A_id=distance_A_id,
                                f_p=f_powers, direction_A=direction_A,
                                noise_A=noise_A, rng=rng)
    # add a singleton frequency-band axis expected by the pipelines
    X = X[:, None, :, :]
    for i, (name, pipeline) in enumerate(pipelines.items()):
        print('noise_A = {}, {} method'.format(noise_A, name))
        # FIX: was assigned to `sc`, silently shadowing the StandardScaler
        # instance defined above; renamed to `scores`.
        scores = cross_val_score(pipeline, X, y, scoring=scoring,
                                 cv=10, n_jobs=3, error_score=np.nan)
        # scoring is neg-MAE, so negate to store the MAE itself;
        # a failed fold (error_score=np.nan) propagates NaN on purpose.
        results[i, j] = -np.mean(scores)
# --- Persist results under cfg.path_outputs ------------------------------
outputs = [
    ('simuls/individual_spatial/scores_powers.csv',
     results, dict(delimiter=',')),
    ('simuls/individual_spatial/names_powers.csv',
     np.array(list(pipelines)), dict(fmt='%s')),
    ('simuls/individual_spatial/noises_A_powers.csv',
     noises_A, dict()),
]
for fname, data, kwargs in outputs:
    np.savetxt(op.join(cfg.path_outputs, fname), data, **kwargs)
# draft plot
# f, ax = plt.subplots(figsize=(4, 3))
# results /= results[0]
# for i, name in enumerate(list(pipelines)):
#     ax.plot(noises_A, results[i],
#             label=name,
#             linewidth=3,
#             linestyle='--' if name == 'dummy' else None)
# ax.set_xlabel('individual noise')
# plt.grid()
# ax.set_ylabel('Normalized M.A.E.')
# ax.hlines(0, noises_A[0], noises_A[-1], label=r'Perfect',
# color='k', linestyle='--', linewidth=3)
# ax.legend(loc='lower right')
# ax.set_xscale('log')
# plt.show()