-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathDataGeneration.py
More file actions
456 lines (369 loc) · 17.8 KB
/
DataGeneration.py
File metadata and controls
456 lines (369 loc) · 17.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
import copy
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import scipy
import scipy.interpolate
import torch
import torch.nn.functional as F
from scipy.integrate import solve_ivp
from sklearn import decomposition
from sklearn.feature_extraction import image
from torch import nn, optim
### L63 - Data generation
def AnDA_Lorenz_63(S, t, sigma, rho, beta):
    """Time derivative of the Lorenz-63 system at state S.

    S     : array-like of shape (3,), current state (x, y, z)
    t     : time (unused; kept for ODE-solver call signatures)
    sigma, rho, beta : float, Lorenz-63 parameters
    Returns dS/dt as an array of shape (3,).
    """
    dx = sigma * (S[1] - S[0])
    dy = S[0] * (rho - S[2]) - S[1]
    dz = S[0] * S[1] - beta * S[2]
    return np.array([dx, dy, dz])
def L63_sparse_noisy_data(
        y0=np.array([8., 0., 30.]),
        sigma=10.,
        rho=28.,
        beta=8. / 3.,
        dt_integration=0.01,
        final_time=5.,
        freq_obs=2,
        seed_noise=1234,
        sigma_noise=np.sqrt(2.),
        var_mask=np.array([1, 1, 1]),
        seed_sparsity=4321,
        seed_arange=1111,
        sparsity=1,
        masked_value=0.,
        num_variables=3):
    """Simulate the Lorenz-63 model and build sparse, noisy observations.

    y0             : array of shape (3,), initial condition
    sigma          : float, L63 parameter
    rho            : float, L63 parameter
    beta           : float, L63 parameter
    dt_integration : float, integration time step
    final_time     : float, final simulation time
    freq_obs       : int, number of timesteps between each observation of
                     the true state
    seed_noise     : int, random seed for noise generation
    sigma_noise    : float, standard deviation of observation noise
    var_mask       : array of shape (3,), tag for observed variables
                     (only used as-is when num_variables == 3)
    seed_sparsity  : int, random seed for data sparsity
    seed_arange    : int, random seed for the choice of observed variables
    sparsity       : float in [0, 1], fraction of observational data kept
                     for each observed variable
    masked_value   : value used for masked (unobserved) data
    num_variables  : int in {1, 2, 3}, number of observed variables

    Returns (mask, y_obs, y_true, y_missing):
        mask      : 1 where observed, 0 otherwise
        y_obs     : noisy observations, masked_value where unobserved
        y_true    : full true trajectory
        y_missing : true trajectory, masked_value where unobserved

    Raises ValueError if num_variables is not in {1, 2, 3}.
    """
    t_eval = np.linspace(0, final_time, num=int(final_time / dt_integration))
    y_true = solve_ivp(
        fun=lambda t, y: AnDA_Lorenz_63(y, t, sigma, rho, beta),
        t_span=(0, final_time),
        y0=y0,
        t_eval=t_eval,
        method='RK45').y.transpose()
    # Additive Gaussian observation noise.
    np.random.seed(seed_noise)
    y_noise = y_true + sigma_noise * np.random.randn(*y_true.shape)
    # Temporal sparsity: keep a fraction `sparsity` of the observation times,
    # drawn independently for each variable.
    np.random.seed(seed_sparsity)
    mask = np.zeros(y_true.shape)
    mask_obs = np.zeros(y_true[::freq_obs].shape)
    s = int(sparsity * mask_obs.shape[0])
    for i in range(mask_obs.shape[1]):
        mask_obs[np.random.choice(mask_obs.shape[0], size=s, replace=False), i] = 1.
    np.random.seed(seed_arange)
    mask[::freq_obs] = mask_obs
    # Variable selection: observe only `num_variables` of the 3 components,
    # chosen at random (seeded with seed_arange) unless all 3 are observed.
    if num_variables == 3:
        var_mask = np.array([1, 1, 1])
    elif num_variables == 2:
        var_mask = np.array([1, 1, 0])
        np.random.shuffle(var_mask)
    elif num_variables == 1:
        var_mask = np.array([1, 0, 0])
        np.random.shuffle(var_mask)
    else:
        # Original code did `return RaiseError`, a NameError at runtime;
        # raise an explicit, catchable exception instead.
        raise ValueError("num_variables must be 1, 2 or 3, got %r" % (num_variables,))
    mask = mask * var_mask
    y_obs = np.copy(y_noise)
    np.putmask(y_obs, mask == 0, masked_value)
    y_missing = np.copy(y_true)
    np.putmask(y_missing, mask == 0, masked_value)
    return mask, y_obs, y_true, y_missing
def L63PatchDataExtraction(sparsity=1, sigma_noise=np.sqrt(2.), num_variables=3, var_mask=np.array([1, 1, 1])):
    """
    Returns Training, Validation and Testing datasets built with
    L63_sparse_noisy_data for the Lorenz-63 model.
    Inputs:
        sparsity      : float in [0, 1], fraction of observational data kept
                        for each observed variable
        sigma_noise   : float, standard deviation of observation noise
        num_variables : int in {1, 2, 3}, number of observed variables
        var_mask      : array of shape (3,), tag for observed variables
                        (forwarded to L63_sparse_noisy_data)
    Outputs: 3 dictionaries (Training_dataset, Val_dataset, Test_dataset),
    each with the arrays 'Truth', 'Obs', 'Missing', 'Init' and 'Mask':
        'Truth'   : ground-truth trajectory patches
        'Obs'     : observed data (mask and noise applied)
        'Missing' : ground truth restricted to observed points
        'Mask'    : 1 where the point is observed, 0 otherwise
        'Init'    : linear interpolation between the observed data points
    """
    NbTraining = 128 * 100
    NbVal = 128 * 20
    NbTest = 128 * 20
    begin_time = 10  # burn-in discarded from the start of the trajectory
    final_time = (NbTraining + 2 * NbVal + begin_time) * 2
    mask, y_obs, y_true, y_missing = L63_sparse_noisy_data(
        sparsity=sparsity, sigma_noise=sigma_noise, final_time=final_time,
        num_variables=num_variables, var_mask=var_mask)

    def _interp_init(obs, m, fallback_mean):
        # Initial condition: per-patch, per-component linear interpolation
        # between observed points. A component with < 2 observations falls
        # back to adding `fallback_mean` to the whole patch — this reproduces
        # the original code's behavior exactly.
        init = np.zeros(obs.shape)
        for ii in range(obs.shape[0]):
            patch = np.zeros((obs.shape[1], obs.shape[2]))
            for kk in range(obs.shape[2]):
                indt = np.where(m[ii, :, kk] == 1.0)[0]
                indt_ = np.where(m[ii, :, kk] == 0.0)[0]
                if len(indt) > 1:
                    # interp1d does not extrapolate: clamp out-of-range gap
                    # indices to the first/last observed index.
                    indt_[np.where(indt_ < np.min(indt))] = np.min(indt)
                    indt_[np.where(indt_ > np.max(indt))] = np.max(indt)
                    fkk = scipy.interpolate.interp1d(indt, obs[ii, indt, kk], axis=-1)
                    patch[indt, kk] = obs[ii, indt, kk]
                    patch[indt_, kk] = fkk(indt_)
                else:
                    patch = patch + fallback_mean
            init[ii, :, :] = patch
        return init

    def _make_split(start, nb):
        # Cut `nb` consecutive patches of 200 timesteps starting at `start`.
        stop = start + nb * 200
        truth = y_true[start:stop].reshape((nb, 200, 3))
        missing = y_missing[start:stop].reshape((nb, 200, 3))
        obs = y_obs[start:stop].reshape((nb, 200, 3))
        m = mask[start:stop].reshape((nb, 200, 3))
        # Mean of the observed values (zeros elsewhere divided by the
        # observed fraction), used as the fallback for sparse patches.
        mean = np.mean(missing) / np.mean(m)
        return {'Truth': truth, 'Obs': obs, 'Missing': missing,
                'Init': _interp_init(obs, m, mean), 'Mask': m}

    t0 = begin_time * 100  # begin_time / dt_integration with dt = 0.01
    Training_dataset = _make_split(t0, NbTraining)
    Val_dataset = _make_split(t0 + NbTraining * 200, NbVal)
    Test_dataset = _make_split(t0 + (NbTraining + NbVal) * 200, NbTest)
    return Training_dataset, Val_dataset, Test_dataset
### L96 Data Generation
def AnDA_Lorenz_96(S, t, F, J):
    """Time derivative of the Lorenz-96 system at state S.

    S : array-like of shape (J,), current state
    t : time (unused; kept for ODE-solver call signatures)
    F : float, constant forcing term
    J : int, number of variables
    Returns dS/dt as an array of shape (J,).
    """
    # dS[j] = (S[j+1] - S[j-2]) * S[j-1] - S[j] + F with cyclic indices;
    # np.roll handles the periodic boundary terms in one shot.
    s = np.asarray(S, dtype=float)[:J]
    return (np.roll(s, -1) - np.roll(s, 2)) * np.roll(s, 1) - s + F
def L96_sparse_noisy_data(
        F=8,
        dt_integration=0.05,
        final_time=10.,
        freq_obs=2,
        seed_noise=1234,
        seed_init=1357,
        sigma_noise=np.sqrt(2.),
        var_mask=np.ones(40),
        seed_sparsity=4321,
        seed_arange=1111,
        sparsity=1,
        masked_value=0.,
        num_variables=40):
    """Simulate the Lorenz-96 model and build sparse, noisy observations.

    The initial condition is drawn at random (seeded with seed_init).
    F              : float, L96 forcing term
    dt_integration : float, integration time step
    final_time     : float, final simulation time
    freq_obs       : int, number of timesteps between each observation of
                     the true state
    seed_noise     : int, random seed for noise generation
    seed_init      : int, random seed for the initial condition
    sigma_noise    : float, standard deviation of observation noise
    var_mask       : array of shape (40,), tag for observed variables
                     (only used as-is when num_variables == 40)
    seed_sparsity  : int, random seed for data sparsity
    seed_arange    : int, random seed for the choice of observed variables
    sparsity       : float in [0, 1], fraction of observational data kept
                     for each observed variable
    masked_value   : value used for masked (unobserved) data
    num_variables  : int in [1, 40], number of observed variables

    Returns (mask, y_obs, y_true, y_missing):
        mask      : 1 where observed, 0 otherwise
        y_obs     : noisy observations, masked_value where unobserved
        y_true    : full true trajectory
        y_missing : true trajectory, masked_value where unobserved

    Raises ValueError if num_variables is not in [1, 40].
    """
    # Validate early: an out-of-range value used to crash later with an
    # obscure np.zeros(negative-size) error.
    if not 1 <= num_variables <= 40:
        raise ValueError("num_variables must be between 1 and 40, got %r" % (num_variables,))
    np.random.seed(seed_init)
    y0 = np.random.randn(40)
    t_eval = np.linspace(0, final_time, num=int(final_time / dt_integration))
    y_true = solve_ivp(
        fun=lambda t, y: AnDA_Lorenz_96(y, t, F=F, J=40),
        t_span=(0, final_time),
        y0=y0,
        t_eval=t_eval,
        method='RK45').y.transpose()
    # Additive Gaussian observation noise.
    np.random.seed(seed_noise)
    y_noise = y_true + sigma_noise * np.random.randn(*y_true.shape)
    # Temporal sparsity: keep a fraction `sparsity` of the observation times,
    # drawn independently for each variable.
    np.random.seed(seed_sparsity)
    mask = np.zeros(y_true.shape)
    mask_obs = np.zeros(y_true[::freq_obs].shape)
    s = int(sparsity * mask_obs.shape[0])
    for i in range(mask_obs.shape[1]):
        mask_obs[np.random.choice(mask_obs.shape[0], size=s, replace=False), i] = 1.
    mask[::freq_obs] = mask_obs
    # Variable selection: observe only `num_variables` of the 40 components,
    # chosen at random (seeded with seed_arange).
    np.random.seed(seed_arange)
    if num_variables != 40:
        var_mask = np.concatenate((np.ones(num_variables), np.zeros(40 - num_variables)), axis=0)
        np.random.shuffle(var_mask)
    mask = mask * var_mask
    y_obs = np.copy(y_noise)
    np.putmask(y_obs, mask == 0, masked_value)
    y_missing = np.copy(y_true)
    np.putmask(y_missing, mask == 0, masked_value)
    return mask, y_obs, y_true, y_missing
def L96PatchDataExtraction(sparsity=1, sigma_noise=np.sqrt(2.), num_variables=40):
    """
    Returns Training, Validation and Testing datasets built with
    L96_sparse_noisy_data for the Lorenz-96 model.
    Inputs:
        sparsity      : float in [0, 1], fraction of observational data kept
                        for each observed variable
        sigma_noise   : float, standard deviation of observation noise
        num_variables : int in [1, 40], number of observed variables
    Outputs: 3 dictionaries (Training_dataset, Val_dataset, Test_dataset),
    each with the arrays 'Truth', 'Obs', 'Missing', 'Init' and 'Mask':
        'Truth'   : ground-truth trajectory patches
        'Obs'     : observed data (mask and noise applied)
        'Missing' : ground truth restricted to observed points
        'Mask'    : 1 where the point is observed, 0 otherwise
        'Init'    : linear interpolation between the observed data points
    """
    NbTraining = 1280
    NbVal = 256
    NbTest = 256
    begin_time = 2  # burn-in discarded from the start of the trajectory
    final_time = 2512 * 10 + begin_time * 100
    mask, y_obs, y_true, y_missing = L96_sparse_noisy_data(
        sparsity=sparsity, sigma_noise=sigma_noise, final_time=final_time,
        num_variables=num_variables)

    def _interp_init(obs, m, fallback_mean):
        # Initial condition: per-patch, per-component linear interpolation
        # between observed points. A component with < 2 observations falls
        # back to adding `fallback_mean` to the whole patch — this reproduces
        # the original code's behavior exactly.
        init = np.zeros(obs.shape)
        for ii in range(obs.shape[0]):
            patch = np.zeros((obs.shape[1], obs.shape[2]))
            for kk in range(obs.shape[2]):
                indt = np.where(m[ii, :, kk] == 1.0)[0]
                indt_ = np.where(m[ii, :, kk] == 0.0)[0]
                if len(indt) > 1:
                    # interp1d does not extrapolate: clamp out-of-range gap
                    # indices to the first/last observed index.
                    indt_[np.where(indt_ < np.min(indt))] = np.min(indt)
                    indt_[np.where(indt_ > np.max(indt))] = np.max(indt)
                    fkk = scipy.interpolate.interp1d(indt, obs[ii, indt, kk], axis=-1)
                    patch[indt, kk] = obs[ii, indt, kk]
                    patch[indt_, kk] = fkk(indt_)
                else:
                    patch = patch + fallback_mean
            init[ii, :, :] = patch
        return init

    def _make_split(start, nb):
        # Cut `nb` consecutive patches of 200 timesteps starting at `start`.
        stop = start + nb * 200
        truth = y_true[start:stop].reshape((nb, 200, 40))
        missing = y_missing[start:stop].reshape((nb, 200, 40))
        obs = y_obs[start:stop].reshape((nb, 200, 40))
        m = mask[start:stop].reshape((nb, 200, 40))
        # Mean of the observed values (zeros elsewhere divided by the
        # observed fraction), used as the fallback for sparse patches.
        mean = np.mean(missing) / np.mean(m)
        return {'Truth': truth, 'Obs': obs, 'Missing': missing,
                'Init': _interp_init(obs, m, mean), 'Mask': m}

    t0 = begin_time * 100
    Training_dataset = _make_split(t0, NbTraining)
    Val_dataset = _make_split(t0 + NbTraining * 200, NbVal)
    Test_dataset = _make_split(t0 + (NbTraining + NbVal) * 200, NbTest)
    return Training_dataset, Val_dataset, Test_dataset