-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathMethods.py
More file actions
1052 lines (846 loc) · 39.8 KB
/
Methods.py
File metadata and controls
1052 lines (846 loc) · 39.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Python distribution modules
from multiprocessing import Pool
from copy import deepcopy
from collections import OrderedDict
from itertools import combinations
# Community modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import num2date
from numpy.random import choice, randint, seed
# Local modules
from ArgParse import ParseCmdLine
from EDM import EmbedData, ReadEmbeddedData, Prediction, \
SimplexProjection, Distance, DistanceMetric,\
ComputeError, nCol, nRow
# Global enum as a class
class Source :
    """Enumeration of invocation contexts used to select return behavior.

    Python  (0) : called from a script; results are printed/plotted only.
    Jupyter (1) : called from a notebook; results are returned as a dict.
    """
    Python  = 0
    Jupyter = 1
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def Predict( args, data = None, colNames = None, target = None,
             source = Source.Python ):
    '''
    Data input/embedding wrapper for EDM.Prediction() to compute:
      Simplex projection of observational data (Sugihara, 1990), or
      SMap projection of observational data (Sugihara, 1994).

    Input can come from a file or be passed in directly (data, colNames,
    target):
      * --embedding (-e) with data passed in: data is used as-is, it is
        assumed to already be an embedding or multivariable matrix.
      * --embedding (-e) with data = None: args.inputFile is read by
        ReadEmbeddedData(), a .csv formatted [ Time, Dim_1, Dim_2, ... ]
        where Dim_1 is observed data, Dim_2 data offset by τ, Dim_3 by 2τ...
        The first E + 1 columns (Time, D1, ... D_E) are returned; E may be
        less than the total number of columns. Columns to embed and the
        target can also be selected with -c (columns) and -r (target).
      * otherwise: the data (file or parameter) is time-delay embedded by
        EmbedData(). Note the embedding has E - 1 fewer rows than the input.

    In all cases 'time' is required in column 0.
    Returns a result dict when source is Source.Jupyter, otherwise None.
    '''
    if not args.embedded :
        # Timeseries input: delay-embed via EmbedData()
        embedding, colNames, target = EmbedData( args, data, colNames )
    elif data is None :
        # args.inputFile is an embedding or multivariable data frame.
        # ReadEmbeddedData() sets args.E to the number of columns
        # if the -c (columns) and -t (target) options are used.
        embedding, colNames, target = ReadEmbeddedData( args )
    else :
        # Data matrix passed in as parameter, no embedding needed;
        # target is taken as-is from the input parameters.
        embedding = data

    rho, rmse, mae, header, output, smap_output = \
        Prediction( embedding, colNames, target, args )

    if source == Source.Jupyter :
        return { 'rho':rho, 'RMSE':rmse, 'MAE':mae, 'header':header,
                 'prediction':output, 'S-map':smap_output }
    return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def Embed( args, data = None, colNames = None, source = Source.Python ):
    '''
    Wrapper for EDM.EmbedData().

    Time-delay embed data vector(s) from args.inputFile into
    args.Dimensions at multiples of args.tau.  If the -f --forwardTau
    option is specified the embedding is x(t) + τ instead of x(t) - τ.
    The -e (embedColumns) option selects zero-offset column numbers or
    column names to embed from args.inputFile.

    EmbedData() writes a .csv with header [Time, Dim_1, Dim_2...] if -o
    is specified.  Note: the output has args.Dimension - 1 (E-1) fewer
    rows (observations) than the input data.

    Returns a result dict when source is Source.Jupyter, otherwise None.
    '''
    embedding, header, target = EmbedData( args, data, colNames )

    if args.Debug :
        numRows, numCols = embedding.shape
        print( "Embed() " + ' '.join( args.columns ) +\
               " from " + args.inputFile +\
               " E=" + str( args.E ) + " " +\
               str( numRows ) + " rows, " +\
               str( numCols ) + " columns." )
        print( header )
        print( embedding[ 0:3, : ] )

    if source == Source.Jupyter :
        return { 'header':header, 'embedding':embedding, 'target':target }
    return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def EmbedDimensions( args, source = Source.Python ):
    '''
    Evaluate embeddings for E = 1 to 10, overriding args.E and args.k_NN.

    Two input options: with -c (columns), args.inputFile is treated as a
    timeseries embedded dynamically by EmbedData() for each evaluation;
    with -e (embedded), args.inputFile is a .csv with an embedding or
    multivariables already in place (read by ReadEmbeddedData(); the
    vector in the second column (j=1) is the observed data).
    Prediction() sets k_NN = E + 1 if -k not specified and method is
    Simplex.

    Returns { E : rho } when source is Source.Jupyter, otherwise None.
    '''
    # Save args.plot flag, but disable so Prediction() does not plot
    showPlot  = args.plot
    args.plot = False

    # Create iterable with args variants for E = 1 to 10
    argsList = []
    for E in range( 1, 11 ) :
        newArgs      = deepcopy( args )
        newArgs.E    = E
        newArgs.k_NN = E + 1
        argsList.append( newArgs )

    # Submit EmbedPredict jobs to a process pool.
    # Context manager terminates the workers (original leaked the Pool).
    with Pool() as pool :
        results = pool.map( EmbedPredict, argsList )

    E_rho = {} # Dict to hold E : rho pairs from EmbedPredict() tuple
    for result in results :
        if result is None :  # identity test, not == None
            continue
        E_rho[ result[ 0 ] ] = result[ 1 ]

    # Console output
    print( "{:<5} {:<10}".format('E','ρ') )
    for E_, rho_ in E_rho.items():
        print( "{0:<5} {1:<10}".format( E_, rho_ ) )

    #----------------------------------------------------------
    if showPlot:
        fig, ax = plt.subplots( 1, 1, figsize = args.figureSize, dpi = 150 )
        # Materialize the dict views: older matplotlib versions
        # cannot plot dict_keys / dict_values directly.
        ax.plot( list( E_rho.keys() ), list( E_rho.values() ),
                 label = 'Predictions_t(+{0:d})'.format( args.Tp ),
                 color='blue', linewidth = 3 )
        if args.plotTitle is not None :
            title = args.plotTitle
        else:
            title = args.inputFile + ' Tp=' + str( args.Tp )
        ax.set( xlabel = 'Embedding Dimension',
                ylabel = 'Prediction Skill' + r' $\rho$',
                title = title )
        plt.show()

    if source == Source.Jupyter :
        return E_rho
    return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def EmbedPredict( args ):
    '''
    Pool worker function called from EmbedDimensions().
    Returns a ( E, rho ) tuple with rho rounded to 3 decimals.
    '''
    if args.embedded :
        # -e specified: ReadEmbeddedData() sets args.E to the number of
        # columns specified if -c (columns) and -t (target) are used,
        # otherwise it uses args.E to read E columns.
        # Temporarily set args.E so at least 10 dimensions are read.
        E_requested = args.E
        args.E      = 10
        embedding, colNames, target = ReadEmbeddedData( args )
        # Restore args.E for Prediction
        args.E = E_requested
    else :
        # -e not specified, embed on each iteration
        embedding, colNames, target = EmbedData( args )

    rho, rmse, mae, header, output, smap_output = \
        Prediction( embedding, colNames, target, args )

    return ( args.E, round( rho, 3 ) )
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def PredictDecays( args, source = Source.Python ):
    '''
    Evaluate prediction skill for forecast intervals Tp = 1 to 10,
    overriding args.Tp.

    Two input options: with -c (columns), args.inputFile is treated as a
    timeseries embedded by EmbedData(); with -e (embedded), args.inputFile
    is a .csv with an embedding or multivariables already in place (read
    by ReadEmbeddedData(); the vector in the second column (j=1) is the
    observed data).  Prediction() sets k_NN = E+1 if -k not specified and
    method is Simplex.

    Returns { Tp : rho } when source is Source.Jupyter, otherwise None.
    '''
    # Save args.plot flag, but disable so Prediction() does not plot
    showPlot  = args.plot
    args.plot = False

    # Embed once: the same embedding is reused for every Tp
    if not args.embedded :
        embedding, colNames, target = EmbedData( args )
    else :
        # ReadEmbeddedData() sets args.E to the number of columns specified
        # if the -c (columns) and -t (target) options are used, otherwise
        # it uses args.E to read E columns.
        embedding, colNames, target = ReadEmbeddedData( args )

    # Create iterable with args variants for Tp = 1 to 10
    argsEmbeddingList = []
    for T in range( 1, 11 ) :
        newArgs    = deepcopy( args )
        newArgs.Tp = T
        # Add the embedding, colNames, target in a tuple
        argsEmbeddingList.append( ( newArgs, embedding,
                                    colNames, target, 'Tp' ) )

    # Submit PredictFunc jobs to a process pool.
    # Context manager terminates the workers (original leaked the Pool).
    with Pool() as pool :
        results = pool.map( PredictFunc, argsEmbeddingList )

    Tp_rho = {} # Dict to hold Tp : rho pairs from PredictFunc() tuple
    for result in results :
        if result is None :  # identity test, not == None
            continue
        Tp_rho[ result[ 0 ] ] = result[ 1 ]

    # Console output
    print( "{:<5} {:<10}".format('Tp','ρ') )
    for T_, rho_ in Tp_rho.items():
        print( "{0:<5} {1:<10}".format( T_, rho_ ) )

    #----------------------------------------------------------
    if showPlot:
        fig, ax = plt.subplots( 1, 1, figsize = args.figureSize, dpi = 150 )
        # Materialize the dict views: older matplotlib versions
        # cannot plot dict_keys / dict_values directly.
        ax.plot( list( Tp_rho.keys() ), list( Tp_rho.values() ),
                 label = 'Predictions_t(+{0:d})'.format( args.Tp ),
                 color='blue', linewidth = 3 )
        if args.plotTitle is not None :
            title = args.plotTitle
        else:
            title = args.inputFile + ' E=' + str( args.E )
        ax.set( xlabel = 'Forecast time Tp',
                ylabel = 'Prediction Skill' + r' $\rho$',
                title = title )
        plt.show()

    if source == Source.Jupyter :
        return Tp_rho
    return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def PredictFunc( argsEmbedding ) :
    '''
    Pool worker function called from PredictDecays() or SMapNL().

    argsEmbedding : ( args, embedding, colNames, target, outputType )
    Returns ( args.Tp, rho ) or ( args.theta, rho ) depending on
    outputType; rho is rounded to 3 decimals.
    '''
    args, embedding, colNames, target, outputType = argsEmbedding

    # Prediction does not currently use colNames
    rho, rmse, mae, header, output, smap_output = \
        Prediction( embedding, colNames, target, args )

    if 'Tp' in outputType:
        return ( args.Tp, round( rho, 3 ) )
    elif 'theta' in outputType:
        return ( args.theta, round( rho, 3 ) )
    else:
        raise Exception( 'PredictFunc() Invalid output specified.' )
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def SMapNL( args, data = None, colNames = None, target = None, thetas = None,
            source = Source.Python ):
    '''
    Evaluate S-map prediction skill over localization parameters θ,
    overriding -t (theta) with θ = 0.01 to 9 (or the thetas argument).

    Two input options for file data: with -c (columns), args.inputFile is
    treated as a timeseries embedded by EmbedData() for each evaluation;
    with -e (embedded), args.inputFile is a .csv with an embedding or
    multivariables already in place (read by ReadEmbeddedData(); the
    vector in the second column (j=1) is the observed data).
    Data can also be passed in (data, colNames, target) instead of read
    from a file.

    Returns OrderedDict { theta : rho } when source is Source.Jupyter,
    otherwise None.
    '''
    args.method = 'smap'

    # Save args.plot flag, but disable so Prediction() does not plot
    showPlot  = args.plot
    args.plot = False

    if args.embedded :
        if data is None :
            # args.inputFile is an embedding or multivariable data frame.
            # ReadEmbeddedData() sets args.E to the number of columns
            # if the -c (columns) and -t (target) options are used.
            embedding, colNames, target = ReadEmbeddedData( args )
        else:
            # Data matrix is passed in as parameter, no embedding needed;
            # target taken as-is from input parameters
            embedding = data
    else :
        # args.inputFile are timeseries data to be embedded by EmbedData
        embedding, colNames, target = EmbedData( args, data, colNames )

    if thetas is None :
        # Evaluate theta localization parameter from 0.01 to 9
        Theta = [ 0.01, 0.1, 0.3, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9 ]
    else :
        if len( thetas ) < 1 :
            raise Exception( 'SMapNL() theta must have at least one value.' )
        Theta = thetas

    # Create iterable with args variants for theta
    argsEmbeddingList = []
    for theta in Theta :
        newArgs       = deepcopy( args )
        newArgs.theta = theta
        # Add the embedding, colNames, target in a tuple
        argsEmbeddingList.append( ( newArgs, embedding,
                                    colNames, target, 'theta' ) )

    # Submit PredictFunc jobs to a process pool.
    # Context manager terminates the workers (original leaked the Pool).
    with Pool() as pool :
        results = pool.map( PredictFunc, argsEmbeddingList )

    # Dict to hold theta : rho pairs from PredictFunc() tuple
    theta_rho = OrderedDict()
    for result in results :
        if result is None :  # identity test, not == None
            continue
        theta_rho[ result[ 0 ] ] = result[ 1 ]

    # Console output
    print( "{:<5} {:<10}".format('θ','ρ') )
    for theta_, rho_ in theta_rho.items():
        print( "{0:<5} {1:<10}".format( theta_, rho_ ) )

    #----------------------------------------------------------
    if showPlot:
        fig, ax = plt.subplots( 1, 1, figsize = args.figureSize, dpi = 150 )
        # Materialize the dict views: older matplotlib versions
        # cannot plot dict_keys / dict_values directly.
        ax.plot( list( theta_rho.keys() ), list( theta_rho.values() ),
                 label = 'Predictions_t(+{0:d})'.format( args.Tp ),
                 color='blue', linewidth = 3 )
        if args.plotTitle is not None :
            title = args.plotTitle
        else:
            title = args.inputFile + ' Tp=' + str( args.Tp ) +\
                    ' E=' + str( args.E )
        ax.set( xlabel = 'S Map Localization θ',
                ylabel = 'Prediction Skill' + r' $\rho$',
                title = title )
        plt.show()

    if source == Source.Jupyter :
        return theta_rho
    return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def Multiview( args, source = Source.Python ):
    '''
    Multiview embedding prediction.
    Data input requires -c (columns) to specify timeseries columns
    in inputFile (-i) that will be embedded by EmbedData(), and the
    -r (target) specifying the data target column in inputFile.
    args.E represents the number of variables to combine for each
    assessment, as well as the number of time delays to create in
    EmbedData() for each variable.
    Prediction() with Simplex sets k_NN equal to E+1 if -k not specified.
    --
    Ye H., and G. Sugihara, 2016. Information leverage in interconnected
    ecosystems: Overcoming the curse of dimensionality.
    Science 353:922–925.
    '''
    # --- Validate required arguments -------------------------------
    if not len( args.columns ) :
        raise RuntimeError( 'Multiview() requires -c to specify data.' )
    if not args.target :
        raise RuntimeError( 'Multiview() requires -r to specify target.' )
    if args.E < 0 :
        raise RuntimeError( 'Multiview() E is required.' )

    # Save args.plot flag, and disable so Prediction() does not plot
    showPlot = args.plot
    args.plot = False

    # Save args.outputFile and reset so Prediction() does not write
    outputFile = args.outputFile
    args.outputFile = None

    # Embed data from inputFile
    embedding, colNames, target = EmbedData( args )

    # Combinations of possible embedding variables (columns), E at-a-time
    # Column 0 is time. Coerce the iterable into a list of E-tuples
    nVar = len( args.columns )
    combos = list( combinations( range( 1, nVar * args.E + 1 ), args.E ) )

    # Require that each embedding has at least one coordinate with
    # observed data (zero time lag). This corresponds to combo tuples
    # with modulo E == 1.
    # Note: this only works if the data (unlagged) are in columns
    # 1, 1 + E, 1 + 2E, ... which is consistent with EmbedData() output.
    # NOTE(review): when args.E == 1, x % 1 is always 0, so this filter
    # discards every combo — confirm E == 1 is not a supported case.
    combo_i = []
    for i in range( len( combos ) ) :
        c = combos[i] # a tuple of combination indices
        for x in c:
            if x % args.E == 1:
                combo_i.append( i )
                break

    combos = [ combos[i] for i in combo_i ]

    if not args.multiview :
        # Ye & Sugihara suggest sqrt( m ) as the number of embeddings to avg
        args.multiview = max( 2, int( np.sqrt( len( combos ) ) ) )
        print( 'Multiview() Set view sample size to ' + str( args.multiview ))

    #---------------------------------------------------------------
    # Evaluate variable combinations.
    # Note that this is done within the library itself (in-sample).
    # Save a copy of the specified prediction observations.
    prediction = args.prediction

    # Override the args.prediction for in-sample forecast skill evaluation
    args.prediction = args.library

    # Process pool to evaluate combos
    # NOTE(review): this Pool is never close()/join()'d — worker
    # processes persist until interpreter exit; consider a with-block.
    pool = Pool()

    # Iterable list of arguments for EvalLib()
    argList = []
    for combo in combos :
        argList.append( ( args, combo, embedding, colNames, target ) )

    # Submit EvalLib jobs to the process pool
    results = pool.map( EvalLib, argList )

    # Dict to hold combos : rho pairs from EvalLib() tuple
    Combo_rho = {}
    for result in results :
        if result == None:
            continue
        Combo_rho[ result[ 0 ] ] = result[ 1 ]

    #---------------------------------------------------------------
    # Rank the in-sample forecasts by rho, descending. zip(*...) unzips
    # the sorted (rho, combo) pairs into two parallel tuples.
    rho_sort, combo_sort = zip( *sorted( zip( Combo_rho.values(),
                                              Combo_rho.keys() ),
                                         reverse = True ) )

    if args.verbose:
        print( "Multiview() In sample sorted embeddings:" )
        print( 'Columns ρ' )
        for i in range( len( combo_sort ) ):
            print(str( combo_sort[i] ) + " " + str( round( rho_sort[i],4)))

    #---------------------------------------------------------------
    # Perform predictions with the top args.multiview embeddings
    # Reset the user specified prediction vector
    args.prediction = prediction

    argList.clear() # Iterable list of arguments for EvalPred()

    # Take the top args.multiview combos
    for combo in combo_sort[ 0: args.multiview ] :
        argList.append( ( args, combo, embedding, colNames, target ) )

    # Submit EvalPred jobs to the process pool
    results = pool.map( EvalPred, argList )

    Results = OrderedDict() # Dictionary of dictionaries results each combo
    for result in results :
        if result == None:
            continue
        Results[ result[ 0 ] ] = result[ 1 ]

    # Console output
    print( "Multiview() Prediction Embeddings:" )
    print( "Columns Names ρ mae rmse" )
    for key in Results.keys() :
        result = Results[ key ]
        print( str( key ) + " " + ' '.join( result[ 'names' ] ) +\
               " " + str( round( result[ 'rho' ], 4 ) ) +\
               " " + str( round( result[ 'mae' ], 4 ) ) +\
               " " + str( round( result[ 'rmse' ], 4 ) ) )

    #----------------------------------------------------------
    # Compute Multiview averaged prediction
    # The output item of Results dictionary is a matrix with three
    # columns [ Time, Data, Prediction_t() ]
    # Collect the Predictions into a single matrix
    aresult = Results[ combo_sort[0] ]
    nrows = nRow( aresult['output'] )
    time = aresult['output'][:,0]
    data = aresult['output'][:,1]

    # M holds one prediction column per evaluated embedding
    M = np.zeros( ( nrows, len( Results ) ) )
    col_i = 0
    for result in Results.values() :
        output = result[ 'output' ]
        M[ :, col_i ] = output[ :, 2 ] # Prediction is in col j=2
        col_i = col_i + 1

    # Multiview prediction = mean across the top embeddings
    prediction = np.mean( M, axis = 1 )
    multiview_out = np.column_stack( ( time, data, prediction ) )

    # Write output
    header = 'Time,Data,Prediction_t(+{0:d})'.format( args.Tp )
    if outputFile:
        np.savetxt( args.path + outputFile, multiview_out, fmt = '%.4f',
                    delimiter = ',', header = header, comments = '' )

    # Estimate correlation coefficient on observed : predicted data
    rho, rmse, mae = ComputeError( data, prediction )

    print( ("Multiview() ρ {0:5.3f} RMSE {1:5.3f} "
            "MAE {2:5.3f}").format( rho, rmse, mae ) )

    #----------------------------------------------------------
    if showPlot:
        Time = multiview_out[ :, 0 ] # Required to be first (j=0) column
        if args.plotDate :
            Time = num2date( Time )

        fig, ax = plt.subplots( 1, 1, figsize = args.figureSize, dpi = 150 )
        ax.plot( Time, multiview_out[ :, 1 ],
                 label = 'Observations',
                 color='blue', linewidth = 2 )
        ax.plot( Time, multiview_out[ :, 2 ],
                 label = 'Predictions_t(+{0:d})'.format( args.Tp ),
                 color='red', linewidth = 2 )

        if args.verbose : # Plot all projections
            for col in range( nCol( M ) ) :
                ax.plot( multiview_out[ :, 0 ], M[ :, col ],
                         label = combo_sort[col], linewidth = 2 )

        if args.plotTitle is not None :
            title = args.plotTitle
        else:
            title = "Multiview " + args.inputFile +\
                    ' Tp=' + str( args.Tp ) +\
                    ' E=' + str( args.E ) +\
                    r' $\rho$=' + str( round( rho, 2 ) )

        ax.legend()
        ax.set( xlabel = args.plotXLabel,
                ylabel = args.plotYLabel,
                title = title )
        plt.show()

    if source == Source.Jupyter :
        return { 'header':header, 'multiview':multiview_out,
                 'rho':rho, 'RMSE':rmse, 'MAE':mae }
    else:
        return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def EvalLib( argsList ) :
    '''
    Pool worker: evaluate one variable combination in-sample (within the
    library) for Multiview().

    argsList : ( args, combo, embedding, colNames, target )
    Returns ( combo, rho ) with rho rounded to 5 decimals.
    '''
    args, combo, embedding, colNames, target = argsList

    # Extract the variable combination, prepending the time column (0,)
    # as Prediction() requires time in column 0
    embed = np.take( embedding, (0,) + combo, axis = 1 )

    # Evaluate prediction skill
    rho, rmse, mae, header, output, smap_output = \
        Prediction( embed, colNames, target, args )

    return ( combo, round( rho, 5 ) )
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def EvalPred( argsList ) :
    '''
    Pool worker: evaluate one variable combination outside the library
    (on the prediction set) for Multiview().

    argsList : ( args, combo, embedding, colNames, target )
    Returns ( combo, Result ) where Result is a dict with keys
    'names', 'rho', 'rmse', 'mae', 'header', 'output'.
    '''
    args, combo, embedding, colNames, target = argsList

    # Extract the variable combination, prepending the time column (0,)
    # as Prediction() requires time in column 0
    embed = np.take( embedding, (0,) + combo, axis = 1 )
    Names = [ colNames[i] for i in combo ]

    # Evaluate prediction skill
    rho, rmse, mae, header, output, smap_output = \
        Prediction( embed, colNames, target, args )

    Result = { 'names' : Names, 'rho' : rho,
               'rmse' : rmse, 'mae' : mae,
               'header': header, 'output' : output }

    return ( combo, Result )
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def CCM( args, source = Source.Python ):
    '''
    Compute simplex cross-map skill over (random) subsamples of a time series.
    Data are a .csv file with multiple simultaneous observations (columns)
    and "time" in the first column. The -r (target) column is used for
    the cross prediction, -c (column) is embedded to dimension E.
    CCM is performed simultaneously between both target and column with
    the use of a process Pool.

    Arguments:
      -L (libsize) specifies a list of library sizes [start, stop, increment]
      -s  number of subsamples generated at each library size
      -R  random subsamples
      -rp subsample with replacement

    Simplex "Predictions" are made over the same data/embedding slices as
    the library so that -l and -p parameters have no meaning.

    Returns { lib_sizes, column_target_rho, target_column_rho } when
    source is Source.Jupyter, otherwise None.
    '''
    if not len( args.columns ) :
        raise RuntimeError( "CCM() -c must specify the column to embed." )
    if not args.target :
        raise RuntimeError( "CCM() -r must specify the target column." )

    if args.seed :
        seed( int( args.seed ) )

    args.method = 'simplex'

    # Save args.plot flag, but disable so Prediction() does not plot
    showPlot  = args.plot
    args.plot = False

    # Create iterable with args for the two CrossMap() calls
    argsList = []
    # Cross mapping from -c (columns) to -r (target):
    argsList.append( deepcopy( args ) )

    # Switch the columns and target in args, assume only one column
    target  = args.target
    columns = args.columns
    args.target  = columns[0]
    args.columns = [target]
    # Cross mapping from -r (target) to -c (columns) :
    argsList.append( deepcopy( args ) )

    # Switch back for output and plotting
    args.target  = target
    args.columns = columns

    # Submit the CrossMap jobs to a process pool.
    # Context manager terminates the workers (original leaked the Pool).
    with Pool() as pool :
        results = pool.map( CrossMap, argsList )

    R0 = results[ 0 ] # tuple ( ID, PredictLibStats{} )
    R1 = results[ 1 ] # tuple ( ID, PredictLibStats{} )

    # Extract results based on the "columns to target" ID.
    # BUG FIX: original used a substring test ('in'); the ID is an exact
    # string built by CrossMap(), so equality is the intended comparison.
    if R0[ 0 ] == str( args.columns ) + " to " + args.target :
        Col_Targ = R0[ 1 ]
        Targ_Col = R1[ 1 ]
    else :
        Col_Targ = R1[ 1 ]
        Targ_Col = R0[ 1 ]

    start, stop, increment = args.libsize

    # BUG FIX: lib_sizes / col_targ_rho / targ_col_rho were previously
    # computed only inside the showPlot branch, raising NameError on the
    # Jupyter return path when plotting was disabled. Compute them
    # unconditionally. ρ is element [0] of each stats tuple.
    lib_sizes    = np.arange( start, stop + 1, increment )
    col_targ_rho = [ Col_Targ[ lib_size ][0] \
                     for lib_size in range( start, stop + 1, increment ) ]
    targ_col_rho = [ Targ_Col[ lib_size ][0] \
                     for lib_size in range( start, stop + 1, increment ) ]

    # Console output
    print( "lib_size ρ " + args.columns[0] + " ρ " + args.target )
    print( " " + args.target + " " + args.columns[0] )
    for i, lib_size in enumerate( range( start, stop + 1, increment ) ) :
        print( "{:>8} {:>10} {:>10}".format( lib_size,
                                             np.round( col_targ_rho[ i ], 2 ),
                                             np.round( targ_col_rho[ i ], 2 ) ) )

    if showPlot :
        #-------------------------------------------------------
        # Plot rho at each library size
        fig, ax = plt.subplots( 1, 1, figsize = args.figureSize, dpi = 150 )
        ax.plot( lib_sizes, col_targ_rho,
                 linewidth = 3, color = 'blue',
                 label = args.columns[0] + " to " + args.target )
        ax.plot( lib_sizes, targ_col_rho,
                 linewidth = 3, color = 'red',
                 label = args.target + " to " + args.columns[0] )
        plt.axhline( y = 0, linewidth = 1 )
        ax.legend()

        if args.plotTitle is not None :
            title = args.plotTitle
        else:
            title = args.inputFile + ' E=' + str( args.E )

        ax.set( xlabel = 'Library Size',
                ylabel = "Cross map correlation " + r' $\rho$',
                title = title )
        plt.show()

    if source == Source.Jupyter :
        return { 'lib_sizes':lib_sizes, 'column_target_rho':col_targ_rho,
                 'target_column_rho':targ_col_rho }
    return
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def CrossMap( args ) :
    '''
    Pool worker function called from CCM().
    Embeds the -c column, then for each library size in args.libsize
    computes mean Simplex cross-map skill over args.subsample subsamples
    (random, with or without replacement) or over one contiguous block.
    Returns ( "columns to target" ID string, { lib_size : (rho, rmse, mae) } ).
    '''
    # Generate embedding on the data to be cross mapped (-c column)
    embedding, colNames, target = EmbedData( args )

    # Use entire library and prediction from embedding matrix
    # NOTE(review): predictionMatrix is assigned but not referenced below.
    libraryMatrix = predictionMatrix = embedding
    N_row = nRow( libraryMatrix )

    # Range of CCM library indices
    start, stop, increment = args.libsize

    if args.randomLib and not args.replacement :
        # Sampling WITHOUT replacement: the requested lib_size cannot
        # exceed the number of available rows. (Original comment said
        # "with replacement", which was wrong for this branch.)
        if stop > N_row :
            raise RuntimeError( "CCM CrossMap() Random library without "+\
                                "replacement requires max lib_size " +\
                                str( stop ) + " less than or equal to " +\
                                "N_row " + str( N_row ) )

    if args.randomLib :
        # Random samples from library
        maxSamples = args.subsample
    else:
        # Contiguous samples up to the size of the library
        maxSamples = 1

    # Simplex: if k_NN not specified, set k_NN to E + 1
    if args.k_NN < 0 :
        args.k_NN = args.E + 1
        if args.verbose:
            print( "CCM() Set k_NN to E + 1 = " + str( args.k_NN ) +\
                   " for SimplexProjection." )

    #-----------------------------------------------------------------
    print( "CCM(): Simplex cross mapping from " + str( args.columns ) +\
           " to " + args.target + " E=" + str( args.E ) +\
           " k_nn=" + str( args.k_NN ) +\
           " Library range: [{}, {}, {}]".format( start, stop, increment ))

    #-----------------------------------------------------------------
    # Distance for all possible pred : lib E-dimensional vector pairs
    # Distances is a Matrix of all row to to row distances
    #-----------------------------------------------------------------
    Distances = CCMGetDistances( libraryMatrix, args )

    #----------------------------------------------------------
    # Predictions
    #----------------------------------------------------------
    PredictLibStats = {} # { lib_size : ( rho, r, rmse, mae ) }

    # Loop for library sizes
    for lib_size in range( start, stop + 1, increment ) :

        if args.Debug :
            print( "CCM(): lib_size " + str( lib_size ) )

        # Per-subsample (rho, rmse, mae) accumulator
        prediction_rho = np.zeros( ( maxSamples, 3 ) )

        # Loop for subsamples
        for n in range( maxSamples ) :

            if args.randomLib :
                if args.replacement :
                    # Uniform random sample of rows, with replacement
                    lib_i = randint( low  = 0,
                                     high = N_row,
                                     size = lib_size )
                else :
                    # Uniform random sample of rows, without replacement
                    lib_i = choice( a = N_row, # = np.arange(a)
                                    size = lib_size,
                                    replace = False )
            else :
                if lib_size >= N_row :
                    # library size exceeded, back down
                    lib_i = np.arange( 0, N_row )

                    if args.warnings or args.verbose :
                        print( "CCM(): max lib_size is {}, "
                               "lib_size has been limited.".format( N_row ) )
                else :
                    # Contiguous blocks up to N_rows = maxSamples
                    if n + lib_size < N_row :
                        lib_i = np.arange( n, n + lib_size )
                    else:
                        # n + lib_size exceeds N_row, wrap around to data origin
                        lib_start = np.arange( n, N_row )
                        max_i = min( lib_size - (N_row - n), N_row )
                        lib_wrap = np.arange( 0, max_i )
                        lib_i = np.concatenate((lib_start,lib_wrap),axis=0)

            #----------------------------------------------------------
            # k_NN nearest neighbors : Local CCMGetNeighbors() function
            #----------------------------------------------------------
            neighbors, distances = CCMGetNeighbors( Distances, lib_i, args )

            predictions, const_predict = \
                SimplexProjection( libraryMatrix[ lib_i, : ],
                                   target [ lib_i ],
                                   target [ lib_i ], # const_target NA
                                   neighbors,
                                   distances,
                                   args )

            rho, rmse, mae = ComputeError( target[ lib_i ], predictions )

            prediction_rho[ n, : ] = [ rho, rmse, mae ]

        # Mean skill over the subsamples at this library size
        rho_ = np.mean( prediction_rho[ :, 0 ] )
        rmse_ = np.mean( prediction_rho[ :, 1 ] )
        mae_ = np.mean( prediction_rho[ :, 2 ] )

        PredictLibStats[ lib_size ] = ( rho_, rmse_, mae_ )

    # Return tuple with ( ID, PredictLibStats{} )
    return ( str( args.columns ) + " to " + args.target, PredictLibStats )
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def CCMGetDistances( libraryMatrix, args ) :
    '''
    Note that for CCM the libraryMatrix and predictionMatrix are the same.

    Return Distances: a square matrix with distances.
    Matrix elements D[i,j] hold the distance between the E-dimensional
    phase space point (vector) between rows (observations) i and j.
    The diagonal keeps the 1E30 sentinel so a point is never its own
    nearest neighbor.
    '''
    N_row = nRow( libraryMatrix )

    D = np.full( (N_row, N_row), 1E30 ) # Distance matrix init to 1E30

    E = args.E + 1

    for row in range( N_row ) :
        # Get E-dimensional vector from this library row
        # Exclude the 1st column (j=0) of times
        y = libraryMatrix[ row, 1:E: ]

        # BUG FIX: original looped col in range( N_row - 1 ) with a
        # row >= col skip, so col = N_row - 1 was never reached and all
        # distances involving the last row stayed at the 1E30 sentinel.
        # Iterating col from row + 1 to N_row covers every unique pair.
        for col in range( row + 1, N_row ) :
            # Find distance between vector (y) and other library vector
            # Exclude the 1st column (j=0) of Time
            D[ row, col ] = Distance( libraryMatrix[ col, 1:E: ], y )
            # Insert degenerate values since D[i,j] = D[j,i]
            D[ col, row ] = D[ row, col ]

    return ( D )
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
def CCMGetNeighbors( Distances, lib_i, args ) :
'''
Return a tuple of ( neighbors, distances ). neighbors is a matrix of
row indices in the library matrix. Each neighbors row represents one
prediction vector. Columns are the indices of k_NN nearest neighbors
for the prediction vector (phase-space point) in the library matrix.
distances is a matrix with the same shape as neighbors holding the
corresponding distance values in each row.
Note that the indices in neighbors are not the original indices in
the libraryMatrix rows (observations), but are with respect to the
distances subset defined by the list of rows lib_i, and so have values
from 0 to len(lib_i)-1.
'''
N_row = len( lib_i ) # Subset of libraryMatrix and Distances
col_i = np.arange( len( lib_i ) )#Vector of col indices [0,...len(lib_i)-1]