# finetune_classification.py
import argparse
import datetime
import numpy as np
import time
import torch.backends.cudnn as cudnn
import json
import os
import pandas as pd
from pathlib import Path
import torch.distributed as dist
from tqdm import tqdm
from collections import OrderedDict
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from utils import FocalLoss, DynamicFocalLoss, GHMC, MarginLoss, BinaryFocalLoss
from timm.utils import ModelEma
from utils import create_optimizer, get_parameter_groups, LayerDecayValueAssigner
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
from scipy import interpolate
import task_model
import math
import sys
from typing import Iterable, Optional
import torch
import torch.nn.functional as F
from einops import rearrange
import warnings
warnings.filterwarnings("ignore")
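# Fine-tuning, evaluation, and continuous-prediction script for EEG classification.
# Built on timm-style training utilities (optimizer, layer decay, model EMA) with
# optional DeepSpeed support; datasets, montages, and metrics come from the
# project-local utils module.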
def get_args():
parser = argparse.ArgumentParser('Fine-tuning and evaluation script for EEG classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--distributed', default=True, type=bool, help='Enable distributed training (note: with type=bool, any non-empty string argument parses as True)')
# robust evaluation
parser.add_argument('--robust_test', default=None, type=str,
help='robust evaluation dataset')
# Model parameters
parser.add_argument('--model', default='base_patch200_200', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--qkv_bias', action='store_true')
parser.add_argument('--disable_qkv_bias', action='store_false', dest='qkv_bias')
parser.set_defaults(qkv_bias=True)
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.set_defaults(abs_pos_emb=False)
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--input_size', default=200, type=int,
help='EEG input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--layer_decay', type=float, default=0.9)
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--focalloss', action='store_true', default=False)
parser.add_argument('--focal_alpha', type=str, default='1.0 1.0 1.0', help='Focal Loss alpha')
parser.add_argument('--focal_gamma', type=float, default=2, help='Focal Loss gamma')
parser.add_argument('--marginloss', action='store_true', default=False)
parser.add_argument('--margin', type=float, default=1, help='margin in margin loss')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--model_filter_name', default='gzp', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling')
parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False)
# Dataset parameters
parser.add_argument('--nb_classes', default=0, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--task_model', default='',
help='resume the fine-tuned task model from checkpoint')
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
parser.add_argument('--train_eeg_montage', default='average',
help='average (19) | bipolar (18) | combine (19 average + 18 bipolar)')
parser.add_argument('--train_eeg_length', default=False, type=int,
help='Training sample length in seconds (15 | 10 | 5). Use for IIIC training, where samples are prepared at 15 s and can be cut to train_eeg_length; not for wave-like events such as spikes, whose samples should be prepared at a fixed length (e.g. 1 s)')
parser.add_argument('--training_data_dir', default='',help='training data dir')
parser.add_argument('--predict', action='store_true',
help='Perform continuous prediction only')
parser.add_argument('--eval_list_file', default='', type=str,
help='a file to list test or evaluation dataset')
parser.add_argument('--eval_list_column', default='',
help='file name column in test or evaluation dataset')
parser.add_argument('--eval_results_dir', default='',
help='path where to save prediction results')
parser.add_argument('--eeg_montage', default='average',
help='average (19) | bipolar (18) | combine (19 average + 18 bipolar) | single')
parser.add_argument('--prediction_slipping_step', default=0, type=int,
help='sliding step (in samples) for continuous prediction; if the original sampling rate is above 200 Hz, the step is best chosen as x/100 or xx/128')
parser.add_argument('--prediction_slipping_step_second', default=0, type=int,
help='sliding step (in seconds) for continuous prediction')
parser.add_argument('--data_format', default='mat', type=str,
help='in continuous test, original data format mat | edf | pkl')
parser.add_argument('--sampling_rate', default=0, type=int,
help='give sampling rate if mat/edf data files do not have this information')
parser.add_argument('--already_format_channel_order', default='no',
choices=['yes', 'y', 'no', 'n'], type=str,
help='the data file has no channel info, but the data is already ordered as FP1, F3, C3, P3, F7, T3, T5, O1, FZ, CZ, PZ, FP2, F4, C4, P4, F8, T4, T6, O2')
parser.add_argument('--already_average_montage', default='no',
choices=['yes', 'y', 'no', 'n'], type=str,
help='the original data already has the average montage applied')
parser.add_argument('--allow_missing_channels', type=str,
choices=['yes', 'y', 'no', 'n'], default='no',
help='Allow missing channels (yes/no)')
parser.add_argument('--polarity', default=1, type=int,
help='Set 1 | -1, default 1. If set -1, the signal is inverted')
parser.add_argument('--smooth_result', default='', type=str,
help='smooth the continuous results by EMA')
parser.add_argument('--need_spikes_10s_result', default='no', type=str,
choices=['yes', 'y', 'no', 'n'],
help='output 10-second predictions in addition to 1-second predictions')
parser.add_argument('--spikes_10s_result_slipping_step_second', default=0, type=int, help='sliding step (in seconds) for continuous results based on 10 s spike segments; 10 is a reasonable setting')
parser.add_argument('--test_data_format', default='pkl', type=str,
help='in test, original data format mat | pkl')
parser.add_argument('--max_length_hour', default='no', type=str,
help='Only analyze the first n hours of data.')
parser.add_argument('--rewrite_results', default='no', type=str,
choices=['yes', 'y', 'no', 'n'],
help='rewrite existing result files')
parser.add_argument('--leave_one_hemisphere_out', default='no', type=str,
choices=['no', 'right', 'left', 'middle'],
help='zero out the right, left, or middle hemisphere channels')
parser.add_argument('--channel_symmetric_flip', default='no', type=str,
choices=['no', 'right', 'left'],
help='symmetrically map the right(left) hemisphere data to the left(right) hemisphere.')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed', action='store_true', default=False)
parser.add_argument('--dataset', default='TUAB', type=str,
help='dataset: TUAB | TUEP | TUEV | IIIC | SPIKES | FOC_SPIKES | GEN_SPIKES | FOC_GEN_SPIKES | NORMAL | SLOWING | BS | MGBSLEEP3stages | SLEEPPSG | SLEEPPENN | SLEEPMASS')
parser.add_argument('--eval_sub_dir', type=str,
help='root directory of the evaluation data files')
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
print("Using deepspeed==0.4.0")
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
except ImportError:
print("Please 'pip install deepspeed==0.4.0'")
exit(0)
else:
ds_init = None
return parser.parse_args(), ds_init
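# Forward one batch and compute the loss; ch_names carries the input-channel
# indices produced by utils.get_input_chans.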
def train_class_batch(model, samples, target, criterion, ch_names):
outputs = model(samples, ch_names)
loss = criterion(outputs, target)
return loss, outputs
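# DeepSpeed stores the current loss scale on its optimizer
# (static scaling exposes loss_scale; dynamic scaling exposes cur_scale).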
def get_loss_scale_for_deepspeed(model):
optimizer = model.optimizer
return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale
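# Train for one epoch. With DeepSpeed (loss_scaler is None) the engine handles
# backward/step and gradient accumulation; otherwise native AMP is used and
# gradients are accumulated manually over update_freq micro-batches.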
def train_one_epoch(args,model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, log_writer=None,
start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
num_training_steps_per_epoch=None, update_freq=None, ch_names=None, is_binary=True):
input_chans = None
if ch_names is not None:
input_chans = utils.get_input_chans(ch_names)
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if loss_scaler is None:
model.zero_grad()
model.micro_steps = 0
else:
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
# Update LR & WD for the first acc
if (lr_schedule_values is not None or wd_schedule_values is not None) and data_iter_step % update_freq == 0: # parenthesized so the update_freq check gates both schedules
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group.get("lr_scale", 1.0)
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples = samples.float().to(device, non_blocking=True) / 100
samples = rearrange(samples, 'B N (A T) -> B N A T', T=200)
targets = targets.to(device, non_blocking=True)
if is_binary:
targets = targets.float().unsqueeze(-1)
if loss_scaler is None:
samples = samples.half()
loss, output = train_class_batch(
model, samples, targets, criterion, input_chans)
else:
with torch.autocast(args.device):
loss, output = train_class_batch(
model, samples, targets, criterion, input_chans)
loss_value = loss.item()
if not math.isfinite(loss_value):
# non-finite loss: the original exit is disabled, so training continues
print("Loss is {}, continuing anyway".format(loss_value))
# sys.exit(1)
if loss_scaler is None:
loss /= update_freq
model.backward(loss)
model.step()
if (data_iter_step + 1) % update_freq == 0:
# model.zero_grad()
# Deepspeed will call step() & model.zero_grad() automatic
if model_ema is not None:
model_ema.update(model)
grad_norm = None
loss_scale_value = get_loss_scale_for_deepspeed(model)
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
if is_binary:
# hard label (target is 0 or 1)
# class_acc = utils.get_metrics(torch.sigmoid(output).detach().cpu().numpy(), targets.detach().cpu().numpy(), ["accuracy"], is_binary)["accuracy"]
# soft label (target is 0-1), change it to hard label with a threshold 0.5
class_acc = utils.get_metrics(torch.sigmoid(output).detach().cpu().numpy(), (targets >= 0.5).int().detach().cpu().numpy(), ["accuracy"], is_binary)["accuracy"]
else:
class_acc = (output.max(-1)[-1] == targets.squeeze()).float().mean()
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
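# Label-free inference over a DataLoader; returns stacked per-snippet outputs
# (sigmoid probabilities for binary heads, raw logits otherwise; the caller
# applies softmax for multiclass results).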
@torch.no_grad()
def predict(args, data_loader, model, device, header='Prediction:', ch_names=None, is_binary=True):
input_chans = None
if ch_names is not None:
input_chans = utils.get_input_chans(ch_names)
# metric_logger = utils.MetricLogger(delimiter=" ")
# header = 'Test:'
# switch to evaluation mode
model.eval()
pred = []
for step, batch in enumerate(data_loader):
EEG = batch[0]
EEG = EEG.float().to(device, non_blocking=True) / 100
EEG = rearrange(EEG, 'B N (A T) -> B N A T', T=200)
# compute output
with torch.autocast(args.device):
output = model(EEG, input_chans=input_chans)
if is_binary:
output = torch.sigmoid(output).cpu()
else:
output = output.cpu()
if args.device == 'cpu':
output = output.to(torch.float32)
pred.append(output)
pred = torch.cat(pred, dim=0).numpy()
return pred
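# Labeled evaluation: BCE-with-logits loss for binary heads, cross-entropy otherwise;
# metrics are logged per batch and recomputed over the full concatenated predictions.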
@torch.no_grad()
def evaluate(args, data_loader, model, device, header='Test:', ch_names=None, metrics=['acc'], is_binary=False):
input_chans = None
if ch_names is not None:
input_chans = utils.get_input_chans(ch_names)
if is_binary:
criterion = torch.nn.BCEWithLogitsLoss()
else:
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
# header = 'Test:'
# switch to evaluation mode
model.eval()
pred = []
true = []
for step, batch in enumerate(metric_logger.log_every(data_loader, 10, header)):
EEG = batch[0]
target = batch[-1]
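# Note: unlike train_one_epoch/predict, inputs here are not divided by 100;
# the evaluation datasets are presumably scaled upstream.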
EEG = EEG.float().to(device, non_blocking=True)
EEG = rearrange(EEG, 'B N (A T) -> B N A T', T=200)
target = target.to(device, non_blocking=True)
if is_binary:
target = target.float().unsqueeze(-1)
# compute output
with torch.autocast(args.device):
output = model(EEG, input_chans=input_chans)
# print(output)
# print(target)
loss = criterion(output, target)
if is_binary:
output = torch.sigmoid(output).cpu()
# target = target.cpu() # hard label
target=(target >= 0.5).int().detach().cpu() # soft label (target is 0-1), change it to hard label with a threshold 0.5
else:
output = output.cpu()
target = target.cpu()
results = utils.get_metrics(output.numpy(), target.numpy(), metrics, is_binary)
pred.append(output)
true.append(target)
batch_size = EEG.shape[0]
metric_logger.update(loss=loss.item())
for key, value in results.items():
metric_logger.meters[key].update(value, n=batch_size)
# metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* loss {losses.global_avg:.3f}'.format(losses=metric_logger.loss))
pred = torch.cat(pred, dim=0).numpy()
true = torch.cat(true, dim=0).numpy()
ret = utils.get_metrics(pred, true, metrics, is_binary, 0.5)
ret['loss'] = metric_logger.loss.global_avg
return ret, pred, true
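# Build the backbone through timm's create_model using the architecture flags from get_args().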
def get_models(args):
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
use_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
qkv_bias=args.qkv_bias,
)
return model
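# Map a dataset name to its channel list, class count, and metric set, then build the
# datasets. Returns (train, test, val, ch_names, metrics) for training, or
# (result_file, evaluation_dataset, ch_names, metrics) when evaluation=True.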
def get_dataset(args, evaluation=False, sub_dir=False):
Bipolar = False
addBipolar = False
sample_length=False
if args.dataset == 'TUAB' or args.dataset == 'TUEP':
root=args.training_data_dir
ch_names = ['EEG FP1-REF', 'EEG FP2-REF', 'EEG F3-REF', 'EEG F4-REF', 'EEG C3-REF', 'EEG C4-REF', 'EEG P3-REF', 'EEG P4-REF', 'EEG O1-REF', 'EEG O2-REF', 'EEG F7-REF', \
'EEG F8-REF', 'EEG T3-REF', 'EEG T4-REF', 'EEG T5-REF', 'EEG T6-REF', 'EEG A1-REF', 'EEG A2-REF',
'EEG FZ-REF', 'EEG CZ-REF', 'EEG PZ-REF', 'EEG T1-REF', 'EEG T2-REF']
ch_names = [name.split(' ')[-1].split('-')[0] for name in ch_names]
args.nb_classes = 1
metrics = ["pr_auc", "roc_auc", "accuracy", "balanced_accuracy"]
elif args.dataset == 'TUEV':
root=args.training_data_dir
ch_names = ['EEG FP1-REF', 'EEG FP2-REF', 'EEG F3-REF', 'EEG F4-REF', 'EEG C3-REF', 'EEG C4-REF', 'EEG P3-REF',
'EEG P4-REF', 'EEG O1-REF', 'EEG O2-REF', 'EEG F7-REF', \
'EEG F8-REF', 'EEG T3-REF', 'EEG T4-REF', 'EEG T5-REF', 'EEG T6-REF', 'EEG A1-REF', 'EEG A2-REF',
'EEG FZ-REF', 'EEG CZ-REF', 'EEG PZ-REF', 'EEG T1-REF', 'EEG T2-REF']
ch_names = [name.split(' ')[-1].split('-')[0] for name in ch_names]
args.nb_classes = 6
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
elif args.dataset=='IIIC':
if args.train_eeg_montage=="combine":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8', 'T4', 'T6', 'O2', 'FP1-F7', 'F7-T3', 'T3-T5', 'T5-O1', 'FP2-F8', 'F8-T4', 'T4-T6', 'T6-O2', 'FP1-F3', 'F3-C3', 'C3-P3', 'P3-O1', 'FP2-F4', 'F4-C4', 'C4-P4', 'P4-O2', 'FZ-CZ', 'CZ-PZ']
addBipolar = True
elif args.train_eeg_montage == "bipolar":
ch_names = ['FP1-F7', 'F7-T3', 'T3-T5', 'T5-O1', 'FP2-F8', 'F8-T4', 'T4-T6', 'T6-O2', 'FP1-F3', 'F3-C3', 'C3-P3', 'P3-O1', 'FP2-F4', 'F4-C4', 'C4-P4', 'P4-O2', 'FZ-CZ', 'CZ-PZ']
Bipolar = True
else:
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8', 'T4', 'T6', 'O2']
root = args.training_data_dir
args.nb_classes = 6
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
sample_length = args.train_eeg_length
elif args.dataset == "SPIKES":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8', 'T4', 'T6', 'O2']
args.nb_classes = 1
metrics = ["pr_auc", "roc_auc", "accuracy", "balanced_accuracy"]
root=args.training_data_dir
elif args.dataset == "FOC_GEN_SPIKES":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8','T4', 'T6', 'O2']
root = args.training_data_dir
args.nb_classes = 3
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
elif args.dataset == "FOC_SPIKES":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8', 'T4', 'T6', 'O2']
args.nb_classes = 1
metrics = ["pr_auc", "roc_auc", "accuracy", "balanced_accuracy"]
root=args.training_data_dir
elif args.dataset == "GEN_SPIKES":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8', 'T4', 'T6', 'O2']
args.nb_classes = 1
metrics = ["pr_auc", "roc_auc", "accuracy", "balanced_accuracy"]
root = args.training_data_dir
elif args.dataset== "SLOWING":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8','T4', 'T6', 'O2']
root = args.training_data_dir
args.nb_classes = 3
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
elif args.dataset == "NORMAL":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8','T4', 'T6', 'O2']
root = args.training_data_dir
args.nb_classes = 1
metrics = ["pr_auc", "roc_auc", "accuracy", "balanced_accuracy"]
elif args.dataset=='BS':
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8',
'T4', 'T6', 'O2']
args.nb_classes = 1
metrics = ["pr_auc", "roc_auc", "accuracy", "balanced_accuracy"]
root = args.training_data_dir
elif args.dataset == "MGBSLEEP3stages":
ch_names = ['FP1', 'F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'FP2', 'F4', 'C4', 'P4', 'F8','T4', 'T6', 'O2']
root = args.training_data_dir
args.nb_classes = 3
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
elif args.dataset == "SLEEPPSG":
ch_names = ['F3', 'C3', 'O1', 'F4', 'C4', 'O2']
root = args.training_data_dir
args.nb_classes = 5
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
elif args.dataset == "SLEEPPENN":
ch_names = ['C3','C4']
root = args.training_data_dir
args.nb_classes = 5
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
elif args.dataset == "SLEEPMASS":
ch_names = ['F3', 'C3', 'P3', 'F7', 'T3', 'T5', 'O1', 'FZ', 'CZ', 'PZ', 'F4', 'C4', 'P4', 'F8', 'T4', 'T6', 'O2']
root = args.training_data_dir
args.nb_classes = 5
metrics = ["accuracy", "balanced_accuracy", "cohen_kappa", "f1_weighted"]
else:
print(f'Unknown dataset: {args.dataset}')
return
print(f"Data from {root}")
if evaluation:
result_file, evaluation_dataset = utils.prepare_classification_dataset(root, only_evaluate=True, original_format=args.test_data_format, sub_dir=sub_dir,Bipolar=Bipolar,addBipolar=addBipolar,sample_length=sample_length)
if result_file is False:
return False, False, False, False
return result_file, evaluation_dataset, ch_names, metrics
else:
train_dataset, test_dataset, val_dataset = utils.prepare_classification_dataset(root,Bipolar=Bipolar,addBipolar=addBipolar,sample_length=sample_length)
return train_dataset, test_dataset, val_dataset, ch_names, metrics
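# Entry point: sets up (optionally distributed) execution, then runs continuous
# prediction when --predict is given; evaluation and fine-tuning follow later in the script.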
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
# print(args)
device = torch.device(args.device)
# predict continuous (continuous test)------------------------------------------------------------------------------------
if args.predict:
if args.prediction_slipping_step == 0 and args.prediction_slipping_step_second == 0:
print('--prediction_slipping_step or --prediction_slipping_step_second is required.')
exit(0)
elif args.prediction_slipping_step != 0:
used_slipping_step = args.prediction_slipping_step
step_in_point = True
if args.prediction_slipping_step_second != 0:
print('Got both --prediction_slipping_step and --prediction_slipping_step_second; using the first.')
else:
used_slipping_step = args.prediction_slipping_step_second
step_in_point = False
if str(args.need_spikes_10s_result).lower() in ('yes', 'y'):
args.need_spikes_10s_result = True
if args.spikes_10s_result_slipping_step_second == 0:
print('--need_spikes_10s_result is yes but --spikes_10s_result_slipping_step_second is not set; defaulting to 10')
args.spikes_10s_result_slipping_step_second = 10
else:
args.need_spikes_10s_result = False
if str(args.already_format_channel_order).lower() in ('yes', 'y'):
args.already_format_channel_order = True
else:
args.already_format_channel_order = False
if str(args.already_average_montage).lower() in ('yes', 'y'):
args.already_average_montage = True
else:
args.already_average_montage = False
if str(args.allow_missing_channels).lower() in ('yes', 'y'):
args.allow_missing_channels = True
else:
args.allow_missing_channels = False
if str(args.leave_one_hemisphere_out).lower() in ('n', 'no'):
args.leave_one_hemisphere_out = False
else:
args.leave_one_hemisphere_out = str(args.leave_one_hemisphere_out).lower()
if str(args.channel_symmetric_flip).lower() in ('n', 'no'):
args.channel_symmetric_flip = False
else:
args.channel_symmetric_flip = str(args.channel_symmetric_flip).lower()
if str(args.max_length_hour).lower() in ('n', 'no'):
args.max_length_hour = None
else:
args.max_length_hour = int(args.max_length_hour)
args.nb_classes = utils.get_n_classes(args.dataset)
model = get_models(args)
patch_size = model.patch_size # patch_size 200 (1s)
#print("Patch size = %s" % str(patch_size))
args.window_size = (1, args.input_size // patch_size)
args.patch_size = patch_size
if args.device == 'cpu':
# force the model to float32 when running on CPU
model = model.to(torch.float32)
else:
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
#print("Model = %s" % str(model_without_ddp))
#print('number of params:', n_parameters)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module # Extract the original model before encapsulation by ddp
utils.load_from_task_model(
args=args, model_without_ddp=model_without_ddp)
os.makedirs(args.eval_results_dir, exist_ok=True)
os.chmod(args.eval_results_dir, 0o777)
if args.eval_list_file:
if args.eval_list_file.endswith("xlsx"):
eval_list = pd.read_excel(args.eval_list_file).astype(str)
elif args.eval_list_file.endswith("csv"):
eval_list = pd.read_csv(args.eval_list_file).astype(str)
else:
print("eval_list_file should be in xlsx or cvs format")
exit(0)
file_list=eval_list[args.eval_list_column].to_list()
file_list = [f"{file.split('.')[0]}.{args.data_format}" for file in file_list]
current_files = set(os.listdir(args.eval_sub_dir))
file_list = [file.split('.')[0] for file in file_list if file in current_files]
else:
# If all the data are stored in one directory
# file_list=[file.split('.')[0] for file in os.listdir(args.eval_sub_dir) if file.endswith(args.data_format)]
# If the data are organized in different folders under a root path, use recursive search to find them.
file_list = utils.recursive_files(root_dir=args.eval_sub_dir, file_type=args.data_format)
file_list = [file.split('.')[0] for file in file_list]
# If the files are numbered, sort them based on the numerical values in their names.
file_list = sorted(file_list, key=utils.extract_number)
print()
print(f'[*] For {args.dataset} task')
print(f'Input {len(file_list)} EEG files...')
for eval_file in tqdm(file_list,desc=f"{args.dataset} event level results"):
####### Whether overwrite former results #######
if str(args.rewrite_results).lower() in ('no', 'n'):
result_file_path = os.path.join(args.eval_results_dir, f'{eval_file}.csv')
if os.path.exists(result_file_path):
print(f'{result_file_path} already exists, skip')
continue
####### Whether overwrite former results#######
# If all the data are stored in one directory
# data_path = os.path.join(args.eval_sub_dir, f'{eval_file}.{args.data_format}')
# if not os.path.exists(data_path):
# print(f"{data_path} data not found")
# continue
# If the files are numbered, sort them based on the numerical values in their names.
data_path=utils.find_file_path(root_dir=args.eval_sub_dir, target_filename=f'{eval_file}.{args.data_format}')
if data_path is None:
print(f"{eval_file}.{args.data_format} data not found")
continue
data_transform=utils.Compose([utils.Clipping(),utils.Scaling()])
if args.eeg_montage == "single":
df = pd.DataFrame()
for i in range(19):
montage = utils.single_channel_average_montage(i)
ch_names=montage.get_channel_names()
try:
EEG_data = utils.ContinuousToSnippetDataset(
type=args.dataset,
path_signal=data_path,
given_fs=args.sampling_rate,
has_formatted_channel=args.already_format_channel_order,
has_avg=args.already_average_montage,
allow_missing_channels=args.allow_missing_channels,
montage=montage,
transform=data_transform,
step=used_slipping_step,
step_in_point=step_in_point,
polarity=int(args.polarity),
max_length_hour=args.max_length_hour)
except ValueError as e:
print(f"DataError: {e}")
continue
except Exception as e:
print(f"Unknown error: {e}")
continue
sampler_evaluate = torch.utils.data.SequentialSampler(EEG_data)
data_loader_evaluate = torch.utils.data.DataLoader(
EEG_data, sampler=sampler_evaluate,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False)
pred = predict(args, data_loader_evaluate, model, device, header='Prediction:',
ch_names=ch_names,
is_binary=(args.nb_classes == 1))
df[f'pred_{ch_names[0]}']= pred.flatten()
result_file_path = os.path.join(args.eval_results_dir, f'{eval_file}.csv')
df.to_csv(result_file_path, index=False)
os.chmod(result_file_path, 0o777)
continue
elif args.eeg_montage=="combine":
montage=utils.combine_montage()
ch_names = montage.get_channel_names()
elif args.eeg_montage=="bipolar":
montage = utils.bipolar_montage()
ch_names = montage.get_channel_names()
elif args.eeg_montage=="average":
if args.dataset=='SLEEPPSG':
montage = utils.sleep_common_average_montage()
ch_names = montage.get_channel_names()
else:
montage = utils.common_average_montage()
ch_names=montage.get_channel_names()
else:
print(f"The input parameter eeg_montage is set incorrectly")
continue
try:
EEG_data = utils.ContinuousToSnippetDataset(
type=args.dataset,
path_signal=data_path,
given_fs=args.sampling_rate,
has_formatted_channel=args.already_format_channel_order,
has_avg=args.already_average_montage,
allow_missing_channels=args.allow_missing_channels,
montage=montage,
transform=data_transform,
step=used_slipping_step,
step_in_point=step_in_point,
polarity=int(args.polarity),
leave_one_hemisphere_out=args.leave_one_hemisphere_out,
channel_symmetric_flip=args.channel_symmetric_flip,
max_length_hour=args.max_length_hour
)
except ValueError as e:
print(f"DataError: {e}")
continue
except utils.SnippetsError as e:
# print(f"{eval_file}: {e}, all results will be classified as others.")
result_segment_shapes=e.result_segment_shapes
if args.nb_classes==1:
new_result_vector = np.zeros(result_segment_shapes)
df = pd.DataFrame({'pred': new_result_vector})
else:
new_shape = (result_segment_shapes, args.nb_classes)
new_result_matrix = np.zeros(new_shape)
new_result_matrix[..., 0] = 1  # the first class ("others") gets probability 1
df = pd.DataFrame(new_result_matrix, columns=[f'class_{i}_prob' for i in range(args.nb_classes)])
df['pred_class'] = df.idxmax(axis=1)
df['pred_class'] = df['pred_class'].map(lambda x: int(x.split('_')[1]))
file_path = os.path.join(args.eval_results_dir, f'{eval_file}.csv')
df.to_csv(file_path, index=False)
os.chmod(file_path, 0o777)
continue
except Exception as e:
print(f"Unknown error: {e}")
continue
sampler_evaluate = torch.utils.data.SequentialSampler(EEG_data)
data_loader_evaluate = torch.utils.data.DataLoader(
EEG_data, sampler=sampler_evaluate,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False)
pred = predict(args, data_loader_evaluate, model, device, header='Prediction:',
ch_names=ch_names,
is_binary=(args.nb_classes == 1))
# padding results, valid_index should have the same shape as the output
valid_index, valid_start_end_indices, result_segment_shapes = EEG_data.get_valid_indices()
if args.nb_classes==1:
prob=pred.flatten()
new_result_vector = np.zeros(len(valid_index), dtype=prob.dtype)
prob_segments = utils.split_nd_to_plus1d(arr=prob, segment_shape=result_segment_shapes)
for valid_start_end_index, prob_segment in zip(valid_start_end_indices, prob_segments):
valid_start_index,valid_end_index=valid_start_end_index
valid_result_segment_shape=valid_end_index-valid_start_index+1
#Interpolate and extend the result segments to match the length of the returned output
new_prob_segment=utils.resize_array_along_axis0(arr=prob_segment, d=1,target_length=valid_result_segment_shape)
new_result_vector[valid_start_index:valid_end_index+1]=new_prob_segment
if args.dataset == 'SPIKES':
original_fs = EEG_data.get_original_fs()
if args.need_spikes_10s_result:
if (step_in_point and used_slipping_step / original_fs > 1) or (not step_in_point and used_slipping_step != 1):
print('cannot output 10 s continuous results for spikes; the step of the 1 s results must be <= 1 s')
else:
results_10s=utils.spikes_results_from_1s_to_10s(
predictions=new_result_vector,
data_fs=original_fs,
original_result_step= used_slipping_step, original_step_in_point=step_in_point,new_result_step_second=args.spikes_10s_result_slipping_step_second)
df_10s = pd.DataFrame({'pred': results_10s})
file_path_10s = os.path.join(args.eval_results_dir, f'{eval_file}_10s.csv')
df_10s.to_csv(file_path_10s, index=False)
os.chmod(file_path_10s, 0o777)
if step_in_point:
smooth_method = args.smooth_result
new_result_vector = utils.continuous_binary_probabilities(predictions=new_result_vector, data_fs=original_fs,result_step=args.prediction_slipping_step, smooth_method=smooth_method) # smooth_method = 'ema' or 'window_ema' or ''
if args.allow_missing_channels == True:
new_result_vector = utils.continuous_binary_probabilities2(
predictions=new_result_vector,
data_fs=original_fs,
result_step=args.prediction_slipping_step)
df = pd.DataFrame({'pred': new_result_vector})
else:
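# Multiclass: convert logits to probabilities with a plain softmax
# (note: np.exp without max-subtraction can overflow for large logits).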
prob = np.exp(pred) / np.sum(np.exp(pred), axis=1, keepdims=True)
prob = np.array(prob)
#Due to resampling, result_count and valid_count may not be equal.
new_shape = (len(valid_index), prob.shape[1])
new_result_matrix = np.zeros(new_shape, dtype=prob.dtype)
new_result_matrix[..., 0] = 1  # classify as others by default
# Slice the result array
prob_segments=utils.split_nd_to_plus1d(arr=prob, segment_shape=result_segment_shapes)
for valid_start_end_index, prob_segment in zip(valid_start_end_indices, prob_segments):
valid_start_index,valid_end_index=valid_start_end_index
valid_result_segment_shape=valid_end_index-valid_start_index+1
# Interpolate and expand result segments to match the return length
new_prob_segment=utils.resize_array_along_axis0(arr=prob_segment, d=2, target_length=valid_result_segment_shape)
new_result_matrix[valid_start_index:valid_end_index+1,:]=new_prob_segment