-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathcontinurous.py
More file actions
65 lines (52 loc) · 2.43 KB
/
continurous.py
File metadata and controls
65 lines (52 loc) · 2.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import os
import pickle
from torchvision import transforms
import pytorch_lightning as pl
import torch
from tqdm import tqdm
# load own code
import sys
sys.path.append('../')
from sleeplib.Resnet_15.model import ResNet
from sleeplib.datasets import BonoboDataset, ContinousToSnippetDataset
# this holds all the configuration parameters
from sleeplib.config import Config
import pickle
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import DataLoader
from torchvision import transforms
from sleeplib.datasets import BonoboDataset , ContinousToSnippetDataset
from sleeplib.montages import CDAC_bipolar_montage,CDAC_common_average_montage,CDAC_combine_montage,con_combine_montage, con_ECG_combine_montage
from sleeplib.transforms import cut_and_jitter, channel_flip,extremes_remover
# load config and show all default parameters
config = Config()
# root directory holding the pretrained checkpoint and where results are written
path_model = 'your_path/Models/spikenet2/'
# set up dataloader to predict all samples in test dataset
# preprocessing: clip/reject extreme-amplitude signals before inference
# (assumes units such that 2000 is a hard max and 20 a hard min — TODO confirm against extremes_remover)
transform_train = transforms.Compose([extremes_remover(signal_max = 2000, signal_min = 20)])
# NOTE(review): this rebinding shadows the imported `con_combine_montage` function
# with an instance of `con_ECG_combine_montage` — presumably intentional so the
# ECG-augmented montage is used below; verify against sleeplib.montages.
con_combine_montage = con_ECG_combine_montage()
# load pretrained model
# checkpoint was trained with 37 input channels; lr is passed only to satisfy
# the LightningModule signature — it has no effect at inference time
model = ResNet.load_from_checkpoint('your_path/Models/spikenet2/hardmine.ckpt',
lr=config.LR,
n_channels=37,
)
#map_location=torch.device('cpu') add this if running on CPU machine
# init trainer
# NOTE(review): strategy='ddp' with devices=1 and repeated trainer.predict calls
# inside a loop is unusual — confirm DDP is actually needed here, a plain
# single-device trainer may be the intent.
trainer = pl.Trainer(fast_dev_run=False,enable_progress_bar=False,devices = 1,strategy ='ddp')
# store results: run inference over every continuous EEG recording listed in
# the control set and write one CSV of per-window predictions per recording.
path_controls = os.path.join("your_path/Models/spikenet2/controlset.csv")
controls = pd.read_csv(path_controls)
# create the output directory up front so the first to_csv cannot fail
output_dir = os.path.join(path_model, 'hard_mine')
os.makedirs(output_dir, exist_ok=True)
for eeg_file in tqdm(controls.EEG_index):
    # each .mat file holds one continuous EEG recording
    path = 'your_path/continuousEEG/' + eeg_file + '.mat'
    Bonobo_con = ContinousToSnippetDataset(path, montage=con_combine_montage,
                                           transform=transform_train,
                                           window_size=config.WINDOWSIZE)
    con_dataloader = DataLoader(Bonobo_con, batch_size=128, shuffle=False,
                                num_workers=os.cpu_count())
    # trainer.predict returns one array/tensor per batch; flatten into a
    # single 1-D float array of per-window probabilities
    preds = trainer.predict(model, con_dataloader)
    preds = np.concatenate(preds).astype(float)
    pd.DataFrame(preds).to_csv(os.path.join(output_dir, eeg_file + '.csv'), index=False)