#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 21 15:31:23 2022
@author: patrickmayerhofer
script_running_classification_v5
loads the tfrecords, and uses them to do optimize a neural network
Based on:
https://www.kaggle.com/code/danmaccagnola/activity-recognition-data-w-tfrecords/notebook
and for model this could help. consider using a cnn-lstm model
https://machinelearningmastery.com/how-to-develop-rnn-models-for-human-activity-recognition-time-series-classification/
"""
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Flatten, Dropout, Activation,
                                     Conv1D, MaxPooling1D, Conv2D, MaxPooling2D, LSTM)
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import confusion_matrix, accuracy_score, precision_recall_curve, PrecisionRecallDisplay
"""some variables"""
n_timesteps = 30 # predefined, when we saved tfrecords in "create_TFRecords3"
n_features = 12 # same here. 3 accelerations, 3 angular velocities, 2 feet
examples_per_file = 128 # same here, these are the nr of files in one tfrecord
epochs = 50
"""import and shuffle"""
dir_root = '/Volumes/GoogleDrive/My Drive/Running Plantiga Project/Data/Prepared/'
dir_tfr = dir_root + "tfrecords/"
filenames = tf.io.gfile.glob(f"{dir_tfr}*.tfrec")
shuffled_filenames = filenames.copy()
np.random.shuffle(shuffled_filenames)
print(f"Total: {len(shuffled_filenames)}")
print("---------")
"""create training and validation sizes and filenames"""
train_val_size = 0.8  # 80% of files for training+validation, 20% for testing
train_size = 0.8      # of those 80%, 80% for training and 20% for validation
train_val_len = int(len(shuffled_filenames)*train_val_size)
train_val_filenames, test_filenames = shuffled_filenames[:train_val_len], shuffled_filenames[train_val_len:]
train_len = int(len(train_val_filenames)*train_size)
train_filenames, val_filenames = train_val_filenames[:train_len], train_val_filenames[train_len:]
print(f"Train: {len(train_filenames)}")
print(f"Validation: {len(val_filenames)}")
print(f"Test: {len(test_filenames)}")
"""
for batch in tf.data.TFRecordDataset(filenames):
    print(batch)
    break
"""
"""parse serialized data"""
def parse_tfrecord_fn(example):
    feature_description = {
        "feature_matrix": tf.io.VarLenFeature(tf.float32),
        "score_10k": tf.io.FixedLenFeature([], tf.int64),
        "fullpath": tf.io.FixedLenFeature([], tf.string),
        "subject_id": tf.io.FixedLenFeature([], tf.int64),
        "tread_or_overground": tf.io.VarLenFeature(tf.int64)
    }
    example = tf.io.parse_single_example(example, feature_description)
    example["feature_matrix"] = tf.reshape(tf.sparse.to_dense(example["feature_matrix"]), (n_timesteps, n_features, 1))
    return example
"""
for batch in tf.data.TFRecordDataset(filenames).map(parse_tfrecord_fn):
    print(batch)
    break
"""
"""prepare input and output for model"""
def prepare_sample(features):
    #image = tf.image.resize(features["image"], size=(224, 224))
    #return image, features["category_id"]
    input_data = features["feature_matrix"]
    output_data = features['score_10k']
    return input_data, output_data
"""fetch the data"""
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 32
def get_dataset(filenames, batch_size):
    dataset = (
        tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE)
        .map(parse_tfrecord_fn, num_parallel_calls=AUTOTUNE)
        .map(prepare_sample, num_parallel_calls=AUTOTUNE)
        .shuffle(batch_size * 10)
        .batch(batch_size)
        .prefetch(AUTOTUNE)
    )
    return dataset
# Quick smoke test: build a dataset over all files to confirm the pipeline constructs
get_dataset(filenames, batch_size)
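# Optional sanity check (added sketch, not in the original script): pull one batch
# from the training files and confirm the shapes match the model input defined below,
# i.e. inputs of (batch_size, 30, 12, 1) and one scalar score_10k target per example.
for x_batch, y_batch in get_dataset(train_filenames, batch_size).take(1):
    print("batch inputs:", x_batch.shape, "batch targets:", y_batch.shape)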
"""create neural network """
model = tf.keras.Sequential([
layers.Conv2D(16, 12, activation = "relu", input_shape = (30,12,1)),
#layers.MaxPool2D(2),
layers.Reshape((19,16,1)),
layers.Conv2D(32, 3, activation = "relu"),
layers.MaxPool2D(2),
layers.Flatten(),
layers.Dense(16, activation = "relu"),
layers.Dense(1, activation = "relu") # softmax va bene per i multiclass, altrimenti uso sigmoid
])
model.summary()
model.compile(loss='MeanSquaredError',
              optimizer='adam', metrics=['MeanSquaredError'])
"""
steps_per_epoch = np.int(np.ceil(examples_per_file*len(train_filenames)/batch_size))
validation_steps = np.int(np.ceil(examples_per_file*len(val_filenames)/batch_size))
steps = np.int(np.ceil(examples_per_file*len(test_filenames)/batch_size))
print("steps_per_epoch = ", steps_per_epoch)
print("validation_steps = ", validation_steps)
print("steps = ", steps)
"""
train_dataset = get_dataset(train_filenames, batch_size)
val_dataset = get_dataset(val_filenames, batch_size)
test_dataset = get_dataset(test_filenames, batch_size)
model.fit(train_dataset,
          validation_data=val_dataset,
          #steps_per_epoch=steps_per_epoch,
          #validation_steps=validation_steps,
          epochs=epochs
          )
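# A small sketch (added, not in the original script): model.fit returns a Keras
# History object; capturing it would let you inspect per-epoch training and
# validation loss, e.g.
# history = model.fit(train_dataset, validation_data=val_dataset, epochs=epochs)
# print(history.history["loss"], history.history["val_loss"])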
# test_dataset is finite, so evaluate over the whole set (no explicit steps needed)
model.evaluate(test_dataset)
pred_values = model.predict(test_dataset)
steps_to_take = len(test_filenames)  # note: .take() counts batches, not files
pred_values_list = []
true_list = []
for x, y in test_dataset.take(steps_to_take):
    pred_value = model.predict(x).astype(int)
    pred_values_list = pred_values_list + list(pred_value)
    true_list = true_list + list(y.numpy().astype(int))
print('Accuracy')
#print(accuracy_score(true_list, [int(x[0]) for x in pred_values_list]))
print('Confusion Matrix')
#print(confusion_matrix(true_list, [int(x[0]) for x in pred_values_list]))
# Note: an AUC metric must first be updated with (binary) labels and probabilities
# via m.update_state(...); until then m.result() returns 0.
m = tf.keras.metrics.AUC(curve='PR')
m.result().numpy()
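# A minimal sketch (added, not in the original script): since the model is trained
# with a mean-squared-error loss on score_10k, summarize the collected test
# predictions with simple regression errors. Uses only true_list / pred_values_list
# built in the loop above and numpy (np) as imported at the top.
if pred_values_list:
    true_arr = np.array(true_list, dtype=float)
    pred_arr = np.concatenate(pred_values_list).astype(float)
    print("Test MAE:", np.mean(np.abs(pred_arr - true_arr)))
    print("Test MSE:", np.mean((pred_arr - true_arr) ** 2))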