-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_CNN.py
More file actions
260 lines (221 loc) · 11.3 KB
/
train_CNN.py
File metadata and controls
260 lines (221 loc) · 11.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
import datetime
import os
import csv
import tensorflow as tf
from sklearn.metrics import f1_score
from sklearn.decomposition import PCA
from numpy import fft
import pywt
import numpy as np
import time
import random
import matplotlib.pyplot as plt
from config import train_config
from model import CNNModel
def load_data(config):
    """Load the training/test arrays from ../data and split off a CV set.

    Arrays are indexed as (?, sample, time): the CV split is drawn along
    axis 1, so axis 1 is the sample axis — TODO confirm against the .npy files.

    Args:
        config: dict with at least 'data_dir' (used for logging only; the
            paths below are hard-coded) and 'cross_validation_samples'
            (fraction of training samples held out for cross-validation).

    Returns:
        [data_train, data_cv, data_test, labels_train, labels_cv, labels_test]

    Note: the CV split uses `random.sample` without a fixed seed, so it is
    different on every run.
    """
    print('Loading data from {} ...'.format(config['data_dir']))
    # BUG FIX: the original called `data.astype(np.float)` and discarded the
    # result — astype() returns a NEW array and is not in-place, so the casts
    # were silent no-ops. `np.float` is also removed in NumPy >= 1.24 and
    # would raise AttributeError there. Rebind with the builtin `float`
    # (float64), which is what `np.float` aliased.
    data = np.load('../data/data_train.npy').astype(float)
    labels = np.load('../data/labels_train.npy').astype(float)
    # selecting the samples from data_train to be used for cross-validation
    num_cv_samples = int(config['cross_validation_samples'] * data.shape[1])
    cv_samples = random.sample(range(data.shape[1]), num_cv_samples)
    data_cv = data[:, cv_samples, :]
    labels_cv = labels[:, cv_samples, :]
    # remaining samples form the actual training set
    data_train = np.delete(data, cv_samples, axis=1)
    labels_train = np.delete(labels, cv_samples, axis=1)
    data_test = np.load('../data/data_test.npy').astype(float)
    labels_test = np.load('../data/labels_test.npy').astype(float)
    return [data_train, data_cv, data_test, labels_train, labels_cv, labels_test]
def preprocess_data(data):
    """Window, z-score normalise, and downsample a batch of signals.

    Args:
        data: array of shape (batch, sample, time) with time length >= 5120.

    Returns:
        Array of shape (batch, sample, 1024): samples 1024:5120 of the time
        axis, normalised per trace to zero mean / unit std, then decimated
        by keeping every 4th point.
    """
    # Keep the central window of each recording.
    windowed = data[:, :, 1024:5120]
    # Per-trace statistics over the time axis; keepdims leaves trailing
    # singleton dimensions so plain broadcasting replaces the original
    # expand_dims + tile construction (numerically identical).
    mu = windowed.mean(axis=-1, keepdims=True)
    sigma = np.std(windowed, axis=-1, keepdims=True)
    normalised = (windowed - mu) / sigma
    # Downsample the time axis by a factor of 4.
    return normalised[:, :, ::4]
def get_model_and_placeholders(config):
    """Create the TF1 input/target placeholders and pick the model class.

    Args:
        config: dict providing 'input_dim' and 'output_dim'.

    Returns:
        (model_class, placeholders) where placeholders maps 'input_pl' and
        'target_pl' to the corresponding tf.placeholder tensors. The leading
        batch dimension is left as None so it stays variable.
    """
    inputs = tf.placeholder(
        tf.float32, shape=[None, config['input_dim']], name='input_pl')
    targets = tf.placeholder(
        tf.float32, shape=[None, config['output_dim']], name='target_pl')
    return CNNModel, {'input_pl': inputs, 'target_pl': targets}
def main(config):
    """Train the CNN ECG classifier, validate each epoch, and write test
    predictions to a CSV file.

    NOTE(review): the source this was recovered from had all indentation
    stripped; the nesting below is a reconstruction from the code's control
    flow — the genuinely ambiguous spots are flagged inline.
    """
    # create unique output directory for this model
    timestamp = str(int(time.time()))  # NOTE(review): computed but never used
    config['name'] = 'ECG_class_CNN_nowt'
    config['model_dir'] = os.path.abspath(os.path.join(config['output_dir'], config['name']))
    try:
        os.makedirs(config['model_dir'])
    except Exception:
        # best-effort: reuse the directory if it already exists
        print('Model dir exists already.')
    print('Writing checkpoints into {}'.format(config['model_dir']))
    # load the data, this requires that the *.npz files you downloaded from Kaggle be named `train.npz` and `valid.npz`
    [data_train, data_cv, data_test, labels_train, labels_cv, labels_test] = load_data(config)
    print(data_train.shape)
    print(labels_train.shape)
    print(data_test.shape)
    # preprocessing the data (window, normalise, downsample)
    data_train = preprocess_data(data_train)
    data_cv = preprocess_data(data_cv)
    data_test = preprocess_data(data_test)
    # model dimensions are derived from the preprocessed data
    config['input_dim'] = data_train.shape[-1]
    config['output_dim'] = labels_train.shape[-1]
    # get input placeholders and get the model that we want to train
    rnn_model_class, placeholders = get_model_and_placeholders(config)
    # Create a variable that stores how many training iterations we performed.
    # This is useful for saving/storing the network
    global_step = tf.Variable(1, name='global_step', trainable=False)
    # create a training graph, this is the graph we will use to optimize the parameters
    # NOTE(review): extent of this name_scope is a reconstruction — it only
    # affects op names, not numerics.
    with tf.name_scope('training'):
        rnn_model = rnn_model_class(config, placeholders, mode='training')
        rnn_model_valid = rnn_model_class(config, placeholders, mode='validation')
        rnn_model_test = rnn_model_class(config, placeholders, mode='inference')
        rnn_model.build_graph()
        rnn_model_valid.build_graph()
        rnn_model_test.build_graph()
    print('created RNN model with {} parameters'.format(rnn_model.n_parameters))
    lr = config['learning_rate']
    # configure learning rate
    params = tf.trainable_variables()
    train_op = tf.train.AdamOptimizer(
        learning_rate=lr,
    ).minimize(
        loss=rnn_model.loss,
        var_list=params,
        name='adam',
    )
    # Create summary ops for monitoring the training
    # Each summary op annotates a node in the computational graph and collects data data from it
    tf.summary.scalar('learning_rate', lr, collections=['training_summaries'])
    # Merge summaries used during training and reported after every step
    summaries_training = tf.summary.merge(tf.get_collection('training_summaries'))
    # create summary ops for monitoring the validation
    # caveat: we want to store the performance on the entire validation set, not just one validation batch
    # Tensorflow does not directly support this, so we must process every batch independently and then aggregate
    # the results outside of the model
    # so, we create a placeholder where can feed the aggregated result back into the model
    loss_valid_pl = tf.placeholder(tf.float32, name='loss_valid_pl')
    loss_valid_s = tf.summary.scalar('loss_valid', loss_valid_pl, collections=['validation_summaries'])
    # merge validation summaries
    summaries_valid = tf.summary.merge([loss_valid_s])
    # dump the config to the model directory in case we later want to see it
    # NOTE(review): summaries_training / summaries_valid are built but never
    # written out below (no FileWriter in this function).
    fitted_output = []
    predictions = []
    zeroPts = []  # NOTE(review): never used afterwards
    with tf.Session() as sess:
        # Add the ops to initialize variables.
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        # Actually intialize the variables
        sess.run(init_op)
        # create a saver for writing training checkpoints
        saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=config['n_keep_checkpoints'])
        checkpoint = tf.train.get_checkpoint_state(config['model_dir'])
        checkpoint_restore_successful = False  # NOTE(review): set but never updated/read
        if checkpoint and checkpoint.model_checkpoint_path:
            checkpoint_name = os.path.basename(checkpoint.model_checkpoint_path)
            try:
                saver.restore(sess, '%s/%s' % (os.path.join(config['model_dir']), checkpoint_name) )
                print('Checkpoint restore successful')
            except Exception as e:
                # fall through and train from scratch on any restore failure
                print('Checkpoint restore unsuccessful')
        # start training
        start_time = time.time()
        current_step = 0
        for e in range(config['n_epochs']):
            # reshuffle the batches
            #data_train.reshuffle()
            try:
                predNow = []
                # loop through all training batches (axis 0 of data_train is
                # the batch axis here — each `batch` is one 2-D slice)
                for i, batch, labels_batch in zip(list(range(data_train.shape[0])), list(data_train), list(labels_train)):
                    step = tf.train.global_step(sess, global_step)
                    current_step += 1
                    # we want to train, so must request at least the train_op
                    fetches = {'summaries': summaries_training,
                               'loss': rnn_model.loss,
                               'train_op': train_op,
                               'output': rnn_model.prediction}
                    feed_dict = {rnn_model.input_: batch,
                                 rnn_model.target: labels_batch}
                    # feed data into the model and run optimization
                    training_out = sess.run(fetches, feed_dict)
                    # print training performance of this batch onto console
                    time_delta = str(datetime.timedelta(seconds=int(time.time() - start_time)))
                    if (e % 100 == 0) and i == 0:
                        print('\rEpoch: {:3d} [{:4d}/{:4d}] time: {:>8} loss: {:.4f}'.format(
                            e + 1, i + 1, 1, time_delta, training_out['loss']), end='\t')
                    # save predictions (keeps only the last batch's output)
                    predNow = training_out['output']
                # NOTE(review): placement relative to the inner loop is a
                # reconstruction; predNow holds the final batch's predictions.
                fitted_output = np.array(predNow[0])
            except KeyboardInterrupt:
                # checkpoint what we have and stop training cleanly on Ctrl-C
                saver.save(sess, os.path.join(config['model_dir'], 'model'), global_step)
                break
            # after every 100th epoch evaluate the performance on the validation set
            # NOTE(review): despite the comment, this runs every epoch; only
            # the *printing* below is gated on e % 100.
            total_valid_loss = 0.0
            n_valid_samples = 0
            target_labels = []
            pred_labels = []
            for batch, labels_batch in zip(list(data_cv), list(labels_cv)):
                fetches = {'loss': rnn_model_valid.loss,
                           'output': rnn_model_valid.prediction}
                feed_dict = {rnn_model_valid.input_: batch,
                             rnn_model_valid.target: labels_batch}
                valid_out = sess.run(fetches, feed_dict)
                fitted_output = valid_out['output']
                # weight batch loss by batch size so the average is per-sample
                total_valid_loss += valid_out['loss'] * batch.shape[1]
                n_valid_samples += batch.shape[1]
                target_labels.append( np.argmax(labels_batch, axis=-1))
                pred_labels.append(np.argmax(fitted_output, axis=-1))
            # write validation logs
            avg_valid_loss = total_valid_loss / n_valid_samples
            # F1 score
            target_labels = np.concatenate(target_labels, axis=0)
            pred_labels = np.concatenate(pred_labels, axis=0)
            f1_ScoreNow = f1_score(target_labels, pred_labels, average='binary')
            # print validation performance onto console
            if (e % 100 == 0):
                print(' | validation loss: {:.6f} | f1 score: {:.6f}'.format(avg_valid_loss, f1_ScoreNow), end='\n')
            # save this checkpoint if necessary
            if (e + 1) % config['save_checkpoints_every_epoch'] == 0:
                saver.save(sess, os.path.join(config['model_dir'], 'model'), global_step)
        # Training finished
        print('Training finished')
        ckpt_path = saver.save(sess, os.path.join(config['model_dir'], 'model'), global_step)
        print('Model saved to file {}'.format(ckpt_path))
        # evaluate on the test set with the inference-mode graph
        predictions = []
        target = []
        for batch, labels in zip(list(data_test), list(labels_test)):
            input_ = batch  # NOTE(review): assigned but unused
            feed_dict = {rnn_model_test.input_: batch}
            fetch = [rnn_model_test.prediction]
            class_proba = sess.run(fetch, feed_dict)
            predictions.append(class_proba)
            target.append(labels)
        print('Finished evalualtion for all batches')
        # collapse per-batch probability lists to flat label vectors
        predictions = np.concatenate(predictions, axis=1)
        predictions = np.argmax(predictions, axis=-1)
        predictions = np.squeeze(predictions)
        target = np.concatenate(target, axis=1)
        target = np.argmax(target, axis=-1)
        target = np.squeeze(target)
        f1_ScoreNow = f1_score(target, predictions, average='binary')
        print('f1 score of the test samples is - {:.6f}'.format(f1_ScoreNow))
        # dump test predictions in Kaggle submission format (1-based IDs)
        outfile = open('test_results_CNN_nowt.csv','w')
        outfile.write('ID,Prediction\n')
        for i, p in enumerate(predictions):
            outfile.write('{},{}\n'.format(i+1, p))
        outfile.close()
        #intgr_output = np.matmul(np.tri(fitted_output.shape[0]),fitted_output) + data_train[0,1,:]
# Script entry point: train with the configuration imported from config.py.
if __name__ == '__main__':
    main(train_config)