context_train.py
import numpy as np
from datetime import datetime
import multiprocessing

def context_train(epoch, epoch_fn, opt, train_loader, discriminator, context_fn, logger,
                  optimizer=None, loss_fn=None, fcn=None, coAttn=None):
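    """Train the context model for one epoch.

    The discriminator, the optional wide residual network (fcn) and the
    optional co-attention module are frozen and kept in evaluation mode;
    only the parameters of context_fn are optimized. Returns the mean
    training accuracy and mean training loss over the epoch.
    """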
    start_time = datetime.now()
    # Freeze the discriminator and switch it to evaluation mode.
    for param in discriminator.parameters():
        param.requires_grad = False
    discriminator.eval()
    if opt.cuda:
        discriminator.cuda()
    # Freeze the wide residual network (fcn) and switch it to evaluation mode.
    if opt.apply_wrn:
        for param in fcn.parameters():
            param.requires_grad = False
        fcn.eval()
        if opt.cuda:
            fcn.cuda()
    # Freeze the co-attention module and switch it to evaluation mode.
    if opt.use_coAttn:
        for param in coAttn.parameters():
            param.requires_grad = False
        coAttn.eval()
        if opt.cuda:
            coAttn.cuda()
    # Enable gradients in the context model; it is the only module being trained.
    for param in context_fn.parameters():
        param.requires_grad = True
    context_fn.train(mode=True)  # set the naive/full-context model to training mode
    if opt.cuda:
        context_fn.cuda()
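    # The fcn feature extractor is only forwarded to the epoch function when
    # opt.apply_wrn is set; otherwise the epoch runs without it.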
    if opt.apply_wrn:
        train_acc_epoch, train_loss_epoch = epoch_fn(opt=opt, loss_fn=loss_fn,
                                                     discriminator=discriminator,
                                                     data_loader=train_loader,
                                                     model_fn=context_fn,
                                                     optimizer=optimizer, fcn=fcn, coAttn=coAttn)
    else:
        train_acc_epoch, train_loss_epoch = epoch_fn(opt=opt, loss_fn=loss_fn,
                                                     discriminator=discriminator,
                                                     data_loader=train_loader,
                                                     model_fn=context_fn,
                                                     optimizer=optimizer, coAttn=coAttn)
    time_elapsed = datetime.now() - start_time
    train_acc_epoch = np.mean(train_acc_epoch)
    train_loss_epoch = np.mean(train_loss_epoch)
    print("[%s] epoch: %d, train loss: %f, train acc: %.2f, time: %02ds:%02dms" %
          (multiprocessing.current_process().name, epoch, np.round(train_loss_epoch, 6),
           np.round(train_acc_epoch, 6), time_elapsed.seconds, time_elapsed.microseconds / 1000))
    logger.log_value('context_train_loss', train_loss_epoch)
    logger.log_value('context_train_acc', train_acc_epoch)
    assert not np.isnan(train_loss_epoch), 'ERROR. Found NaN in context_train.'
    # Log the current learning rate (a scheduler may reduce it when the metric plateaus).
    logger.log_value('context_train_lr', optimizer.param_groups[0]['lr'])
    return train_acc_epoch, train_loss_epoch
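
# A minimal usage sketch, kept commented out. Everything below is illustrative:
# `Options`, `build_models`, `run_epoch`, `make_loader` and `Logger` are
# hypothetical stand-ins for the project's actual option parser, model
# builders, epoch function, data loader and value logger; only the call
# signature of context_train above is taken from this file.
#
# if __name__ == '__main__':
#     import torch
#     opt = Options().parse()                        # hypothetical option parser
#     discriminator, fcn, coAttn, context_fn = build_models(opt)  # hypothetical
#     train_loader = make_loader(opt)                # hypothetical data loader
#     optimizer = torch.optim.Adam(context_fn.parameters(), lr=opt.lr)
#     loss_fn = torch.nn.BCELoss()
#     logger = Logger(opt.log_dir)                   # hypothetical value logger
#     for epoch in range(opt.epochs):
#         acc, loss = context_train(epoch, run_epoch, opt, train_loader,
#                                   discriminator, context_fn, logger,
#                                   optimizer=optimizer, loss_fn=loss_fn,
#                                   fcn=fcn, coAttn=coAttn)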