import argparse
import os
import pickle

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import multi_gpu_model

from network import network
from load_data import load_training_batch
from exp_fusion import *  # provides exp_map, used by the exposure-fusion loss
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epochs', type=int, default=100, help='number of epochs for training')
parser.add_argument('-b', '--batch_size', type=int, default=32, help='batch size for training')
parser.add_argument('-l', '--log_file', type=str, default='log', help='log file name to be saved')
parser.add_argument('-exp', '--experiment_title', type=str, default='isp_learn', help='experiment title, used as a folder name for saving the respective files')
parser.add_argument('-w', '--weights_file', type=str, default='weights', help='weight file name prefix used while saving')
parser.add_argument('-o', '--optimizer_weights', type=str, default='opt', help='optimizer state file name prefix used while saving')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001, help='initial learning rate for the optimizer')
parser.add_argument('-dataset', '--dataset_path', type=str, default='/home/puneesh/isp_learn', help='complete path to the dataset')
parser.add_argument('-save', '--save_path', type=str, default='/home/puneesh/deep_isp_exps', help='path where weights are to be saved')
parser.add_argument('-resume_weight', '--file_to_resume_training', type=str, help='name of the weight file to resume training from')
parser.add_argument('--resume_train', action='store_true', default=False, help='set this flag to resume training from the given weight and optimizer files')
parser.add_argument('--resume_opt', type=str, help='optimizer state file to resume training from')
args = parser.parse_args()
n_epochs = args.epochs
n_batch = args.batch_size
log_file = args.log_file
weights_file = args.weights_file
lr = args.learning_rate
opt_file = args.optimizer_weights
exp_folder = args.experiment_title
save_path = args.save_path
dataset_dir = args.dataset_path
resume_weight = args.file_to_resume_training
resume_train = args.resume_train
resume_opt = args.resume_opt
current_path = os.getcwd()
if not os.path.exists(os.path.join(save_path, exp_folder)):
    os.mkdir(os.path.join(save_path, exp_folder))
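
# Structural similarity loss: 1 - mean SSIM between target and prediction
# (images are assumed to lie in [0, 1], hence max_val=1.0).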
def mssim(y_true, y_pred):
    costs = 1.0 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 1.0))
    return costs
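
# Color loss: cosine distance between the per-pixel RGB vectors of the target
# and the prediction (1 - mean cosine similarity over all pixels).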
def color(y_true, y_pred):
    ytn = tf.math.l2_normalize(y_true, axis=-1, epsilon=1e-9)
    ypn = tf.math.l2_normalize(y_pred, axis=-1, epsilon=1e-9)
    color_cos = tf.einsum('aijk,aijk->aij', ytn, ypn)
    ca_mean = 1.0 - tf.reduce_mean(color_cos)
    return ca_mean
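
# Perceptual loss: MSE between VGG16 'block4_pool' features of target and
# prediction. Note: this helper is not referenced below; the compiled model
# instead applies a plain 'mse' on precomputed VGG features.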
def vgg_loss(y_true, y_pred):
    cost = tf.reduce_mean(tf.math.square(tf.math.subtract(vgg1(y_true), vgg1(y_pred))))
    return cost
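
# Exposure-fusion loss: mean absolute difference between exp_map outputs of
# the target and the prediction; exp_map is provided by exp_fusion.py.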
def exp_fusion(y_true, y_pred):
    costs = tf.reduce_mean(tf.math.abs(tf.math.subtract(exp_map(y_true, 1, 1, 1), exp_map(y_pred, 1, 1, 1))))
    return costs
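
# Step decay schedule: multiply the learning rate by 0.8 every 50 epochs.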
def lr_decay(lr, epoch):
    if epoch % 50 == 0:
        lr = lr * 0.8
    return lr
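
# Training loop: each epoch reloads a fresh set of training patches, runs
# mini-batch updates, saves the inner single-GPU model's weights, decays the
# learning rate, and checkpoints the optimizer state every 5 epochs.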
def train(d_par, vgg, n_epochs, n_batch, f, current_path, save_path, exp_folder, weights_file, dataset_dir):
    train_size = 5000
    bat_per_epo = int(train_size / n_batch)
    for i in range(n_epochs):
        raw, canon = load_training_batch(dataset_dir, train_size, PATCH_WIDTH=224, PATCH_HEIGHT=224, DSLR_SCALE=2)
        for j in range(bat_per_epo):
            ix = np.random.randint(0, train_size, n_batch)
            X_real = canon[ix]
            X_in = raw[ix]
            d_loss = d_par.train_on_batch(X_in, [X_real, X_real, X_real, X_real, vgg.predict(X_real)])
            f.write('>%d, %d/%d, d=%.3f, mae=%.3f, mssim=%.3f, color=%.3f, exp_fus=%.5f, vgg=%.5f' % (i + 1, j + 1, bat_per_epo, d_loss[0], d_loss[1], d_loss[2], d_loss[3], d_loss[4], d_loss[5]))
            f.write('\n')
            print('>%d, %d/%d, d=%.3f, mae=%.3f, mssim=%.3f, color=%.3f, exp_fus=%.5f, vgg=%.5f' % (i + 1, j + 1, bat_per_epo, d_loss[0], d_loss[1], d_loss[2], d_loss[3], d_loss[4], d_loss[5]))
        # Save the weights of the inner single-GPU model ('model_3' is its
        # auto-assigned Keras layer name inside the multi-GPU wrapper).
        filename = os.path.join(save_path, exp_folder, weights_file + '_%04d.h5' % (i + 1))
        d_save = d_par.get_layer('model_3')
        d_save.save_weights(filename)
        K.set_value(d_par.optimizer.lr, lr_decay(K.get_value(d_par.optimizer.lr), i + 1))
        # Checkpoint the optimizer state every 5 epochs so training can resume.
        if (i + 1) % 5 == 0:
            symbolic_weights = getattr(d_par.optimizer, 'weights')
            weight_values = K.batch_get_value(symbolic_weights)
            with open(os.path.join(save_path, exp_folder, opt_file + '_%04d.pkl' % (i + 1)), 'wb') as f2:
                pickle.dump(weight_values, f2)
            del symbolic_weights
            del weight_values
        del raw
        del canon
    f.close()
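
# Build the VGG16 feature extractor (up to block4_pool) used as the target of
# the perceptual loss; its layers are frozen so it is never trained.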
in_shape = (224, 224, 4)
base_vgg = VGG16(weights='imagenet', include_top=False, input_shape=(448, 448, 3))
vgg1 = Model(inputs=base_vgg.input, outputs=base_vgg.get_layer('block4_pool').output)
for layer in vgg1.layers:
    layer.trainable = False
vgg = multi_gpu_model(vgg1, gpus=4, cpu_relocation=True)
vgg.summary()

d_model = network(vgg1, inp_shape=in_shape, trainable=True)
d_model.summary()
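
# If resuming, load the saved weights into the single-GPU model before
# wrapping it for multi-GPU training.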
if resume_train:
    file_dis = os.path.join(save_path, exp_folder, resume_weight)
    d_model.load_weights(file_dis)

d_par = multi_gpu_model(d_model, gpus=4, cpu_relocation=True)
d_par.summary()

if resume_train:
    d_par.layers[-6].set_weights(d_model.get_weights())
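
# Compile with one loss per model output: MAE, SSIM, color and exposure-fusion
# losses on the image outputs, and MSE on the VGG feature output.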
opt = Adam(lr=lr, beta_1=0.5)
d_par.compile(loss=['mae', mssim, color, exp_fusion, 'mse'], optimizer=opt, loss_weights=[5.0, 1.0, 0.5, 0.5, 1.0])
d_par.summary()
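
# If resuming, restore the optimizer state; the train function is built first
# so the optimizer weights exist before they are set.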
if resume_train:
    d_par._make_train_function()
    file_d_opt = os.path.join(save_path, exp_folder, resume_opt)
    with open(file_d_opt, 'rb') as f3:
        weight_values = pickle.load(f3)
    d_par.optimizer.set_weights(weight_values)
# Open the log in append mode (created if missing), so resumed runs keep
# writing to the same file instead of failing on an existing one.
log_path = os.path.join(save_path, exp_folder, log_file + '.txt')
f = open(log_path, 'a')
train(d_par, vgg, n_epochs, n_batch, f, current_path, save_path, exp_folder, weights_file, dataset_dir)