Hi,
When I run your code, I get a kernel-restart error.
So I made my own prototxt with a ResNet model.
At the end, the Convolution18 layer does not receive any values — it outputs just zeros. Please guide me.
# Python data layer: the "ResCustomData" module/class produces the network
# input blob "Python1".
# param_str "1,3,300,600" presumably encodes N,C,H,W = 1x3x300x600 --
# TODO confirm against the ResCustomData implementation.
# NOTE(review): the reported "kernel restarting" crash most likely originates
# inside this Python layer (or its environment), not in the prototxt itself.
layer {
name: "FineCustomData"
type: "Python"
top: "Python1"
python_param {
module: "ResCustomData"
layer: "ResCustomData"
param_str: "1,3,300,600"
}
}
# ---------------------------------------------------------------------------
# Stem: two (Conv 3x3/64 -> BatchNorm -> Scale -> ReLU) stages followed by a
# 3x3 stride-1 max pool and one more BatchNorm/Scale pair.
# Both stem convs use pad: 0, so each shrinks the map by 2 px per dimension
# (assuming 300x600 input: 300x600 -> 298x598 -> 296x596 -> pool -> 294x594).
# BatchNorm/Scale/ReLU write in place (top == bottom) to save memory.
# ---------------------------------------------------------------------------
layer {
name: "Convolution1"
type: "Convolution"
bottom: "Python1"
top: "Convolution1"
convolution_param {
num_output: 64
bias_term: false
pad: 0
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm1"
type: "BatchNorm"
bottom: "Convolution1"
top: "Convolution1"
# lr_mult/decay_mult pinned to 0 on all three blobs: Caffe BatchNorm's blobs
# are running statistics (mean, variance, moving-average factor), which are
# updated internally, not trained by SGD.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
# Scale supplies the learnable gamma (scale) / beta (bias) that Caffe's
# BatchNorm layer itself does not provide.
layer {
name: "Scale1"
type: "Scale"
bottom: "Convolution1"
top: "Convolution1"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU1"
type: "ReLU"
bottom: "Convolution1"
top: "Convolution1"
}
# Second stem stage: identical Conv/BN/Scale/ReLU pattern.
layer {
name: "Convolution2"
type: "Convolution"
bottom: "Convolution1"
top: "Convolution2"
convolution_param {
num_output: 64
bias_term: false
pad: 0
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm2"
type: "BatchNorm"
bottom: "Convolution2"
top: "Convolution2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale2"
type: "Scale"
bottom: "Convolution2"
top: "Convolution2"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU2"
type: "ReLU"
bottom: "Convolution2"
top: "Convolution2"
}
# 3x3/stride-1 max pool (no padding): shrinks the map by 2 px per dimension
# but does not downsample.
layer {
name: "Pooling1"
type: "Pooling"
bottom: "Convolution2"
top: "Pooling1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
}
}
# BN/Scale after the pool, in place on Pooling1; its output feeds both the
# main branch (Convolution3) and the shortcut branch (Convolution5) below.
layer {
name: "BatchNorm3"
type: "BatchNorm"
bottom: "Pooling1"
top: "Pooling1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale3"
type: "Scale"
bottom: "Pooling1"
top: "Pooling1"
scale_param {
bias_term: true
}
}
# ---------------------------------------------------------------------------
# Stage 1 (128 channels): one downsampling residual block + one identity
# residual block.
#   Main branch:  Conv3 (3x3/s2) -> BN4/Scale4/ReLU3 -> Conv4 (3x3/s1)
#   Shortcut:     Conv5 (1x1/s2) projection from Pooling1
#   Eltwise1 = Conv4 + Conv5
# NOTE(review): Conv4/Conv5 (and the other residual-branch convs in this
# file) leave bias_term at its default (true), while the stem/downsample
# convs set bias_term: false -- probably an unintentional inconsistency;
# harmless, but worth unifying.
# ---------------------------------------------------------------------------
layer {
name: "Convolution3"
type: "Convolution"
bottom: "Pooling1"
top: "Convolution3"
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm4"
type: "BatchNorm"
bottom: "Convolution3"
top: "Convolution3"
# lr_mult/decay_mult 0: BatchNorm blobs are running statistics, not trained.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale4"
type: "Scale"
bottom: "Convolution3"
top: "Convolution3"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU3"
type: "ReLU"
bottom: "Convolution3"
top: "Convolution3"
}
layer {
name: "Convolution4"
type: "Convolution"
bottom: "Convolution3"
top: "Convolution4"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
# 1x1/stride-2 projection shortcut: matches Conv4 in both channel count (128)
# and spatial size so the element-wise sum below is valid.
layer {
name: "Convolution5"
type: "Convolution"
bottom: "Pooling1"
top: "Convolution5"
convolution_param {
num_output: 128
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise1"
type: "Eltwise"
bottom: "Convolution4"
bottom: "Convolution5"
top: "Eltwise1"
eltwise_param {
operation: SUM
}
}
# Post-sum BN/Scale/ReLU write to a NEW top ("BatchNorm5") rather than in
# place, so the raw Eltwise1 sum stays available for the Eltwise2 skip
# connection below.
layer {
name: "BatchNorm5"
type: "BatchNorm"
bottom: "Eltwise1"
top: "BatchNorm5"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale5"
type: "Scale"
bottom: "BatchNorm5"
top: "BatchNorm5"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU4"
type: "ReLU"
bottom: "BatchNorm5"
top: "BatchNorm5"
}
# Identity residual block: Conv6 -> BN6/Scale6/ReLU5 -> Conv7, summed with
# the unmodified Eltwise1 skip path.
layer {
name: "Convolution6"
type: "Convolution"
bottom: "BatchNorm5"
top: "Convolution6"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm6"
type: "BatchNorm"
bottom: "Convolution6"
top: "Convolution6"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale6"
type: "Scale"
bottom: "Convolution6"
top: "Convolution6"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU5"
type: "ReLU"
bottom: "Convolution6"
top: "Convolution6"
}
layer {
name: "Convolution7"
type: "Convolution"
bottom: "Convolution6"
top: "Convolution7"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise2"
type: "Eltwise"
bottom: "Eltwise1"
bottom: "Convolution7"
top: "Eltwise2"
eltwise_param {
operation: SUM
}
}
# Stage-exit BN/Scale (no ReLU), in place on Eltwise2, which then feeds both
# branches of the next stage.
layer {
name: "BatchNorm7"
type: "BatchNorm"
bottom: "Eltwise2"
top: "Eltwise2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale7"
type: "Scale"
bottom: "Eltwise2"
top: "Eltwise2"
scale_param {
bias_term: true
}
}
# ---------------------------------------------------------------------------
# Stage 2 (256 channels): same structure as the 128-channel stage above --
# downsampling block (Conv8/s2 -> Conv9; shortcut Conv10 1x1/s2; Eltwise3)
# followed by an identity block (Conv11 -> Conv12; Eltwise4), then a
# stage-exit BN/Scale.
# ---------------------------------------------------------------------------
layer {
name: "Convolution8"
type: "Convolution"
bottom: "Eltwise2"
top: "Convolution8"
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm8"
type: "BatchNorm"
bottom: "Convolution8"
top: "Convolution8"
# lr_mult/decay_mult 0: BatchNorm blobs are running statistics, not trained.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale8"
type: "Scale"
bottom: "Convolution8"
top: "Convolution8"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU6"
type: "ReLU"
bottom: "Convolution8"
top: "Convolution8"
}
layer {
name: "Convolution9"
type: "Convolution"
bottom: "Convolution8"
top: "Convolution9"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
# 1x1/stride-2 projection shortcut for the channel/stride change.
layer {
name: "Convolution10"
type: "Convolution"
bottom: "Eltwise2"
top: "Convolution10"
convolution_param {
num_output: 256
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise3"
type: "Eltwise"
bottom: "Convolution9"
bottom: "Convolution10"
top: "Eltwise3"
eltwise_param {
operation: SUM
}
}
# New top keeps the raw Eltwise3 sum intact for the Eltwise4 skip connection.
layer {
name: "BatchNorm9"
type: "BatchNorm"
bottom: "Eltwise3"
top: "BatchNorm9"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale9"
type: "Scale"
bottom: "BatchNorm9"
top: "BatchNorm9"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU7"
type: "ReLU"
bottom: "BatchNorm9"
top: "BatchNorm9"
}
layer {
name: "Convolution11"
type: "Convolution"
bottom: "BatchNorm9"
top: "Convolution11"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm10"
type: "BatchNorm"
bottom: "Convolution11"
top: "Convolution11"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale10"
type: "Scale"
bottom: "Convolution11"
top: "Convolution11"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU8"
type: "ReLU"
bottom: "Convolution11"
top: "Convolution11"
}
layer {
name: "Convolution12"
type: "Convolution"
bottom: "Convolution11"
top: "Convolution12"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise4"
type: "Eltwise"
bottom: "Eltwise3"
bottom: "Convolution12"
top: "Eltwise4"
eltwise_param {
operation: SUM
}
}
# Stage-exit BN/Scale, in place on Eltwise4.
layer {
name: "BatchNorm11"
type: "BatchNorm"
bottom: "Eltwise4"
top: "Eltwise4"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale11"
type: "Scale"
bottom: "Eltwise4"
top: "Eltwise4"
scale_param {
bias_term: true
}
}
# ---------------------------------------------------------------------------
# Stage 3 (512 channels): same structure as the previous stages --
# downsampling block (Conv13/s2 -> Conv14; shortcut Conv15 1x1/s2; Eltwise5)
# followed by an identity block (Conv16 -> Conv17; Eltwise6), then a final
# 3x3/stride-1 max pool.
# ---------------------------------------------------------------------------
layer {
name: "Convolution13"
type: "Convolution"
bottom: "Eltwise4"
top: "Convolution13"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm12"
type: "BatchNorm"
bottom: "Convolution13"
top: "Convolution13"
# lr_mult/decay_mult 0: BatchNorm blobs are running statistics, not trained.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale12"
type: "Scale"
bottom: "Convolution13"
top: "Convolution13"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU9"
type: "ReLU"
bottom: "Convolution13"
top: "Convolution13"
}
layer {
name: "Convolution14"
type: "Convolution"
bottom: "Convolution13"
top: "Convolution14"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
# 1x1/stride-2 projection shortcut for the channel/stride change.
layer {
name: "Convolution15"
type: "Convolution"
bottom: "Eltwise4"
top: "Convolution15"
convolution_param {
num_output: 512
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise5"
type: "Eltwise"
bottom: "Convolution14"
bottom: "Convolution15"
top: "Eltwise5"
eltwise_param {
operation: SUM
}
}
# New top keeps the raw Eltwise5 sum intact for the Eltwise6 skip connection.
layer {
name: "BatchNorm13"
type: "BatchNorm"
bottom: "Eltwise5"
top: "BatchNorm13"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale13"
type: "Scale"
bottom: "BatchNorm13"
top: "BatchNorm13"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU10"
type: "ReLU"
bottom: "BatchNorm13"
top: "BatchNorm13"
}
layer {
name: "Convolution16"
type: "Convolution"
bottom: "BatchNorm13"
top: "Convolution16"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm14"
type: "BatchNorm"
bottom: "Convolution16"
top: "Convolution16"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale14"
type: "Scale"
bottom: "Convolution16"
top: "Convolution16"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU11"
type: "ReLU"
bottom: "Convolution16"
top: "Convolution16"
}
layer {
name: "Convolution17"
type: "Convolution"
bottom: "Convolution16"
top: "Convolution17"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise6"
type: "Eltwise"
bottom: "Eltwise5"
bottom: "Convolution17"
top: "Eltwise6"
eltwise_param {
operation: SUM
}
}
# Final 3x3/stride-1 max pool feeding the prediction conv.
layer {
name: "Pooling2"
type: "Pooling"
bottom: "Eltwise6"
top: "Pooling2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
}
}
# Final prediction layer: 3x3 conv reducing 512 channels to a single-channel
# output map.
# BUG FIX: the original layer specified no weight_filler (and no bias_filler).
# Caffe's default filler is "constant" with value 0, so the weights AND the
# default bias were initialized to all zeros -- which is exactly why this
# layer's output "shows just zero" at the start of training (and its gradient
# w.r.t. the weights keeps it stuck near zero-looking outputs early on).
# Initialize the weights with msra, matching every other conv in this net,
# and make the zero bias initialization explicit.
layer {
name: "Convolution18"
type: "Convolution"
bottom: "Pooling2"
top: "Convolution18"
convolution_param {
num_output: 1
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0
}
}
}
Hi,
When I run your code, I get a kernel-restart error.
So I made my own prototxt with a ResNet model.
At the end, the Convolution18 layer does not receive any values — it outputs just zeros. Please guide me.
# NOTE(review): everything from here down is a verbatim duplicate of the
# message + prototxt at the top of this file (likely a paste artifact);
# consider keeping only one copy.
# Python data layer: the "ResCustomData" module/class produces the network
# input blob "Python1".
# param_str "1,3,300,600" presumably encodes N,C,H,W = 1x3x300x600 --
# TODO confirm against the ResCustomData implementation.
layer {
name: "FineCustomData"
type: "Python"
top: "Python1"
python_param {
module: "ResCustomData"
layer: "ResCustomData"
param_str: "1,3,300,600"
}
}
# ---------------------------------------------------------------------------
# Stem: two (Conv 3x3/64 -> BatchNorm -> Scale -> ReLU) stages followed by a
# 3x3 stride-1 max pool and one more BatchNorm/Scale pair.
# Both stem convs use pad: 0, so each shrinks the map by 2 px per dimension
# (assuming 300x600 input: 300x600 -> 298x598 -> 296x596 -> pool -> 294x594).
# BatchNorm/Scale/ReLU write in place (top == bottom) to save memory.
# ---------------------------------------------------------------------------
layer {
name: "Convolution1"
type: "Convolution"
bottom: "Python1"
top: "Convolution1"
convolution_param {
num_output: 64
bias_term: false
pad: 0
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm1"
type: "BatchNorm"
bottom: "Convolution1"
top: "Convolution1"
# lr_mult/decay_mult pinned to 0 on all three blobs: Caffe BatchNorm's blobs
# are running statistics (mean, variance, moving-average factor), which are
# updated internally, not trained by SGD.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
# Scale supplies the learnable gamma (scale) / beta (bias) that Caffe's
# BatchNorm layer itself does not provide.
layer {
name: "Scale1"
type: "Scale"
bottom: "Convolution1"
top: "Convolution1"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU1"
type: "ReLU"
bottom: "Convolution1"
top: "Convolution1"
}
# Second stem stage: identical Conv/BN/Scale/ReLU pattern.
layer {
name: "Convolution2"
type: "Convolution"
bottom: "Convolution1"
top: "Convolution2"
convolution_param {
num_output: 64
bias_term: false
pad: 0
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm2"
type: "BatchNorm"
bottom: "Convolution2"
top: "Convolution2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale2"
type: "Scale"
bottom: "Convolution2"
top: "Convolution2"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU2"
type: "ReLU"
bottom: "Convolution2"
top: "Convolution2"
}
# 3x3/stride-1 max pool (no padding): shrinks the map by 2 px per dimension
# but does not downsample.
layer {
name: "Pooling1"
type: "Pooling"
bottom: "Convolution2"
top: "Pooling1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
}
}
# BN/Scale after the pool, in place on Pooling1; its output feeds both the
# main branch (Convolution3) and the shortcut branch (Convolution5) below.
layer {
name: "BatchNorm3"
type: "BatchNorm"
bottom: "Pooling1"
top: "Pooling1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale3"
type: "Scale"
bottom: "Pooling1"
top: "Pooling1"
scale_param {
bias_term: true
}
}
# ---------------------------------------------------------------------------
# Stage 1 (128 channels): one downsampling residual block + one identity
# residual block.
#   Main branch:  Conv3 (3x3/s2) -> BN4/Scale4/ReLU3 -> Conv4 (3x3/s1)
#   Shortcut:     Conv5 (1x1/s2) projection from Pooling1
#   Eltwise1 = Conv4 + Conv5
# NOTE(review): Conv4/Conv5 (and the other residual-branch convs in this
# file) leave bias_term at its default (true), while the stem/downsample
# convs set bias_term: false -- probably an unintentional inconsistency;
# harmless, but worth unifying.
# ---------------------------------------------------------------------------
layer {
name: "Convolution3"
type: "Convolution"
bottom: "Pooling1"
top: "Convolution3"
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm4"
type: "BatchNorm"
bottom: "Convolution3"
top: "Convolution3"
# lr_mult/decay_mult 0: BatchNorm blobs are running statistics, not trained.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale4"
type: "Scale"
bottom: "Convolution3"
top: "Convolution3"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU3"
type: "ReLU"
bottom: "Convolution3"
top: "Convolution3"
}
layer {
name: "Convolution4"
type: "Convolution"
bottom: "Convolution3"
top: "Convolution4"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
# 1x1/stride-2 projection shortcut: matches Conv4 in both channel count (128)
# and spatial size so the element-wise sum below is valid.
layer {
name: "Convolution5"
type: "Convolution"
bottom: "Pooling1"
top: "Convolution5"
convolution_param {
num_output: 128
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise1"
type: "Eltwise"
bottom: "Convolution4"
bottom: "Convolution5"
top: "Eltwise1"
eltwise_param {
operation: SUM
}
}
# Post-sum BN/Scale/ReLU write to a NEW top ("BatchNorm5") rather than in
# place, so the raw Eltwise1 sum stays available for the Eltwise2 skip
# connection below.
layer {
name: "BatchNorm5"
type: "BatchNorm"
bottom: "Eltwise1"
top: "BatchNorm5"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale5"
type: "Scale"
bottom: "BatchNorm5"
top: "BatchNorm5"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU4"
type: "ReLU"
bottom: "BatchNorm5"
top: "BatchNorm5"
}
# Identity residual block: Conv6 -> BN6/Scale6/ReLU5 -> Conv7, summed with
# the unmodified Eltwise1 skip path.
layer {
name: "Convolution6"
type: "Convolution"
bottom: "BatchNorm5"
top: "Convolution6"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm6"
type: "BatchNorm"
bottom: "Convolution6"
top: "Convolution6"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale6"
type: "Scale"
bottom: "Convolution6"
top: "Convolution6"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU5"
type: "ReLU"
bottom: "Convolution6"
top: "Convolution6"
}
layer {
name: "Convolution7"
type: "Convolution"
bottom: "Convolution6"
top: "Convolution7"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise2"
type: "Eltwise"
bottom: "Eltwise1"
bottom: "Convolution7"
top: "Eltwise2"
eltwise_param {
operation: SUM
}
}
# Stage-exit BN/Scale (no ReLU), in place on Eltwise2, which then feeds both
# branches of the next stage.
layer {
name: "BatchNorm7"
type: "BatchNorm"
bottom: "Eltwise2"
top: "Eltwise2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale7"
type: "Scale"
bottom: "Eltwise2"
top: "Eltwise2"
scale_param {
bias_term: true
}
}
# ---------------------------------------------------------------------------
# Stage 2 (256 channels): same structure as the 128-channel stage above --
# downsampling block (Conv8/s2 -> Conv9; shortcut Conv10 1x1/s2; Eltwise3)
# followed by an identity block (Conv11 -> Conv12; Eltwise4), then a
# stage-exit BN/Scale.
# ---------------------------------------------------------------------------
layer {
name: "Convolution8"
type: "Convolution"
bottom: "Eltwise2"
top: "Convolution8"
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm8"
type: "BatchNorm"
bottom: "Convolution8"
top: "Convolution8"
# lr_mult/decay_mult 0: BatchNorm blobs are running statistics, not trained.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale8"
type: "Scale"
bottom: "Convolution8"
top: "Convolution8"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU6"
type: "ReLU"
bottom: "Convolution8"
top: "Convolution8"
}
layer {
name: "Convolution9"
type: "Convolution"
bottom: "Convolution8"
top: "Convolution9"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
# 1x1/stride-2 projection shortcut for the channel/stride change.
layer {
name: "Convolution10"
type: "Convolution"
bottom: "Eltwise2"
top: "Convolution10"
convolution_param {
num_output: 256
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise3"
type: "Eltwise"
bottom: "Convolution9"
bottom: "Convolution10"
top: "Eltwise3"
eltwise_param {
operation: SUM
}
}
# New top keeps the raw Eltwise3 sum intact for the Eltwise4 skip connection.
layer {
name: "BatchNorm9"
type: "BatchNorm"
bottom: "Eltwise3"
top: "BatchNorm9"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale9"
type: "Scale"
bottom: "BatchNorm9"
top: "BatchNorm9"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU7"
type: "ReLU"
bottom: "BatchNorm9"
top: "BatchNorm9"
}
layer {
name: "Convolution11"
type: "Convolution"
bottom: "BatchNorm9"
top: "Convolution11"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm10"
type: "BatchNorm"
bottom: "Convolution11"
top: "Convolution11"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale10"
type: "Scale"
bottom: "Convolution11"
top: "Convolution11"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU8"
type: "ReLU"
bottom: "Convolution11"
top: "Convolution11"
}
layer {
name: "Convolution12"
type: "Convolution"
bottom: "Convolution11"
top: "Convolution12"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise4"
type: "Eltwise"
bottom: "Eltwise3"
bottom: "Convolution12"
top: "Eltwise4"
eltwise_param {
operation: SUM
}
}
# Stage-exit BN/Scale, in place on Eltwise4.
layer {
name: "BatchNorm11"
type: "BatchNorm"
bottom: "Eltwise4"
top: "Eltwise4"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale11"
type: "Scale"
bottom: "Eltwise4"
top: "Eltwise4"
scale_param {
bias_term: true
}
}
# ---------------------------------------------------------------------------
# Stage 3 (512 channels): same structure as the previous stages --
# downsampling block (Conv13/s2 -> Conv14; shortcut Conv15 1x1/s2; Eltwise5)
# followed by an identity block (Conv16 -> Conv17; Eltwise6), then a final
# 3x3/stride-1 max pool.
# ---------------------------------------------------------------------------
layer {
name: "Convolution13"
type: "Convolution"
bottom: "Eltwise4"
top: "Convolution13"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm12"
type: "BatchNorm"
bottom: "Convolution13"
top: "Convolution13"
# lr_mult/decay_mult 0: BatchNorm blobs are running statistics, not trained.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale12"
type: "Scale"
bottom: "Convolution13"
top: "Convolution13"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU9"
type: "ReLU"
bottom: "Convolution13"
top: "Convolution13"
}
layer {
name: "Convolution14"
type: "Convolution"
bottom: "Convolution13"
top: "Convolution14"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
# 1x1/stride-2 projection shortcut for the channel/stride change.
layer {
name: "Convolution15"
type: "Convolution"
bottom: "Eltwise4"
top: "Convolution15"
convolution_param {
num_output: 512
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise5"
type: "Eltwise"
bottom: "Convolution14"
bottom: "Convolution15"
top: "Eltwise5"
eltwise_param {
operation: SUM
}
}
# New top keeps the raw Eltwise5 sum intact for the Eltwise6 skip connection.
layer {
name: "BatchNorm13"
type: "BatchNorm"
bottom: "Eltwise5"
top: "BatchNorm13"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale13"
type: "Scale"
bottom: "BatchNorm13"
top: "BatchNorm13"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU10"
type: "ReLU"
bottom: "BatchNorm13"
top: "BatchNorm13"
}
layer {
name: "Convolution16"
type: "Convolution"
bottom: "BatchNorm13"
top: "Convolution16"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "BatchNorm14"
type: "BatchNorm"
bottom: "Convolution16"
top: "Convolution16"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale14"
type: "Scale"
bottom: "Convolution16"
top: "Convolution16"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU11"
type: "ReLU"
bottom: "Convolution16"
top: "Convolution16"
}
layer {
name: "Convolution17"
type: "Convolution"
bottom: "Convolution16"
top: "Convolution17"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "Eltwise6"
type: "Eltwise"
bottom: "Eltwise5"
bottom: "Convolution17"
top: "Eltwise6"
eltwise_param {
operation: SUM
}
}
# Final 3x3/stride-1 max pool feeding the prediction conv.
layer {
name: "Pooling2"
type: "Pooling"
bottom: "Eltwise6"
top: "Pooling2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
}
}
# Final prediction layer: 3x3 conv reducing 512 channels to a single-channel
# output map.
# BUG FIX: the original layer specified no weight_filler (and no bias_filler).
# Caffe's default filler is "constant" with value 0, so the weights AND the
# default bias were initialized to all zeros -- which is exactly why this
# layer's output "shows just zero" at the start of training (and its gradient
# w.r.t. the weights keeps it stuck near zero-looking outputs early on).
# Initialize the weights with msra, matching every other conv in this net,
# and make the zero bias initialization explicit.
layer {
name: "Convolution18"
type: "Convolution"
bottom: "Pooling2"
top: "Convolution18"
convolution_param {
num_output: 1
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0
}
}
}