name: "GTANet"
# AlexNet/CaffeNet-architecture regression network.
# Can be initialized from a pretrained CaffeNet; layers whose names contain
# 'gtanet' are NOT transferred from CaffeNet (name mismatch forces fresh init).
# Input:  "images" + "targets" blobs from HDF5 lists; output: 6 regressed
# action values trained with a Euclidean (L2) loss against "targets".

# ---------------------------------------------------------------------------
# Data layers — same layer name for both phases (standard Caffe train/val
# pattern); only the TRAIN source is shuffled.
# ---------------------------------------------------------------------------
layer {
  name: "gta_frames_input_layer"
  type: "HDF5Data"
  top: "images"
  top: "targets"
  hdf5_data_param {
    source: "data/deep_drive/full_train_h5_list.txt"
    batch_size: 64
    shuffle: true
  }
  include { phase: TRAIN }
}
layer {
  name: "gta_frames_input_layer"
  type: "HDF5Data"
  top: "images"
  top: "targets"
  hdf5_data_param {
    source: "data/deep_drive/full_test_h5_list.txt"
    batch_size: 64
  }
  include { phase: TEST }
}

# ---------------------------------------------------------------------------
# Convolutional trunk (standard CaffeNet: conv1..conv5 with ReLU/pool/LRN).
# Each conv has two param blocks: filters (lr_mult 1, decay_mult 1) and
# biases (lr_mult 2, decay_mult 0).
# ---------------------------------------------------------------------------
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "images"
  top: "conv1"
  param { lr_mult: 1 decay_mult: 1 }   # filters
  param { lr_mult: 2 decay_mult: 0 }   # biases
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"   # in-place activation
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 }
}

layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  param { lr_mult: 1 decay_mult: 1 }   # filters
  param { lr_mult: 2 decay_mult: 0 }   # biases
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2    # two-GPU grouping inherited from AlexNet
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 1 }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 }
}

layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param { lr_mult: 1 decay_mult: 1 }   # filters
  param { lr_mult: 2 decay_mult: 0 }   # biases
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}

layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param { lr_mult: 1 decay_mult: 1 }   # filters
  param { lr_mult: 2 decay_mult: 0 }   # biases
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 1 }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}

layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param { lr_mult: 1 decay_mult: 1 }   # filters
  param { lr_mult: 2 decay_mult: 0 }   # biases
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 1 }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}

# ---------------------------------------------------------------------------
# Fully-connected head — '_gtanet' names prevent weight transfer from
# CaffeNet, so fc6/fc7 and the output layer train from scratch.
# ---------------------------------------------------------------------------
layer {
  name: "fc6_gtanet"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1      # learning rate multiplier for the weights
    decay_mult: 1   # weight decay multiplier for the weights
  }
  param {
    lr_mult: 2      # learning rate multiplier for the biases
    decay_mult: 0   # weight decay multiplier for the biases
  }
  inner_product_param {
    num_output: 4096
    weight_filler { type: "gaussian" std: 0.005 }
    bias_filler { type: "constant" value: 1 }
  }
}
layer {
  name: "relu6_gtanet"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6_gtanet"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param { dropout_ratio: 0.5 }
}

layer {
  name: "fc7_gtanet"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1      # learning rate multiplier for the weights
    decay_mult: 1   # weight decay multiplier for the weights
  }
  param {
    lr_mult: 2      # learning rate multiplier for the biases
    decay_mult: 0   # weight decay multiplier for the biases
  }
  inner_product_param {
    num_output: 4096
    weight_filler { type: "gaussian" std: 0.005 }
    bias_filler { type: "constant" value: 1 }
  }
}
layer {
  name: "relu7_gtanet"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7_gtanet"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  # NOTE(review): 0.05 is much lower than drop6's 0.5 — possibly a typo for
  # 0.5; left unchanged, confirm against the original training recipe.
  dropout_param { dropout_ratio: 0.05 }
}

# Output layer: fully-connected linear layer, one output per valid action.
layer {
  name: "gtanet_fctop"
  type: "InnerProduct"
  bottom: "fc7"
  top: "gtanet_fctop"
  param {
    lr_mult: 1      # learning rate multiplier for the weights
    decay_mult: 1   # weight decay multiplier for the weights
  }
  param {
    lr_mult: 2      # learning rate multiplier for the biases
    decay_mult: 0   # weight decay multiplier for the biases
  }
  inner_product_param {
    num_output: 6   # number of output neurons (regressed action values)
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}

# L2 regression loss between predicted actions and HDF5 "targets".
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "gtanet_fctop"
  bottom: "targets"
  top: "loss"
}