AlexNet Network Structure

This post is a set of my own study notes. I am self-taught in deep learning and still a beginner, so my understanding may well be off in places; criticism and corrections are welcome.

This article studies deep learning by comparing AlexNet and LeNet in detail, which I find a more focused approach: AlexNet inherits the ideas of LeNet, but more than a decade passed between the two, and the changes AlexNet makes relative to LeNet surely all have their reasons.

First, the AlexNet network structure (copied from caffe/models/bvlc_alexnet/train_val.prototxt):

name: "AlexNet"layer {  name: "data"  type: "Data"  top: "data"  top: "label"  include {    phase: TRAIN  }  transform_param {    mirror: true    crop_size: 227    mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"  }  data_param {    source: "examples/imagenet/ilsvrc12_train_lmdb"    batch_size: 256    backend: LMDB  }}layer {  name: "data"  type: "Data"  top: "data"  top: "label"  include {    phase: TEST  }  transform_param {    mirror: false    crop_size: 227    mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"  }  data_param {    source: "examples/imagenet/ilsvrc12_val_lmdb"    batch_size: 50    backend: LMDB  }}layer {  name: "conv1"  type: "Convolution"  bottom: "data"  top: "conv1"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  convolution_param {    num_output: 96    kernel_size: 11    stride: 4    weight_filler {      type: "gaussian"      std: 0.01    }    bias_filler {      type: "constant"      value: 0    }  }}layer {  name: "relu1"  type: "ReLU"  bottom: "conv1"  top: "conv1"}layer {  name: "norm1"  type: "LRN"  bottom: "conv1"  top: "norm1"  lrn_param {    local_size: 5    alpha: 0.0001    beta: 0.75  }}layer {  name: "pool1"  type: "Pooling"  bottom: "norm1"  top: "pool1"  pooling_param {    pool: MAX    kernel_size: 3    stride: 2  }}layer {  name: "conv2"  type: "Convolution"  bottom: "pool1"  top: "conv2"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  convolution_param {    num_output: 256    pad: 2    kernel_size: 5    group: 2    weight_filler {      type: "gaussian"      std: 0.01    }    bias_filler {      type: "constant"      value: 0.1    }  }}layer {  name: "relu2"  type: "ReLU"  bottom: "conv2"  top: "conv2"}layer {  name: "norm2"  type: "LRN"  bottom: "conv2"  top: "norm2"  lrn_param {    local_size: 5    alpha: 0.0001    beta: 0.75  }}layer {  name: "pool2"  type: "Pooling"  bottom: "norm2"  top: "pool2"  pooling_param {    pool: MAX    kernel_size: 3    stride: 2  }}layer {  name: "conv3"  type: "Convolution"  bottom: "pool2"  top: "conv3"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  convolution_param {    num_output: 384    pad: 1    kernel_size: 3    weight_filler {      type: "gaussian"      std: 0.01    }    bias_filler {      type: "constant"      value: 0    }  }}layer {  name: "relu3"  type: "ReLU"  bottom: "conv3"  top: "conv3"}layer {  name: "conv4"  type: "Convolution"  bottom: "conv3"  top: "conv4"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  convolution_param {    num_output: 384    pad: 1    kernel_size: 3    group: 2    weight_filler {      type: "gaussian"      std: 0.01    }    bias_filler {      type: "constant"      value: 0.1    }  }}layer {  name: "relu4"  type: "ReLU"  bottom: "conv4"  top: "conv4"}layer {  name: "conv5"  type: "Convolution"  bottom: "conv4"  top: "conv5"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  convolution_param {    num_output: 256    pad: 1    kernel_size: 3    group: 2    weight_filler {      type: "gaussian"      std: 0.01    }    bias_filler {      type: "constant"      value: 0.1    }  }}layer {  name: "relu5"  type: "ReLU"  bottom: "conv5"  top: "conv5"}layer {  name: "pool5"  type: "Pooling"  bottom: "conv5"  top: "pool5"  pooling_param {    pool: MAX    kernel_size: 3    stride: 2  }}layer {  name: "fc6"  type: "InnerProduct"  bottom: "pool5"  top: "fc6"  param {    lr_mult: 1    decay_mult: 1  }  
param {    lr_mult: 2    decay_mult: 0  }  inner_product_param {    num_output: 4096    weight_filler {      type: "gaussian"      std: 0.005    }    bias_filler {      type: "constant"      value: 0.1    }  }}layer {  name: "relu6"  type: "ReLU"  bottom: "fc6"  top: "fc6"}layer {  name: "drop6"  type: "Dropout"  bottom: "fc6"  top: "fc6"  dropout_param {    dropout_ratio: 0.5  }}layer {  name: "fc7"  type: "InnerProduct"  bottom: "fc6"  top: "fc7"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  inner_product_param {    num_output: 4096    weight_filler {      type: "gaussian"      std: 0.005    }    bias_filler {      type: "constant"      value: 0.1    }  }}layer {  name: "relu7"  type: "ReLU"  bottom: "fc7"  top: "fc7"}layer {  name: "drop7"  type: "Dropout"  bottom: "fc7"  top: "fc7"  dropout_param {    dropout_ratio: 0.5  }}layer {  name: "fc8"  type: "InnerProduct"  bottom: "fc7"  top: "fc8"  param {    lr_mult: 1    decay_mult: 1  }  param {    lr_mult: 2    decay_mult: 0  }  inner_product_param {    num_output: 1000    weight_filler {      type: "gaussian"      std: 0.01    }    bias_filler {      type: "constant"      value: 0    }  }}layer {  name: "accuracy"  type: "Accuracy"  bottom: "fc8"  bottom: "label"  top: "accuracy"  include {    phase: TEST  }}layer {  name: "loss"  type: "SoftmaxWithLoss"  bottom: "fc8"  bottom: "label"  top: "loss"}
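As a quick sanity check on the listing above (my own addition, not part of the original prototxt), the spatial size of each conv/pool output follows the usual formula out = (in - kernel + 2*pad) / stride + 1. A minimal Python sketch that walks AlexNet's data path:

def out_size(in_size, kernel, stride=1, pad=0):
    # floor((in - k + 2p) / s) + 1 -- standard conv/pool output size
    return (in_size - kernel + 2 * pad) // stride + 1

x = 227                    # crop_size from the data layer
x = out_size(x, 11, 4)     # conv1 (96 outputs): 227 -> 55
x = out_size(x, 3, 2)      # pool1: 55 -> 27
x = out_size(x, 5, 1, 2)   # conv2 (256 outputs, pad 2): stays 27
x = out_size(x, 3, 2)      # pool2: 27 -> 13
x = out_size(x, 3, 1, 1)   # conv3/conv4/conv5 (pad 1): each stays 13
x = out_size(x, 3, 2)      # pool5: 13 -> 6
print(x, 256 * x * x)      # 6 9216

So fc6 maps a 256*6*6 = 9216-dimensional vector to 4096 outputs, which is where the bulk of AlexNet's parameters sit.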
One point worth mentioning: under caffe/models/ there is another folder named bvlc_reference_caffenet, which defines a network called CaffeNet. Comparing the two definitions shows that AlexNet and CaffeNet are almost the same network; CaffeNet merely swaps the order of AlexNet's pool1 and norm1, and of pool2 and norm2. Why Yangqing Jia made this change I am not sure; if you know, please leave a comment. Also, Netscope is a very good tool for visualizing a net's structure.

None of that is the main point, though. The main purpose of this article is to compare LeNet and AlexNet and, through their differences, learn some of the ideas in deep learning beyond convolution itself.

Here is the LeNet network structure (from caffe/examples/mnist/lenet_train_test.prototxt):

-name: "LeNet"layer {  name: "mnist"  type: "Data"  top: "data"  top: "label"  include {    phase: TRAIN  }  transform_param {    scale: 0.00390625  }  data_param {    source: "examples/mnist/mnist_train_lmdb"    batch_size: 64    backend: LMDB  }}layer {  name: "mnist"  type: "Data"  top: "data"  top: "label"  include {    phase: TEST  }  transform_param {    scale: 0.00390625  }  data_param {    source: "examples/mnist/mnist_test_lmdb"    batch_size: 100    backend: LMDB  }}layer {  name: "conv1"  type: "Convolution"  bottom: "data"  top: "conv1"  param {    lr_mult: 1  }  param {    lr_mult: 2  }  convolution_param {    num_output: 20    kernel_size: 5    stride: 1    weight_filler {      type: "xavier"    }    bias_filler {      type: "constant"    }  }}layer {  name: "pool1"  type: "Pooling"  bottom: "conv1"  top: "pool1"  pooling_param {    pool: MAX    kernel_size: 2    stride: 2  }}layer {  name: "conv2"  type: "Convolution"  bottom: "pool1"  top: "conv2"  param {    lr_mult: 1  }  param {    lr_mult: 2  }  convolution_param {    num_output: 50    kernel_size: 5    stride: 1    weight_filler {      type: "xavier"    }    bias_filler {      type: "constant"    }  }}layer {  name: "pool2"  type: "Pooling"  bottom: "conv2"  top: "pool2"  pooling_param {    pool: MAX    kernel_size: 2    stride: 2  }}layer {  name: "ip1"  type: "InnerProduct"  bottom: "pool2"  top: "ip1"  param {    lr_mult: 1  }  param {    lr_mult: 2  }  inner_product_param {    num_output: 500    weight_filler {      type: "xavier"    }    bias_filler {      type: "constant"    }  }}layer {  name: "relu1"  type: "ReLU"  bottom: "ip1"  top: "ip1"}layer {  name: "ip2"  type: "InnerProduct"  bottom: "ip1"  top: "ip2"  param {    lr_mult: 1  }  param {    lr_mult: 2  }  inner_product_param {    num_output: 10    weight_filler {      type: "xavier"    }    bias_filler {      type: "constant"    }  }}layer {  name: "accuracy"  type: "Accuracy"  bottom: "ip2"  bottom: "label"  top: "accuracy"  include {    phase: TEST  }}layer {  name: "loss"  type: "SoftmaxWithLoss"  bottom: "ip2"  bottom: "label"  top: "loss"}