[Series Part 14] VGG, MSRANet, and Highway Networks

VGG

Proposed in the paper "Very Deep Convolutional Networks for Large-Scale Image Recognition", VGG builds deeper networks by shrinking the convolution kernel size.


Network structure
In the configuration table of the paper, columns D and E correspond to VGG-16 and VGG-19, the two best-performing configurations. VGG can be viewed as a deepened version of AlexNet. It also works very well as a backbone for object detection (e.g., Faster R-CNN), because this traditional stacked structure preserves local spatial information relatively well (unlike GoogLeNet, where the Inception modules may scramble positional information).
Compared with AlexNet:
· Similarities:
  · the overall structure is divided into five stages;
  · apart from the softmax layer, the last few layers are fully connected;
  · the five stages are connected by max pooling.
· Differences:
  · 3×3 small kernels replace the 7×7 large kernels, which allows the network to be built much deeper (see the parameter-count sketch after this list);
  · LRN is dropped, because it consumes a lot of computation for little benefit;
  · more feature maps are used, so more features can be extracted and more feature combinations become possible.
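To make the small-kernel point concrete, here is a rough parameter count (a minimal sketch; the channel count C = 256 is an arbitrary choice for illustration, and bias terms are ignored). Three stacked 3×3 convolutions cover the same 7×7 receptive field as a single 7×7 convolution, but with 27·C² parameters instead of 49·C², plus two extra non-linearities in between:

def conv_params(kernel_size, channels):
    # weights of one conv layer with `channels` input and output channels (no bias)
    return kernel_size * kernel_size * channels * channels

C = 256  # hypothetical channel count, for illustration only
one_7x7 = conv_params(7, C)        # 49 * C^2 = 3,211,264
three_3x3 = 3 * conv_params(3, C)  # 27 * C^2 = 1,769,472
print(one_7x7, three_3x3)          # the 3x3 stack uses roughly 45% fewer parameters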

VGG code practice

VGG-16/VGG-19
We use the CIFAR-100 dataset (note: such complex networks do not perform particularly well on a dataset this small).
# -*- coding: utf-8 -*-
import copy
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os
from matplotlib.pyplot import plot,savefig
from scipy.misc import toimage
from keras.datasets import cifar100,mnist
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.optimizers import SGD, RMSprop
from keras.utils import np_utils
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import tensorflow as tf
tf.python.control_flow_ops = tf  # compatibility workaround for older Keras/TensorFlow combinations
from PIL import Image
def data_visualize(x, y, num):
    plt.figure()
    for i in range(0, num * num):
        axes = plt.subplot(num, num, i + 1)
        axes.set_title("label=" + str(y[i]))
        axes.set_xticks([0, 10, 20, 30])
        axes.set_yticks([0, 10, 20, 30])
        plt.imshow(toimage(x[i]))
    plt.tight_layout()
    plt.savefig('sample.jpg')
def build_VGG_16(s):
    model = Sequential()
    fm = 3  # all convolutions use 3x3 kernels
    # Block 1: two 64-channel convolutions
    model.add(ZeroPadding2D((1, 1), input_shape=s))
    model.add(Convolution2D(64, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 2: two 128-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 3: three 256-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 4: three 512-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 5: three 512-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Classifier: two 4096-unit fully connected layers + 100-way softmax
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='softmax'))
    return model
def build_VGG_19(s):
    model = Sequential()
    fm = 3  # all convolutions use 3x3 kernels
    # Block 1: two 64-channel convolutions
    model.add(ZeroPadding2D((1, 1), input_shape=s))
    model.add(Convolution2D(64, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 2: two 128-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 3: four 256-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 4: four 512-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Block 5: four 512-channel convolutions
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, fm, fm, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Classifier: two 4096-unit fully connected layers + 100-way softmax
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='softmax'))
    return model
if __name__ == "__main__":
    from keras.utils.visualize_util import plot

    with tf.device('/gpu:2'):
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1, allow_growth=True)
        os.environ["CUDA_VISIBLE_DEVICES"] = "2"
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                log_device_placement=True,
                                                gpu_options=gpu_options))
        K.set_session(sess)  # make Keras use this session

        (X_train, y_train), (X_test, y_test) = cifar100.load_data()
        data_visualize(X_train, y_train, 4)
        s = X_train.shape[1:]
        print(s)
        model = build_VGG_16(s)  # or build_VGG_19(s)
        model.summary()
        plot(model, to_file="VGG.jpg", show_shapes=True)

        # Define the input data and normalize it
        dim = 32
        channel = 3
        class_num = 100
        X_train = X_train.reshape(X_train.shape[0], dim, dim, channel).astype('float32') / 255
        X_test = X_test.reshape(X_test.shape[0], dim, dim, channel).astype('float32') / 255
        Y_train = np_utils.to_categorical(y_train, class_num)
        Y_test = np_utils.to_categorical(y_test, class_num)

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=25,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=False,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(X_train)

        # training
        model.compile(loss='categorical_crossentropy',
                      optimizer='adadelta',
                      metrics=['accuracy'])
        batch_size = 32
        nb_epoch = 10
        # save the model after every epoch
        checkpoint = ModelCheckpoint("weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5",
                                     monitor='val_loss', verbose=0, save_best_only=False,
                                     save_weights_only=False, mode='auto')
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
                  validation_data=(X_test, Y_test), callbacks=[checkpoint])
        score = model.evaluate(X_test, Y_test, verbose=0)
        print('Test score:', score[0])
        print('Test accuracy:', score[1])

MSRANet

This network has two highlights: it introduces PReLU and a robust parameter initialization method.
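A word on the second highlight before turning to PReLU: the initialization draws each weight from a zero-mean Gaussian whose standard deviation is sqrt(2 / fan_in) (fan_in = k·k·c for a k×k convolution over c input channels), which keeps activation variances roughly stable through many ReLU/PReLU layers. A minimal NumPy sketch (illustrative only; the layer sizes are made up):

import numpy as np

def msra_init(shape, fan_in):
    # He et al.: std = sqrt(2 / fan_in), designed for ReLU-family activations
    return np.random.normal(0.0, np.sqrt(2.0 / fan_in), size=shape)

# e.g. a 3x3 convolution with 64 input channels and 128 filters (hypothetical sizes)
W = msra_init((3, 3, 64, 128), fan_in=3 * 3 * 64)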

PReLU

Earlier posts covered some drawbacks of the plain ReLU; PReLU is one way to address them.
The question is how to keep useful information on the negative side. One approach is to make the negative-side slope a learnable value rather than a hand-picked constant (as in Leaky ReLU).
The Parametric Rectifier is defined as:

f(y_i) = y_i,         if y_i > 0
f(y_i) = a_i · y_i,   if y_i <= 0

or equivalently f(y_i) = max(0, y_i) + a_i · min(0, y_i), where the slope a_i is learned (per channel, or shared across channels).
By the chain rule of error back-propagation, the gradient with respect to a_i is:

∂E/∂a_i = Σ_{y_i} (∂E/∂f(y_i)) · (∂f(y_i)/∂a_i),  where ∂f(y_i)/∂a_i = 0 if y_i > 0, and y_i otherwise.

With the momentum method, a_i is updated as:

Δa_i := μ · Δa_i + ε · ∂E/∂a_i

and no weight decay is applied to a_i, to avoid pushing it toward zero.
For details, see Kaiming He et al., "Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification".
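As a quick illustration of the formulas above, here is a minimal NumPy sketch of the channel-shared PReLU forward pass and the gradient of the slope a (illustrative only, not the authors' implementation; Keras 1.x also ships a ready-made layer in keras.layers.advanced_activations.PReLU):

import numpy as np

def prelu_forward(y, a):
    # f(y) = y for y > 0, a*y otherwise
    return np.where(y > 0, y, a * y)

def prelu_grad_a(y, grad_out):
    # dE/da = sum over units of dE/df(y) * df(y)/da, with df/da = 0 for y > 0 and y otherwise
    return np.sum(grad_out * np.where(y > 0, 0.0, y))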

Highway Networks

Highway Networks (from the paper "Highway Networks") is, in my view, a bridge between earlier and later architectures. It borrows the idea of gates from LSTM (which will be introduced later in this series). The structure is very generic (an overly generic structure is not necessarily a good thing), and it offers one way to build much deeper networks. Each highway layer computes:

y = H(x, W_H) · T(x, W_T) + x · (1 - T(x, W_T))

where H is an ordinary non-linear transform and T is the transform gate (typically a sigmoid): when T approaches 1 the layer behaves like a plain layer, and when T approaches 0 it simply carries its input through unchanged.
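A minimal NumPy sketch of a single fully connected highway layer, following the gating formula above (illustrative only; the parameter names W_H, b_H, W_T, b_T are my own, and input and output must have the same dimensionality so that the carry path x · (1 - T) is well defined):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def highway_layer(x, W_H, b_H, W_T, b_T):
    H = np.tanh(np.dot(x, W_H) + b_H)   # candidate transformation
    T = sigmoid(np.dot(x, W_T) + b_T)   # transform gate in (0, 1)
    return H * T + x * (1.0 - T)        # carry the input through where T is small

The paper suggests initializing the gate bias b_T to a negative value (e.g. -1 to -3) so that layers start out mostly carrying their inputs through, which helps very deep stacks train from the start.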

