TensorFlow from Getting Started to Giving Up --- Part 3

Visualizing the feature maps of a VGG convolutional neural network

import os

import numpy as np
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt

import tensorflow as tf

# Convolution layer: 2-D convolution with the pretrained kernel, then add the bias

def _conv_layer(input, weights, bias):
    conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
            padding='SAME')
    return tf.nn.bias_add(conv, bias)
# Pooling layer: 2x2 max pooling with stride 2

def _pool_layer(input):
    return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
            padding='SAME')

# Subtract the VGG mean pixel from the image (and add it back to undo the preprocessing)
def preprocess(image, mean_pixel):
    return image - mean_pixel

def unprocess(image, mean_pixel):
    return image + mean_pixel

# Read an image from a path as a float array
def imread(path):
    return scipy.misc.imread(path).astype(np.float)
# Save an image, clipping pixel values to [0, 255]

def imsave(path, img):
    img = np.clip(img, 0, 255).astype(np.uint8)
    scipy.misc.imsave(path, img)
print ("Functions for VGG ready")

# Build the VGG-19 network from the pretrained .mat weights

def net(data_path, input_image):

    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    # Get the mean pixel used to normalize VGG inputs
    data = scipy.io.loadmat(data_path)
    mean = data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    # Get the pretrained VGG weights
    weights = data['layers'][0]


    # Build a dict that maps each layer name to its output tensor
    net = {}
    current = input_image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]

            # Convert the MatConvNet kernel layout to the TensorFlow layout
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = _conv_layer(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = _pool_layer(current)

        # net[name] holds the forward-pass output of layer `name`
        net[name] = current
    assert len(net) == len(layers)
    return net, mean_pixel, layers  
print ("Network for VGG ready")


# Get the current working directory

cwd  = os.getcwd()

# Path to the pretrained VGG-19 model (a .mat file that stores the network's weights and biases)
VGG_PATH = cwd + "/data/imagenet-vgg-verydeep-19.mat"
IMG_PATH = cwd + "/data/cat.jpg"
input_image = imread(IMG_PATH)
shape = (1, input_image.shape[0], input_image.shape[1], input_image.shape[2])
with tf.Session() as sess:
    image = tf.placeholder('float', shape=shape)

    # Build the graph and get nets, mean_pixel and all_layers
    nets, mean_pixel, all_layers = net(VGG_PATH, image)
    input_image_pre = np.array([preprocess(input_image, mean_pixel)])
    layers = all_layers # For all layers 


    # layers = ('relu2_1', 'relu3_1', 'relu4_1')
    for i, layer in enumerate(layers):
        print ("[%d/%d] %s" % (i+1, len(layers), layer))

        # features holds this layer's feature maps
        features = nets[layer].eval(feed_dict={image: input_image_pre})
        print (" Type of 'features' is ", type(features))
        print (" Shape of 'features' is %s" % (features.shape,))
        # Plot the first channel of the response
        plt.figure(i+1, figsize=(10, 5))
        plt.matshow(features[0, :, :, 0], cmap=plt.cm.gray, fignum=i+1)
        plt.title(layer)
        plt.colorbar()
        plt.show()
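
The loop above only plots channel 0 of each layer. As a rough sketch (the show_channels helper below is a hypothetical addition that reuses the features array produced inside the loop), the first few channels of a layer can be tiled into a single figure:

import numpy as np
import matplotlib.pyplot as plt

def show_channels(features, layer_name, num_channels=16):
    # features has shape (1, height, width, channels); tile the first
    # num_channels channels of the first image into a 4-column grid.
    num_channels = min(num_channels, features.shape[3])
    cols = 4
    rows = int(np.ceil(num_channels / float(cols)))
    plt.figure(figsize=(2 * cols, 2 * rows))
    for c in range(num_channels):
        plt.subplot(rows, cols, c + 1)
        plt.imshow(features[0, :, :, c], cmap='gray')
        plt.axis('off')
        plt.title('%s[%d]' % (layer_name, c), fontsize=8)
    plt.tight_layout()
    plt.show()

# Example usage inside the visualization loop:
# show_channels(features, layer)

Note that the script uses the TF1-style tf.Session/tf.placeholder API; on TensorFlow 2.x it would need tf.compat.v1 (with eager execution disabled) or a rewrite.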

Test results:

Input:


Output:

Feature map output by the first convolutional layer:


Feature map output by the first ReLU layer:


Feature map after the third convolutional layer: