TensorFlow Learning Series (1)


This is the Kaggle "Cats vs. Dogs" project.
Here is a good reference repository on GitHub: https://github.com/kevin28520/My-TensorFlow-tutorials
This post mainly covers building the input data pipeline, building the model, and training the network.
The code comes first, with explanations afterwards.
input_data.py

# coding=utf-8
import tensorflow as tf
import numpy as np
import os

#%%
# you need to change this to your data directory
#train_dir = '/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/data/train/'

def get_files(file_dir):
    '''
    Args:
        file_dir: file directory
    Returns:
        list of images and labels
    '''
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):
        # str.split('.') works in both Python 2 and Python 3
        # (Python 3 also accepts the keyword form file.split(sep='.'))
        name = file.split('.')
        if name[0] == 'cat':
            cats.append(file_dir + file)
            label_cats.append(0)
        else:
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))

    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))

    # shuffle images and labels together so they stay aligned
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)

    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]

    return image_list, label_list


#%%
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # capacity: the maximum number of elements the queue can hold
    '''
    Args:
        image: list type
        label: list type
        image_W: image width
        image_H: image height
        batch_size: batch size
        capacity: the maximum elements in queue
    Returns:
        image_batch: 4D tensor [batch_size, width, height, 3], dtype=tf.float32
        label_batch: 1D tensor [batch_size], dtype=tf.int32
    '''
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    # make an input queue
    input_queue = tf.train.slice_input_producer([image, label])

    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)

    ######################################
    # data augmentation should go here
    ######################################

    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)

    # if you want to test the generated batches of images, you might want to comment the following line.
    image = tf.image.per_image_standardization(image)

    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=capacity)

    # you can also use shuffle_batch
#    image_batch, label_batch = tf.train.shuffle_batch([image, label],
#                                                      batch_size=BATCH_SIZE,
#                                                      num_threads=64,
#                                                      capacity=CAPACITY,
#                                                      min_after_dequeue=CAPACITY-1)

    label_batch = tf.reshape(label_batch, [batch_size])
    image_batch = tf.cast(image_batch, tf.float32)

    return image_batch, label_batch


#%% TEST
# To test the generated batches of images
# When training the model, DO comment the following codes.

# import matplotlib.pyplot as plt
#
# BATCH_SIZE = 2
# CAPACITY = 256
# IMG_W = 208
# IMG_H = 208
#
# train_dir = '/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/data/train/'
#
# image_list, label_list = get_files(train_dir)
# image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
#
# with tf.Session() as sess:
#    i = 0
#    coord = tf.train.Coordinator()
#    threads = tf.train.start_queue_runners(coord=coord)
#
#    try:
#        while not coord.should_stop() and i < 1:
#
#            img, label = sess.run([image_batch, label_batch])
#
#            # just test one batch
#            for j in np.arange(BATCH_SIZE):
#                print('label: %d' % label[j])
#                plt.imshow(img[j, :, :, :])
#                plt.show()
#            i += 1
#
#    except tf.errors.OutOfRangeError:
#        print('done!')
#    finally:
#        coord.request_stop()
#    coord.join(threads)

#%%
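One detail worth calling out before running the pipeline: get_files() derives the labels purely from the file names, so the data directory must follow the Kaggle "Dogs vs. Cats" naming scheme (cat.0.jpg, dog.0.jpg, ...). A tiny illustration of the labeling rule (the file names below are hypothetical):

for fname in ['cat.0.jpg', 'dog.12499.jpg']:
    prefix = fname.split('.')[0]
    label = 0 if prefix == 'cat' else 1  # 0 = cat, 1 = dog, the same convention as get_files()
    print('%s -> %d' % (fname, label))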

model.py

import tensorflow as tf

#%%
def inference(images, batch_size, n_classes):
    '''Build the model
    Args:
        images: image batch, 4D tensor, tf.float32, [batch_size, width, height, channels]
    Returns:
        output tensor with the computed logits, float, [batch_size, n_classes]
    '''
    # conv1, shape = [kernel size, kernel size, channels, kernel numbers]
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # pool1 and norm1
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm1')

    # conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # pool2 and norm2
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME', name='pooling2')

    # local3 (fully connected)
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # local4 (fully connected)
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # softmax
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear


#%%
def losses(logits, labels):
    '''Compute loss from logits and labels
    Args:
        logits: logits tensor, float, [batch_size, n_classes]
        labels: label tensor, tf.int32, [batch_size]
    Returns:
        loss tensor of float type
    '''
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


#%%
def trainning(loss, learning_rate):
    '''Training ops, the Op returned by this function is what must be passed to
        'sess.run()' call to cause the model to train.
    Args:
        loss: loss tensor, from losses()
    Returns:
        train_op: The op for trainning
    '''
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


#%%
def evaluation(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size], with values in the
            range [0, NUM_CLASSES).
    Returns:
        A scalar int32 tensor with the number of examples (out of batch_size)
        that were predicted correctly.
    """
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy

#%%
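If you want to confirm that the layer shapes line up before launching a full training run, a minimal sketch like the one below builds the graph on random data instead of real images (the batch size and image size are simply the values used later in training.py):

# shape check for model.py -- a sketch only, using random data
import tensorflow as tf
import model

with tf.Graph().as_default():
    fake_images = tf.random_uniform([64, 208, 208, 3], dtype=tf.float32)
    logits = model.inference(fake_images, batch_size=64, n_classes=2)
    print(logits)  # should report a float32 tensor of shape (64, 2)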

training.py

#%%
import os
import numpy as np
import tensorflow as tf
import input_data
import model

#%%
N_CLASSES = 2
IMG_W = 208  # resize the image, if the input image is too large, training will be very slow.
IMG_H = 208
BATCH_SIZE = 64
CAPACITY = 2000
MAX_STEP = 15000  # with current parameters, it is suggested to use MAX_STEP>10k
learning_rate = 0.0001  # with current parameters, it is suggested to use learning rate<0.0001

#%%
def run_training():

    # you need to change the directories to yours.
    train_dir = '/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/data/train/'
    logs_train_dir = '/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/logs/train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()

run_training()


#%% Evaluate one image
# when training, comment the following codes.

# from PIL import Image
# import matplotlib.pyplot as plt
#
# def get_one_image(train):
#    '''Randomly pick one image from training data
#    Return: ndarray
#    '''
#    n = len(train)
#    ind = np.random.randint(0, n)
#    img_dir = train[ind]
#
#    image = Image.open(img_dir)
#    plt.imshow(image)
#    image = image.resize([208, 208])
#    image = np.array(image)
#    return image
#
# def evaluate_one_image():
#    '''Test one image against the saved models and parameters
#    '''
#
#    # you need to change the directories to yours.
#    train_dir = '/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/data/train/'
#    train, train_label = input_data.get_files(train_dir)
#    image_array = get_one_image(train)
#
#    with tf.Graph().as_default():
#        BATCH_SIZE = 1
#        N_CLASSES = 2
#
#        image = tf.cast(image_array, tf.float32)
#        image = tf.image.per_image_standardization(image)
#        image = tf.reshape(image, [1, 208, 208, 3])
#        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
#
#        logit = tf.nn.softmax(logit)
#
#        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
#
#        # you need to change the directories to yours.
#        logs_train_dir = '/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/logs/train/'
#
#        saver = tf.train.Saver()
#
#        with tf.Session() as sess:
#
#            print("Reading checkpoints...")
#            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
#            if ckpt and ckpt.model_checkpoint_path:
#                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
#                saver.restore(sess, ckpt.model_checkpoint_path)
#                print('Loading success, global_step is %s' % global_step)
#            else:
#                print('No checkpoint file found')
#
#            prediction = sess.run(logit, feed_dict={x: image_array})
#            max_index = np.argmax(prediction)
#            if max_index == 0:
#                print('This is a cat with possibility %.6f' % prediction[:, 0])
#            else:
#                print('This is a dog with possibility %.6f' % prediction[:, 1])
#
# evaluate_one_image()

#%%
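Because losses() and evaluation() register scalar summaries and run_training() writes them through a tf.summary.FileWriter, you can watch the loss and accuracy curves while training runs. TensorBoard ships with TensorFlow; point it at the log directory used above (substitute your own path):

tensorboard --logdir=/home/ccf/Study/tensorflow/My-TensorFlow-tutorials-master/01_cats_vs_dogs/logs/train/

then open the address it prints (http://localhost:6006 by default) in a browser.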

The GitHub repository linked above also contains the code, along with video walkthroughs, which you can refer to.
A few notes about the original author's videos and code:
(1) The author works in Python 3. If you are on Python 2, the Python 2/3 differences mean the code will not run unmodified and will raise errors. I use Python 2 myself, and after some small changes it runs fine; you are welcome to borrow from the version above (see also the compatibility sketch after these notes).
(2) As for the IDE: I use PyCharm, and the modified code runs there directly. The author uses Spyder, which you can also try; I tested it and it works as well.
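The post does not list the exact edits needed for Python 2, so the following is only a generic compatibility sketch rather than the author's actual changes: adding the __future__ imports at the top of input_data.py, model.py and training.py makes print() and division behave the same under Python 2 and 3, which covers the most common sources of breakage in code like this.

# generic Python 2/3 compatibility header (an assumption, not the author's actual edits)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function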
