Andrew Ng Deep Learning, Course 1 Week 4, Assignment 2: Deep Neural Network for Image Classification: Application
In [1]:
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v2 import *

%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

%load_ext autoreload
%autoreload 2

np.random.seed(1)
In [2]:
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
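load_data comes from dnn_app_utils_v2, which is not reproduced in this post. Below is a minimal sketch of what it typically does for this assignment, assuming the standard datasets/train_catvnoncat.h5 and datasets/test_catvnoncat.h5 files and the HDF5 key names from the course materials; treat the paths and keys as assumptions if your copy differs.

# Sketch of load_data from dnn_app_utils_v2 (file paths and HDF5 keys are
# assumptions based on the standard course dataset; adjust to your copy).
import numpy as np
import h5py

def load_data():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # image tensors
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # 0/1 labels

    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])

    classes = np.array(test_dataset["list_classes"][:])  # b'non-cat', b'cat'

    # Reshape labels to row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes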
In [6]:
# Example of a picture
index = 2
plt.imshow(train_x_orig[index])
print("y = " + str(train_y[0, index]) + ". It's a " + classes[train_y[0, index]].decode("utf-8") + " picture.")
In [8]:
# Explore your dataset
print(train_x_orig.shape)
print(train_y.shape)
print(test_x_orig.shape)
print(test_y.shape)

m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]

print("Number of training examples: " + str(m_train))
print("Number of testing examples: " + str(m_test))
print("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print("train_x_orig shape: " + str(train_x_orig.shape))
print("train_y shape: " + str(train_y.shape))
print("test_x_orig shape: " + str(test_x_orig.shape))
print("test_y shape: " + str(test_y.shape))
In [16]:
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T  # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T

# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten / 255.
test_x = test_x_flatten / 255.

print("train_x's shape: " + str(train_x.shape))
print("test_x's shape: " + str(test_x.shape))
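To see why the reshape-then-transpose puts one flattened image per column, here is a quick self-contained check on a dummy array (the shapes are illustrative only, not taken from the post's output):

import numpy as np

# Dummy batch of 4 RGB images of size 2x2 (shapes are illustrative only)
x = np.arange(4 * 2 * 2 * 3).reshape(4, 2, 2, 3)
x_flat = x.reshape(x.shape[0], -1).T   # -1 flattens the 2*2*3 pixel values

print(x_flat.shape)                                # (12, 4): one image per column
print(np.array_equal(x_flat[:, 1], x[1].ravel()))  # True: column j = image j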
In [17]:
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288  # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
In [18]:
# GRADED FUNCTION: two_layer_model

def two_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
    """
    Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (n_x, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- dimensions of the layers (n_x, n_h, n_y)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- if set to True, this will print the cost every 100 iterations

    Returns:
    parameters -- a dictionary containing W1, W2, b1, and b2
    """

    np.random.seed(1)
    grads = {}
    costs = []       # to keep track of the cost
    m = X.shape[1]   # number of examples
    (n_x, n_h, n_y) = layers_dims

    # Initialize parameters dictionary, by calling one of the functions you'd previously implemented
    ### START CODE HERE ### (≈ 1 line of code)
    parameters = initialize_parameters(n_x, n_h, n_y)
    ### END CODE HERE ###

    # Get W1, b1, W2 and b2 from the dictionary parameters.
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1". Output: "A1, cache1, A2, cache2".
        ### START CODE HERE ### (≈ 2 lines of code)
        A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
        A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
        ### END CODE HERE ###

        # Compute cost
        ### START CODE HERE ### (≈ 1 line of code)
        cost = compute_cost(A2, Y)
        ### END CODE HERE ###

        # Initializing backward propagation
        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))

        # Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
        ### START CODE HERE ### (≈ 2 lines of code)
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")
        ### END CODE HERE ###

        # Set grads['dW1'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2

        # Update parameters.
        ### START CODE HERE ### (approx. 1 line of code)
        parameters = update_parameters(parameters, grads, learning_rate)
        ### END CODE HERE ###

        # Retrieve W1, b1, W2, b2 from parameters
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]

        # Print the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
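two_layer_model leans on initialize_parameters, linear_activation_forward, linear_activation_backward and update_parameters from the previous "Building your Deep Neural Network: Step by Step" assignment, imported via dnn_app_utils_v2. A condensed sketch of the two activation helpers follows; the flat cache layout here is a simplification of the course's nested caches, so read it as an illustration of the math rather than the exact helper code:

# Condensed sketch of the forward/backward helpers used above (assumed to
# match the "Step by Step" assignment; the flat cache layout is an assumption).
import numpy as np

def sigmoid_(Z):
    return 1 / (1 + np.exp(-Z))

def relu_(Z):
    return np.maximum(0, Z)

def linear_activation_forward(A_prev, W, b, activation):
    Z = np.dot(W, A_prev) + b            # linear step: Z = W A_prev + b
    A = relu_(Z) if activation == "relu" else sigmoid_(Z)
    cache = (A_prev, W, b, Z)            # everything the backward pass needs
    return A, cache

def linear_activation_backward(dA, cache, activation):
    A_prev, W, b, Z = cache
    m = A_prev.shape[1]
    if activation == "relu":
        dZ = dA * (Z > 0)                # ReLU gradient: pass only where Z > 0
    else:
        s = sigmoid_(Z)
        dZ = dA * s * (1 - s)            # sigmoid gradient
    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)
    return dA_prev, dW, db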
In [25]:
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
In [26]:
predictions_train = predict(train_x, train_y, parameters)
In [27]:
predictions_test = predict(test_x, test_y, parameters)
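predict is also a dnn_app_utils_v2 helper. In the course version it runs a forward pass, thresholds the sigmoid output at 0.5, and prints the accuracy; a minimal sketch under those assumptions (not the verbatim helper):

# Minimal sketch of predict (assumed behavior: forward pass, 0.5 threshold,
# printed accuracy); relies on L_model_forward from dnn_app_utils_v2.
import numpy as np

def predict(X, y, parameters):
    probas, _ = L_model_forward(X, parameters)   # final sigmoid activations
    p = (probas > 0.5).astype(int)               # hard 0/1 predictions
    print("Accuracy: " + str(np.mean(p == y)))   # fraction of correct labels
    return p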
In [36]:
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1]  # 4-layer model (the input layer is not counted)
In [37]:
# GRADED FUNCTION: L_layer_model

def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):  # lr was 0.009
    """
    Implements an L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    Arguments:
    X -- input data, numpy array of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    np.random.seed(1)
    costs = []  # keep track of cost

    # Parameters initialization.
    ### START CODE HERE ###
    parameters = initialize_parameters_deep(layers_dims)
    ### END CODE HERE ###

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        ### START CODE HERE ### (≈ 1 line of code)
        AL, caches = L_model_forward(X, parameters)
        ### END CODE HERE ###

        # Compute cost.
        ### START CODE HERE ### (≈ 1 line of code)
        cost = compute_cost(AL, Y)
        ### END CODE HERE ###

        # Backward propagation.
        ### START CODE HERE ### (≈ 1 line of code)
        grads = L_model_backward(AL, Y, caches)
        ### END CODE HERE ###

        # Update parameters.
        ### START CODE HERE ### (≈ 1 line of code)
        parameters = update_parameters(parameters, grads, learning_rate)
        ### END CODE HERE ###

        # Print the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
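The deep initializer matters here: with plain 0.01-scaled random weights this network trains poorly, and the course's dnn_app_utils_v2 instead scales each layer's weights by 1/sqrt(size of the previous layer). A sketch of initialize_parameters_deep under that assumption:

# Sketch of initialize_parameters_deep, assuming the 1/sqrt(n_prev) scaling
# used by the course's dnn_app_utils_v2 (plain 0.01 scaling converges poorly
# at this depth).
import numpy as np

def initialize_parameters_deep(layer_dims):
    np.random.seed(1)
    parameters = {}
    L = len(layer_dims)  # number of layers, including the input layer
    for l in range(1, L):
        parameters['W' + str(l)] = (np.random.randn(layer_dims[l], layer_dims[l - 1])
                                    / np.sqrt(layer_dims[l - 1]))
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters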
In [38]:
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
In [40]:
pred_train = predict(train_x, train_y, parameters)
In [41]:
pred_test = predict(test_x, test_y, parameters)
In [42]:
print_mislabeled_images(classes, test_x, test_y, pred_test)
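print_mislabeled_images is another dnn_app_utils_v2 helper. A rough sketch of what it does: plot every test image where the prediction and the label disagree. The subplot layout and the num_px default are assumptions:

# Rough sketch of print_mislabeled_images (subplot layout and num_px default
# are assumptions; the original call above still works unchanged).
import numpy as np
import matplotlib.pyplot as plt

def print_mislabeled_images(classes, X, y, p, num_px=64):
    mislabeled = np.asarray(np.where(p + y == 1))  # 0/1 labels: sum is 1 iff they differ
    num_images = len(mislabeled[0])
    plt.rcParams['figure.figsize'] = (40.0, 40.0)
    for i in range(num_images):
        index = mislabeled[1][i]                   # column index of the example
        plt.subplot(2, num_images, i + 1)
        plt.imshow(X[:, index].reshape(num_px, num_px, 3), interpolation='nearest')
        plt.axis('off')
        plt.title("Prediction: " + classes[int(p[0, index])].decode("utf-8")
                  + " \n Class: " + classes[y[0, index]].decode("utf-8"))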
In [43]:
## START CODE HERE ##
my_image = "my_image.jpg"   # change this to the name of your image file
my_label_y = [1]            # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##

fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px, num_px)).reshape((num_px * num_px * 3, 1))
my_predicted_image = predict(my_image, my_label_y, parameters)

plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
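Note that scipy.ndimage.imread and scipy.misc.imresize were removed in recent SciPy releases, so the cell above only runs in the original course environment. A hedged equivalent using PIL (already imported at the top), assuming the same images/my_image.jpg path; it also scales the pixels by 255, matching the standardization applied to the training data:

# PIL-based replacement for the removed scipy.ndimage.imread /
# scipy.misc.imresize calls; assumes the same images/my_image.jpg path.
import numpy as np
from PIL import Image

fname = "images/" + "my_image.jpg"
image = np.array(Image.open(fname).convert("RGB"))             # H x W x 3 array
resized = np.array(Image.fromarray(image).resize((num_px, num_px)))
my_image = resized.reshape((num_px * num_px * 3, 1)) / 255.    # flatten + scale to [0, 1]
my_predicted_image = predict(my_image, my_label_y, parameters)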