CS231n Assignment2--Fully-connected Neural Network
Course page: http://cs231n.github.io/assignments2016/assignment2/
The main goal here is simply to keep a reasonably complete fully-connected neural network implementation in one place. There is little extra commentary, since the code comments are fairly detailed on their own.
dataset.py
# -*- coding: utf-8 -*-
import numpy as np

def unpickle(file):
    import cPickle
    fo = open(file, 'rb')
    dict = cPickle.load(fo)
    fo.close()
    return dict

# load the CIFAR-10 dataset
def load_CIFAR10(cifar10_dir):
    # get the training data
    X_train = []
    y_train = []
    for i in range(1, 6):
        dic = unpickle(cifar10_dir + "\\data_batch_" + str(i))
        for item in dic["data"]:
            X_train.append(item)
        for item in dic["labels"]:
            y_train.append(item)

    # get the test data
    X_test = []
    y_test = []
    dic = unpickle(cifar10_dir + "\\test_batch")
    for item in dic["data"]:
        X_test.append(item)
    for item in dic["labels"]:
        y_test.append(item)

    # cast the images to float so the mean image can be subtracted in place later
    X_train = np.asarray(X_train, dtype=np.float64)
    y_train = np.asarray(y_train)
    X_test = np.asarray(X_test, dtype=np.float64)
    y_test = np.asarray(y_test)
    return X_train, y_train, X_test, y_test

def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the linear classifier. These are the same steps as we used for the
    SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'E:\python\cs231n\cifar-10-batches-py'  # change this to your local path
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]    # (1000, 3072)
    y_val = y_train[mask]    # (1000,)
    mask = range(num_training)
    X_train = X_train[mask]  # (49000, 3072)
    y_train = y_train[mask]  # (49000,)
    mask = range(num_test)
    X_test = X_test[mask]    # (1000, 3072)
    y_test = y_test[mask]    # (1000,)

    # Preprocessing: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Reshape data to rows (already flat for this loader, kept for clarity)
    X_train = X_train.reshape(num_training, -1)   # (49000, 3072)
    X_val = X_val.reshape(num_validation, -1)     # (1000, 3072)
    X_test = X_test.reshape(num_test, -1)         # (1000, 3072)

    data = {}
    data['X_train'] = X_train
    data['y_train'] = y_train
    data['X_val'] = X_val
    data['y_val'] = y_val
    data['X_test'] = X_test
    data['y_test'] = y_test
    return data
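A quick sanity check, sketched below under the assumption that the module above is saved as dataset.py and the CIFAR-10 batches sit at the hard-coded path, is to print the shape of each split:

import dataset

data = dataset.get_CIFAR10_data()
for k in ['X_train', 'y_train', 'X_val', 'y_val', 'X_test', 'y_test']:
    print k, data[k].shape
# Expected: X_train (49000, 3072), X_val (1000, 3072), X_test (1000, 3072),
# each with a matching 1-D label array.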
fc_net.py
# -*- coding: utf-8 -*-
__coauthor__ = 'Andrew'
# 2.25.2017

import numpy as np
import layers

class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network with ReLU nonlinearity and
    softmax loss that uses a modular layer design. We assume an input
    dimension of D, a hidden dimension of H, and perform classification over
    C classes. The architecture is affine - relu - affine - softmax.

    Note that this class does not implement gradient descent; instead, it
    will interact with a separate Solver object that is responsible for
    running optimization.

    The learnable parameters of the model are stored in the dictionary
    self.params that maps parameter names to numpy arrays.
    """

    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0):
        """
        Initialize a new network.

        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: An integer giving the size of the hidden layer
        - num_classes: An integer giving the number of classes to classify
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        """
        self.params = {}
        self.reg = reg
        self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)
        self.params['b1'] = np.zeros((1, hidden_dim))
        self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)
        self.params['b2'] = np.zeros((1, num_classes))

    def loss(self, X, y=None):
        """
        Compute loss and gradient for a minibatch of data.

        Inputs:
        - X: Array of input data of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].

        Returns:
        If y is None, then run a test-time forward pass of the model and return:
        - scores: Array of shape (N, C) giving classification scores, where
          scores[i, c] is the classification score for X[i] and class c.

        If y is not None, then run a training-time forward and backward pass and
        return a tuple of:
        - loss: Scalar value giving the loss
        - grads: Dictionary with the same keys as self.params, mapping parameter
          names to gradients of the loss with respect to those parameters.
        """
        scores = None
        N = X.shape[0]
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']

        # Forward pass: affine - relu - affine
        h1, cache1 = layers.affine_forward(X, W1, b1)
        h1, cacheh1 = layers.relu_forward(h1)
        out, cache2 = layers.affine_forward(h1, W2, b2)
        scores = out  # (N, C)

        # If y is None then we are in test mode, so just return scores
        if y is None:
            return scores

        loss, grads = 0, {}
        data_loss, dscores = layers.softmax_loss(scores, y)
        reg_loss = 0.5 * self.reg * np.sum(W1 * W1) + 0.5 * self.reg * np.sum(W2 * W2)
        loss = data_loss + reg_loss

        # Backward pass: compute gradients
        dh1, dW2, db2 = layers.affine_backward(dscores, cache2)
        dh1 = layers.relu_backward(dh1, cacheh1)
        dX, dW1, db1 = layers.affine_backward(dh1, cache1)

        # Add the regularization gradient contribution
        dW2 += self.reg * W2
        dW1 += self.reg * W1
        grads['W1'] = dW1
        grads['b1'] = db1
        grads['W2'] = dW2
        grads['b2'] = db2

        return loss, grads
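Before training on real data it is worth gradient-checking the model. The snippet below is a minimal sketch, assuming the module above is saved as fc_net.py; it compares a few analytic entries of dW1 against centered numeric differences:

import numpy as np
import fc_net

np.random.seed(0)
model = fc_net.TwoLayerNet(hidden_dim=100, reg=0.1)
X = np.random.randn(5, 3 * 32 * 32)
y = np.random.randint(10, size=5)

loss, grads = model.loss(X, y)

# Centered numeric gradient on a few randomly chosen entries of W1
h = 1e-5
W1 = model.params['W1']
for _ in range(5):
    idx = tuple(np.random.randint(d) for d in W1.shape)
    old = W1[idx]
    W1[idx] = old + h
    loss_plus, _ = model.loss(X, y)
    W1[idx] = old - h
    loss_minus, _ = model.loss(X, y)
    W1[idx] = old
    numeric = (loss_plus - loss_minus) / (2 * h)
    print 'analytic: %e  numeric: %e' % (grads['W1'][idx], numeric)
# The two columns should agree to several significant digits.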
layers.py
# -*- coding: utf-8 -*-
__coauthor__ = 'Andrew'
# 2.26.2017

import numpy as np

def affine_forward(x, w, b):
    """
    Computes the forward pass for an affine (fully-connected) layer.

    The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
    examples, where each example x[i] has shape (d_1, ..., d_k). We will
    reshape each input into a vector of dimension D = d_1 * ... * d_k, and
    then transform it to an output vector of dimension M.

    Inputs:
    - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
    - w: A numpy array of weights, of shape (D, M)
    - b: A numpy array of biases, of shape (M,)

    Returns a tuple of:
    - out: output, of shape (N, M)
    - cache: (x, w, b)
    """
    out = None
    # Reshape x into rows
    N = x.shape[0]
    x_row = x.reshape(N, -1)       # (N, D)
    out = np.dot(x_row, w) + b     # (N, M)
    cache = (x, w, b)
    return out, cache

def affine_backward(dout, cache):
    """
    Computes the backward pass for an affine layer.

    Inputs:
    - dout: Upstream derivative, of shape (N, M)
    - cache: Tuple of:
      - x: Input data, of shape (N, d_1, ..., d_k)
      - w: Weights, of shape (D, M)
      - b: Biases, of shape (M,)

    Returns a tuple of:
    - dx: Gradient with respect to x, of shape (N, d_1, ..., d_k)
    - dw: Gradient with respect to w, of shape (D, M)
    - db: Gradient with respect to b, of shape (M,)
    """
    x, w, b = cache
    dx, dw, db = None, None, None
    dx = np.dot(dout, w.T)                    # (N, D)
    dx = np.reshape(dx, x.shape)              # (N, d_1, ..., d_k)
    x_row = x.reshape(x.shape[0], -1)         # (N, D)
    dw = np.dot(x_row.T, dout)                # (D, M)
    db = np.sum(dout, axis=0, keepdims=True)  # (1, M)
    return dx, dw, db

def relu_forward(x):
    """
    Computes the forward pass for a layer of rectified linear units (ReLUs).

    Input:
    - x: Inputs, of any shape

    Returns a tuple of:
    - out: Output, of the same shape as x
    - cache: x
    """
    out = None
    out = ReLU(x)
    cache = x
    return out, cache

def relu_backward(dout, cache):
    """
    Computes the backward pass for a layer of rectified linear units (ReLUs).

    Input:
    - dout: Upstream derivatives, of any shape
    - cache: Input x, of same shape as dout

    Returns:
    - dx: Gradient with respect to x
    """
    dx, x = None, cache
    dx = dout.copy()   # copy so the upstream gradient is not modified in place
    dx[x <= 0] = 0
    return dx

def svm_loss(x, y):
    """
    Computes the loss and gradient for multiclass SVM classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx

def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N
    return loss, dx

def ReLU(x):
    """ReLU non-linearity."""
    return np.maximum(0, x)
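As a tiny smoke test, sketched under the assumption that the module above is saved as layers.py, the forward functions can be chained on a hand-made batch; the gradient returned by softmax_loss should sum to zero over each row:

import numpy as np
import layers

x = np.array([[1.0, -2.0, 0.5],
              [0.3,  0.7, -1.2]])
w = np.random.randn(3, 4) * 0.01
b = np.zeros(4)

out, fc_cache = layers.affine_forward(x, w, b)   # (2, 4)
h, relu_cache = layers.relu_forward(out)         # negatives clamped to 0
loss, dscores = layers.softmax_loss(h, np.array([0, 2]))

print out.shape, h.min() >= 0                    # (2, 4) True
print np.allclose(dscores.sum(axis=1), 0)        # True: each row of dx sums to 0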
optim.py
# -*- coding: utf-8 -*-
__coauthor__ = 'Andrew'
# 2.26.2017

import numpy as np

def sgd(w, dw, config=None):
    """
    Performs vanilla stochastic gradient descent.

    config format:
    - learning_rate: Scalar learning rate.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)

    w -= config['learning_rate'] * dw
    return w, config

def sgd_momentum(w, dw, config=None):
    """
    Performs stochastic gradient descent with momentum.

    config format:
    - learning_rate: Scalar learning rate.
    - momentum: Scalar between 0 and 1 giving the momentum value.
      Setting momentum = 0 reduces to sgd.
    - velocity: A numpy array of the same shape as w and dw used to store a
      moving average of the gradients.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w))

    next_w = None
    v = config['momentum'] * v - config['learning_rate'] * dw
    next_w = w + v
    config['velocity'] = v
    return next_w, config

def rmsprop(x, dx, config=None):
    """
    Uses the RMSProp update rule, which uses a moving average of squared
    gradient values to set adaptive per-parameter learning rates.

    config format:
    - learning_rate: Scalar learning rate.
    - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
      gradient cache.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - cache: Moving average of second moments of gradients.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('decay_rate', 0.99)
    config.setdefault('epsilon', 1e-8)
    config.setdefault('cache', np.zeros_like(x))

    next_x = None
    cache = config['cache']
    decay_rate = config['decay_rate']
    learning_rate = config['learning_rate']
    epsilon = config['epsilon']

    cache = decay_rate * cache + (1 - decay_rate) * (dx ** 2)
    x += -learning_rate * dx / (np.sqrt(cache) + epsilon)
    config['cache'] = cache
    next_x = x
    return next_x, config

def adam(x, dx, config=None):
    """
    Uses the Adam update rule, which incorporates moving averages of both the
    gradient and its square and a bias correction term.

    config format:
    - learning_rate: Scalar learning rate.
    - beta1: Decay rate for moving average of first moment of gradient.
    - beta2: Decay rate for moving average of second moment of gradient.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - m: Moving average of gradient.
    - v: Moving average of squared gradient.
    - t: Iteration number.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-3)
    config.setdefault('beta1', 0.9)
    config.setdefault('beta2', 0.999)
    config.setdefault('epsilon', 1e-8)
    config.setdefault('m', np.zeros_like(x))
    config.setdefault('v', np.zeros_like(x))
    config.setdefault('t', 0)

    next_x = None
    m = config['m']
    v = config['v']
    beta1 = config['beta1']
    beta2 = config['beta2']
    learning_rate = config['learning_rate']
    epsilon = config['epsilon']
    t = config['t']

    t += 1
    m = beta1 * m + (1 - beta1) * dx
    v = beta2 * v + (1 - beta2) * (dx ** 2)
    m_bias = m / (1 - beta1 ** t)
    v_bias = v / (1 - beta2 ** t)
    x += -learning_rate * m_bias / (np.sqrt(v_bias) + epsilon)
    next_x = x

    config['m'] = m
    config['v'] = v
    config['t'] = t
    return next_x, config
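The Solver below threads the returned config dict back into the next call, which is how state such as the momentum velocity persists across steps. A minimal standalone sketch of the same pattern (a hypothetical toy objective, not part of the assignment, assuming the module above is saved as optim.py):

import numpy as np
import optim

# Minimize f(w) = 0.5 * ||w||^2, whose gradient is simply w.
w = np.array([3.0, -2.0])
config = None
for t in range(500):
    dw = w
    w, config = optim.sgd_momentum(w, dw, config)

print w, np.linalg.norm(w)   # the norm should be close to 0 after 500 steps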
solver.py
# -*- coding: utf-8 -*-
import numpy as np

import optim

class Solver(object):
    """
    A Solver encapsulates all the logic necessary for training classification
    models. The Solver performs stochastic gradient descent using different
    update rules defined in optim.py.

    The solver accepts both training and validation data and labels so it can
    periodically check classification accuracy on both training and validation
    data to watch out for overfitting.

    To train a model, you will first construct a Solver instance, passing the
    model, dataset, and various options (learning rate, batch size, etc) to the
    constructor. You will then call the train() method to run the optimization
    procedure and train the model.

    After the train() method returns, model.params will contain the parameters
    that performed best on the validation set over the course of training.
    In addition, the instance variable solver.loss_history will contain a list
    of all losses encountered during training and the instance variables
    solver.train_acc_history and solver.val_acc_history will be lists containing
    the accuracies of the model on the training and validation set at each epoch.

    Example usage might look something like this:

    data = {
      'X_train': # training data
      'y_train': # training labels
      'X_val':   # validation data
      'y_val':   # validation labels
    }
    model = MyAwesomeModel(hidden_size=100, reg=10)
    solver = Solver(model, data,
                    update_rule='sgd',
                    optim_config={
                      'learning_rate': 1e-3,
                    },
                    lr_decay=0.95,
                    num_epochs=10, batch_size=100,
                    print_every=100)
    solver.train()

    A Solver works on a model object that must conform to the following API:

    - model.params must be a dictionary mapping string parameter names to numpy
      arrays containing parameter values.

    - model.loss(X, y) must be a function that computes training-time loss and
      gradients, and test-time classification scores, with the following inputs
      and outputs:

      Inputs:
      - X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
      - y: Array of labels, of shape (N,) giving labels for X where y[i] is the
        label for X[i].

      Returns:
      If y is None, run a test-time forward pass and return:
      - scores: Array of shape (N, C) giving classification scores for X where
        scores[i, c] gives the score of class c for X[i].

      If y is not None, run a training time forward and backward pass and return
      a tuple of:
      - loss: Scalar giving the loss
      - grads: Dictionary with the same keys as self.params mapping parameter
        names to gradients of the loss with respect to those parameters.
    """

    def __init__(self, model, data, **kwargs):
        """
        Construct a new Solver instance.

        Required arguments:
        - model: A model object conforming to the API described above
        - data: A dictionary of training and validation data with the following:
          'X_train': Array of shape (N_train, d_1, ..., d_k) giving training images
          'X_val': Array of shape (N_val, d_1, ..., d_k) giving validation images
          'y_train': Array of shape (N_train,) giving labels for training images
          'y_val': Array of shape (N_val,) giving labels for validation images

        Optional arguments:
        - update_rule: A string giving the name of an update rule in optim.py.
          Default is 'sgd'.
        - optim_config: A dictionary containing hyperparameters that will be
          passed to the chosen update rule. Each update rule requires different
          hyperparameters (see optim.py) but all update rules require a
          'learning_rate' parameter so that should always be present.
        - lr_decay: A scalar for learning rate decay; after each epoch the
          learning rate is multiplied by this value.
        - batch_size: Size of minibatches used to compute loss and gradient
          during training.
        - num_epochs: The number of epochs to run for during training.
        - print_every: Integer; training losses will be printed every
          print_every iterations.
        - verbose: Boolean; if set to false then no output will be printed
          during training.
        """
        self.model = model
        self.X_train = data['X_train']
        self.y_train = data['y_train']
        self.X_val = data['X_val']
        self.y_val = data['y_val']

        # Unpack keyword arguments
        self.update_rule = kwargs.pop('update_rule', 'sgd')
        self.optim_config = kwargs.pop('optim_config', {})
        self.lr_decay = kwargs.pop('lr_decay', 1.0)
        self.batch_size = kwargs.pop('batch_size', 100)
        self.num_epochs = kwargs.pop('num_epochs', 10)
        self.print_every = kwargs.pop('print_every', 10)
        self.verbose = kwargs.pop('verbose', True)

        # Throw an error if there are extra keyword arguments
        if len(kwargs) > 0:
            extra = ', '.join('"%s"' % k for k in kwargs.keys())
            raise ValueError('Unrecognized arguments %s' % extra)

        # Make sure the update rule exists, then replace the string
        # name with the actual function
        if not hasattr(optim, self.update_rule):
            raise ValueError('Invalid update_rule "%s"' % self.update_rule)
        self.update_rule = getattr(optim, self.update_rule)

        self._reset()

    def _reset(self):
        """
        Set up some book-keeping variables for optimization. Don't call this
        manually.
        """
        # Set up some variables for book-keeping
        self.epoch = 0
        self.best_val_acc = 0
        self.best_params = {}
        self.loss_history = []
        self.train_acc_history = []
        self.val_acc_history = []

        # Make a deep copy of the optim_config for each parameter
        self.optim_configs = {}
        for p in self.model.params:
            d = {k: v for k, v in self.optim_config.iteritems()}
            self.optim_configs[p] = d

    def _step(self):
        """
        Make a single gradient update. This is called by train() and should not
        be called manually.
        """
        # Make a minibatch of training data
        num_train = self.X_train.shape[0]
        batch_mask = np.random.choice(num_train, self.batch_size)
        X_batch = self.X_train[batch_mask]
        y_batch = self.y_train[batch_mask]

        # Compute loss and gradient
        loss, grads = self.model.loss(X_batch, y_batch)
        self.loss_history.append(loss)

        # Perform a parameter update
        for p, w in self.model.params.iteritems():
            dw = grads[p]
            config = self.optim_configs[p]
            # update_rule can be any of the rules defined in optim.py
            next_w, next_config = self.update_rule(w, dw, config)
            self.model.params[p] = next_w
            self.optim_configs[p] = next_config

    def check_accuracy(self, X, y, num_samples=None, batch_size=100):
        """
        Check accuracy of the model on the provided data.

        Inputs:
        - X: Array of data, of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,)
        - num_samples: If not None, subsample the data and only test the model
          on num_samples datapoints.
        - batch_size: Split X and y into batches of this size to avoid using
          too much memory.

        Returns:
        - acc: Scalar giving the fraction of instances that were correctly
          classified by the model.
        """
        # Maybe subsample the data
        N = X.shape[0]
        if num_samples is not None and N > num_samples:
            mask = np.random.choice(N, num_samples)
            N = num_samples
            X = X[mask]
            y = y[mask]

        # Compute predictions in batches
        num_batches = N / batch_size
        if N % batch_size != 0:
            num_batches += 1
        y_pred = []
        for i in xrange(num_batches):
            start = i * batch_size
            end = (i + 1) * batch_size
            scores = self.model.loss(X[start:end])
            y_pred.append(np.argmax(scores, axis=1))
        y_pred = np.hstack(y_pred)
        acc = np.mean(y_pred == y)
        return acc

    def train(self):
        """
        Run optimization to train the model.
        """
        num_train = self.X_train.shape[0]
        iterations_per_epoch = max(num_train / self.batch_size, 1)
        num_iterations = self.num_epochs * iterations_per_epoch

        for t in xrange(num_iterations):
            self._step()

            # Maybe print training loss
            if self.verbose and t % self.print_every == 0:
                print '(Iteration %d / %d) loss: %f' % (
                    t + 1, num_iterations, self.loss_history[-1])

            # At the end of every epoch, increment the epoch counter and decay
            # the learning rate.
            epoch_end = (t + 1) % iterations_per_epoch == 0
            if epoch_end:
                self.epoch += 1
                for k in self.optim_configs:
                    self.optim_configs[k]['learning_rate'] *= self.lr_decay

            # Check train and val accuracy on the first iteration, the last
            # iteration, and at the end of each epoch.
            first_it = (t == 0)
            last_it = (t == num_iterations - 1)
            if first_it or last_it or epoch_end:
                train_acc = self.check_accuracy(self.X_train, self.y_train,
                                                num_samples=1000)
                val_acc = self.check_accuracy(self.X_val, self.y_val)
                self.train_acc_history.append(train_acc)
                self.val_acc_history.append(val_acc)

                if self.verbose:
                    print '(Epoch %d / %d) train acc: %f; val_acc: %f' % (
                        self.epoch, self.num_epochs, train_acc, val_acc)

                # Keep track of the best model
                if val_acc > self.best_val_acc:
                    self.best_val_acc = val_acc
                    self.best_params = {}
                    for k, v in self.model.params.iteritems():
                        self.best_params[k] = v.copy()

        # At the end of training swap the best params into the model
        self.model.params = self.best_params
test.py
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np

import fc_net
from solver import Solver
import dataset

data = dataset.get_CIFAR10_data()
model = fc_net.TwoLayerNet(reg=0.9, weight_scale=1e-4)
solver = Solver(model, data,
                lr_decay=0.95,
                print_every=100, num_epochs=40, batch_size=400,
                update_rule='sgd_momentum',
                optim_config={'learning_rate': 5e-4, 'momentum': 0.5})
solver.train()

best_model = model
y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1)
y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1)
print 'Validation set accuracy: ', (y_val_pred == data['y_val']).mean()
print 'Test set accuracy: ', (y_test_pred == data['y_test']).mean()
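test.py imports matplotlib but never uses it. A natural follow-up, sketched below under the assumption that solver.train() above has finished, is to plot the histories the Solver records:

# Plot the loss and accuracy curves recorded by the Solver during training.
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Training loss')

plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, label='train')
plt.plot(solver.val_acc_history, label='val')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()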