   | """network2.py ~~~~~~~~~~~~~~ An improved version of network.py, implementing the stochastic gradient descent learning algorithm for a feedforward neural network. Improvements include the addition of the cross-entropy cost function, regularization, and better initialization of network weights.  Note that I have focused on making the code simple, easily readable, and easily modifiable.  It is not optimized, and omits many desirable features. """
 
 
import json
import random
import sys

import numpy as np
 
 
 
class QuadraticCost(object):

    @staticmethod
    def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.

        """
        return 0.5*np.linalg.norm(a-y)**2

    @staticmethod
    def delta(z, a, y):
        """Return the error delta from the output layer."""
        return (a-y) * sigmoid_prime(z)
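
    # Illustrative sanity check (not part of the original file): for a single
    # training example with output activations ``a`` and desired output ``y``
    # as below, fn returns 0.5*(0.9**2 + 0.2**2) = 0.425.
    #
    #     a = np.array([[0.1], [0.8]])
    #     y = np.array([[1.0], [1.0]])
    #     QuadraticCost.fn(a, y)    # -> 0.425 (up to floating-point error)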
 
class CrossEntropyCost(object):

    @staticmethod
    def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.  Note that np.nan_to_num is used to ensure numerical
        stability.  In particular, if both ``a`` and ``y`` have a 1.0
        in the same slot, then the expression (1-y)*np.log(1-a)
        returns nan.  The np.nan_to_num ensures that that is converted
        to the correct value (0.0).

        """
        return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))

    @staticmethod
    def delta(z, a, y):
        """Return the error delta from the output layer.  Note that the
        parameter ``z`` is not used by the method.  It is included in
        the method's parameters in order to make the interface
        consistent with the delta method for other cost classes.

        """
        return (a-y)
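
    # Illustrative note (not part of the original file): the nan_to_num guard
    # matters when an output activation saturates at exactly the target value,
    # e.g.
    #
    #     a = np.array([[1.0]])
    #     y = np.array([[1.0]])
    #     # -y*np.log(a) - (1-y)*np.log(1-a) evaluates to 0 - 0*(-inf) = nan,
    #     # which np.nan_to_num maps back to the correct cost of 0.0.
    #     CrossEntropyCost.fn(a, y)    # -> 0.0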
 
 
class Network(object):

    def __init__(self, sizes, cost=CrossEntropyCost):
        """The list ``sizes`` contains the number of neurons in the respective
        layers of the network.  For example, if the list was [2, 3, 1]
        then it would be a three-layer network, with the first layer
        containing 2 neurons, the second layer 3 neurons, and the
        third layer 1 neuron.  The biases and weights for the network
        are initialized randomly, using
        ``self.default_weight_initializer`` (see docstring for that
        method).

        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost = cost

    def default_weight_initializer(self):
        """Initialize each weight using a Gaussian distribution with mean 0
        and standard deviation 1 over the square root of the number of
        weights connecting to the same neuron.  Initialize the biases
        using a Gaussian distribution with mean 0 and standard
        deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)/np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]
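
    # Descriptive note (not part of the original file): dividing by
    # np.sqrt(x) keeps the weighted input z = w.a + b of order 1 rather than
    # of order sqrt(x), so sigmoid neurons are far less likely to start out
    # saturated, which tends to speed up early learning.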

    def large_weight_initializer(self):
        """Initialize the weights using a Gaussian distribution with mean 0
        and standard deviation 1.  Initialize the biases using a
        Gaussian distribution with mean 0 and standard deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        This weight and bias initializer uses the same approach as in
        Chapter 1, and is included for purposes of comparison.  It
        will usually be better to use the default weight initializer
        instead.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            lmbda=0.0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False):
        """Train the neural network using mini-batch stochastic gradient
        descent.  The ``training_data`` is a list of tuples ``(x, y)``
        representing the training inputs and the desired outputs.  The
        other non-optional parameters are self-explanatory, as is the
        regularization parameter ``lmbda``.  The method also accepts
        ``evaluation_data``, usually either the validation or test
        data.  We can monitor the cost and accuracy on either the
        evaluation data or the training data, by setting the
        appropriate flags.  The method returns a tuple containing four
        lists: the (per-epoch) costs on the evaluation data, the
        accuracies on the evaluation data, the costs on the training
        data, and the accuracies on the training data.  All values are
        evaluated at the end of each training epoch.  So, for example,
        if we train for 30 epochs, then the first element of the tuple
        will be a 30-element list containing the cost on the
        evaluation data at the end of each epoch.  Note that the lists
        are empty if the corresponding flag is not set.

        """
        if evaluation_data: n_data = len(evaluation_data)
        n = len(training_data)
        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in xrange(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in xrange(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(
                    mini_batch, eta, lmbda, len(training_data))
            print "Epoch %s training complete" % j
            if monitor_training_cost:
                cost = self.total_cost(training_data, lmbda)
                training_cost.append(cost)
                print "Cost on training data: {}".format(cost)
            if monitor_training_accuracy:
                accuracy = self.accuracy(training_data, convert=True)
                training_accuracy.append(accuracy)
                print "Accuracy on training data: {} / {}".format(
                    accuracy, n)
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print "Cost on evaluation data: {}".format(cost)
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                print "Accuracy on evaluation data: {} / {}".format(
                    accuracy, n_data)
            print
        return evaluation_cost, evaluation_accuracy, \
            training_cost, training_accuracy
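
    # Minimal usage sketch (not part of the original file).  It assumes
    # ``training_data`` and ``validation_data`` are in the format produced by
    # mnist_loader.load_data_wrapper; the hyperparameters are only examples:
    #
    #     net = Network([784, 30, 10], cost=CrossEntropyCost)
    #     eval_cost, eval_acc, train_cost, train_acc = net.SGD(
    #         training_data, 30, 10, 0.5,
    #         lmbda=5.0,
    #         evaluation_data=validation_data,
    #         monitor_evaluation_accuracy=True)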

    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini batch.  The
        ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter, and
        ``n`` is the total size of the training data set.

        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]
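
    # Descriptive note (not part of the original file): the update above is
    # L2-regularized gradient descent.  For a mini batch of size m drawn from
    # a training set of size n,
    #
    #     w -> (1 - eta*lmbda/n)*w - (eta/m) * sum over the batch of dC_x/dw
    #     b -> b - (eta/m) * sum over the batch of dC_x/db
    #
    # i.e. the weights are first shrunk ("weight decay") and then moved
    # against the averaged gradient; the biases are not regularized.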

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = (self.cost).delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below counts backwards from
        # the output: l = 1 means the last layer of neurons, l = 2 the
        # second-last layer, and so on.  This lets us take advantage of
        # Python's negative list indices.
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def accuracy(self, data, convert=False):
        """Return the number of inputs in ``data`` for which the neural
        network outputs the correct result.  The neural network's
        output is assumed to be the index of whichever neuron in the
        final layer has the highest activation.

        The flag ``convert`` should be set to False if the data set is
        validation or test data (the usual case), and to True if the
        data set is the training data.  The need for this flag arises
        due to differences in the way the results ``y`` are
        represented in the different data sets.  In particular, it
        flags whether we need to convert between the different
        representations.  It may seem strange to use different
        representations for the different data sets.  Why not use the
        same representation for all three data sets?  It's done for
        efficiency reasons -- the program usually evaluates the cost
        on the training data and the accuracy on other data sets.
        These are different types of computations, and using different
        representations speeds things up.  More details on the
        representations can be found in
        mnist_loader.load_data_wrapper.

        """
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y))
                       for (x, y) in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y)
                       for (x, y) in data]
        return sum(int(x == y) for (x, y) in results)

    def total_cost(self, data, lmbda, convert=False):
        """Return the total cost for the data set ``data``.  The flag
        ``convert`` should be set to False if the data set is the
        training data (the usual case), and to True if the data set is
        the validation or test data.  See comments on the similar (but
        reversed) convention for the ``accuracy`` method, above.

        """
        cost = 0.0
        for x, y in data:
            a = self.feedforward(x)
            if convert: y = vectorized_result(y)
            cost += self.cost.fn(a, y)/len(data)
        cost += 0.5*(lmbda/len(data))*sum(
            np.linalg.norm(w)**2 for w in self.weights)
        return cost

    def save(self, filename):
        """Save the neural network to the file ``filename``."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        with open(filename, "w") as f:
            json.dump(data, f)
 
def load(filename):
    """Load a neural network from the file ``filename``.  Returns an
    instance of Network.

    """
    with open(filename, "r") as f:
        data = json.load(f)
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
 
def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the j'th position
    and zeroes elsewhere.  This is used to convert a digit (0...9)
    into a corresponding desired output from the neural network.

    """
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e

def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))
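

if __name__ == "__main__":
    # Minimal training driver (not part of the original file).  It assumes
    # mnist_loader.py from the same repository is importable and that the
    # MNIST data file it expects is in place; the hyperparameters and the
    # output filename below are only examples.
    import mnist_loader
    training_data, validation_data, test_data = \
        mnist_loader.load_data_wrapper()
    net = Network([784, 30, 10], cost=CrossEntropyCost)
    net.SGD(training_data, 30, 10, 0.5,
            lmbda=5.0,
            evaluation_data=validation_data,
            monitor_evaluation_cost=True,
            monitor_evaluation_accuracy=True)
    net.save("trained_network.json")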