diff --git a/learning.py b/learning.py
index 32cf73d81..4772a6128 100644
--- a/learning.py
+++ b/learning.py
@@ -21,6 +21,10 @@ def euclidean_distance(X, Y):
     return math.sqrt(sum((x - y)**2 for x, y in zip(X, Y)))
 
 
+def cross_entropy_loss(X, Y):
+    n = len(X)
+    return (-1.0 / n) * sum(x * math.log(y) + (1 - x) * math.log(1 - y) for x, y in zip(X, Y))
+
 def rms_error(X, Y):
     return math.sqrt(ms_error(X, Y))
 
diff --git a/neural_nets.ipynb b/neural_nets.ipynb
index a6bb6f43b..9c5db9a56 100644
--- a/neural_nets.ipynb
+++ b/neural_nets.ipynb
@@ -82,7 +82,7 @@
     "\n",
     "In both the Perceptron and the Neural Network, we are using the Backpropagation algorithm to train our weights. Basically it achieves that by propagating the errors from our last layer into our first layer, this is why it is called Backpropagation. In order to use Backpropagation, we need a cost function. This function is responsible for indicating how good our neural network is for a given example. One common cost function is the *Mean Squared Error* (MSE). This cost function has the following format:\n",
     "\n",
-    "$$MSE=\\frac{1}{2} \\sum_{i=1}^{n}(y - \\hat{y})^{2}$$\n",
+    "$$MSE=\\frac{1}{n} \\sum_{i=1}^{n}(y - \\hat{y})^{2}$$\n",
     "\n",
     "Where `n` is the number of training examples, $\\hat{y}$ is our prediction and $y$ is the correct prediction for the example.\n",
     "\n",
@@ -221,14 +221,14 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.3"
+   "pygments_lexer": "ipython2",
+   "version": "2.7.14"
   }
  },
  "nbformat": 4,
diff --git a/tests/test_learning.py b/tests/test_learning.py
index 6afadc282..ec3a2f188 100644
--- a/tests/test_learning.py
+++ b/tests/test_learning.py
@@ -18,6 +18,16 @@ def test_euclidean():
     distance = euclidean_distance([0, 0, 0], [0, 0, 0])
     assert distance == 0
 
+def test_cross_entropy():
+    loss = cross_entropy_loss([1, 0], [0.9, 0.3])
+    assert round(loss, 2) == 0.23
+
+    loss = cross_entropy_loss([1, 0, 0, 1], [0.9, 0.3, 0.5, 0.75])
+    assert round(loss, 2) == 0.36
+
+    loss = cross_entropy_loss([1, 0, 0, 1, 1, 0, 1, 1], [0.9, 0.3, 0.5, 0.75, 0.85, 0.14, 0.93, 0.79])
+    assert round(loss, 2) == 0.26
+
 def test_rms_error():
     assert rms_error([2, 2], [2, 2]) == 0
 
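
Note: the cross_entropy_loss added to learning.py above computes the mean binary cross-entropy, L = -(1/n) * sum(x * log(y) + (1 - x) * log(1 - y)) over target/prediction pairs (x, y). The snippet below is a minimal standalone sketch that mirrors that definition with explanatory comments; it is included only for illustration, is not part of the patch, and reproduces the first value asserted in test_cross_entropy.

import math


def cross_entropy_loss(X, Y):
    # Mean binary cross-entropy, mirroring the function added in learning.py:
    # X holds the true labels (0 or 1), Y holds predicted probabilities
    # strictly between 0 and 1 (math.log is undefined at 0 and 1).
    n = len(X)
    return (-1.0 / n) * sum(x * math.log(y) + (1 - x) * math.log(1 - y)
                            for x, y in zip(X, Y))


# Same inputs as the first assertion in test_cross_entropy; prints 0.23.
print(round(cross_entropy_loss([1, 0], [0.9, 0.3]), 2))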