{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "id": "UKqu0Ep9HEkF" }, "outputs": [], "source": [ "import numpy as np\n", "X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float) # two inputs [sleep,study]\n", "y = np.array(([92], [86], [89]), dtype=float) # one output [Expected % in Exams]\n", "X = X/np.amax(X,axis=0) # maximum of X array longitudinally\n", "y = y/100" ] }, { "cell_type": "code", "source": [ "#Sigmoid Function\n", "def sigmoid (x):\n", " return 1/(1 + np.exp(-x))\n", "\n", "\n", "#Derivative of Sigmoid Function\n", "def derivatives_sigmoid(x):\n", " return x * (1 - x)" ], "metadata": { "id": "7048eh0mHHla" }, "execution_count": 2, "outputs": [] }, { "cell_type": "code", "source": [ "#Variable initialization\n", "epoch=5000 #Setting training iterations\n", "lr=0.1 #Setting learning rate\n", "inputlayer_neurons = 2 #number of features in data set\n", "hiddenlayer_neurons = 3 #number of hidden layers neurons\n", "output_neurons = 1 #number of neurons at output layer" ], "metadata": { "id": "LlXGsPfUHL3I" }, "execution_count": 3, "outputs": [] }, { "cell_type": "code", "source": [ "#weight and bias initialization\n", "wh=np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons)) #weight of the link from input node to hidden node\n", "bh=np.random.uniform(size=(1,hiddenlayer_neurons)) # bias of the link from input node to hidden node\n", "wout=np.random.uniform(size=(hiddenlayer_neurons,output_neurons)) #weight of the link from hidden node to output node\n", "bout=np.random.uniform(size=(1,output_neurons)) #bias of the link from hidden node to output node\n" ], "metadata": { "id": "tbfwqIHpHOUr" }, "execution_count": 4, "outputs": [] }, { "cell_type": "code", "source": [ "#draws a random range of numbers uniformly of dim x*y\n", "for i in range(epoch):\n", " #Forward Propogation\n", " hinp1=np.dot(X,wh)\n", " hinp=hinp1 + bh\n", " hlayer_act = sigmoid(hinp)\n", " outinp1=np.dot(hlayer_act,wout)\n", " outinp= outinp1+ bout\n", " output = sigmoid(outinp)\n", "\n", "\n", "#Backpropagation\n", " EO = y-output\n", " outgrad = derivatives_sigmoid(output)\n", " d_output = EO* outgrad\n", " EH = d_output.dot(wout.T)\n", "\n", "\n", "#how much hidden layer weights contributed to error\n", " hiddengrad = derivatives_sigmoid(hlayer_act)\n", " d_hiddenlayer = EH * hiddengrad\n", "\n", "\n", "# dotproduct of nextlayererror and currentlayerop\n", "wout += hlayer_act.T.dot(d_output) *lr\n", " \n", "\n", "wh += X.T.dot(d_hiddenlayer) *lr\n", "print(\"Input: \\n\" + str(X)) \n", "print(\"Actual Output: \\n\" + str(y))\n", "print(\"Predicted Output: \\n\" ,output)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ArjgUk33HSJn", "outputId": "9d4654cc-d8fc-4b51-e5c3-078c0ab557fb" }, "execution_count": 5, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Input: \n", "[[0.66666667 1. ]\n", " [0.33333333 0.55555556]\n", " [1. 0.66666667]]\n", "Actual Output: \n", "[[0.92]\n", " [0.86]\n", " [0.89]]\n", "Predicted Output: \n", " [[0.76983028]\n", " [0.75596362]\n", " [0.77020531]]\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "1z1LVDOEHZ-g" }, "execution_count": null, "outputs": [] } ] }