From 9b1f631405fe51051681fb87fe456965b900a91d Mon Sep 17 00:00:00 2001
From: Harshal
Date: Wed, 10 Jul 2019 11:31:52 +0530
Subject: [PATCH] KerasTask1done

---
 .../HarshalDesai_ModelwithLogs.ipynb | 205 ++++++++++++++++++
 1 file changed, 205 insertions(+)
 create mode 100644 Framework/Keras/Keras Assingment/HarshalDesai_Task1/HarshalDesai_ModelwithLogs.ipynb

diff --git a/Framework/Keras/Keras Assingment/HarshalDesai_Task1/HarshalDesai_ModelwithLogs.ipynb b/Framework/Keras/Keras Assingment/HarshalDesai_Task1/HarshalDesai_ModelwithLogs.ipynb
new file mode 100644
index 0000000..f75fcbb
--- /dev/null
+++ b/Framework/Keras/Keras Assingment/HarshalDesai_Task1/HarshalDesai_ModelwithLogs.ipynb
@@ -0,0 +1,205 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Using TensorFlow backend.\n"
+     ]
+    }
+   ],
+   "source": [
+    "#Import keras\n",
+    "import keras\n",
+    "#Import the MNIST dataset\n",
+    "from keras.datasets import mnist"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Loading the dataset into train and test sets\n",
+    "(X_train, y_train), (X_test, y_test) = mnist.load_data()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Normalize the dataset\n",
+    "X_train = keras.utils.normalize(X_train, axis=1)\n",
+    "X_test = keras.utils.normalize(X_test, axis=1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import `Sequential` from `keras.models`\n",
+    "from keras.models import Sequential"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Initialize the model\n",
+    "model = keras.models.Sequential()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Adding the input layer by flattening each 28x28 pixel image into a vector of size 784\n",
+    "#Using Flatten()\n",
+    "model.add(keras.layers.Flatten())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import `Dense` from `keras.layers`\n",
+    "from keras.layers import Dense\n",
+    "#Adding a hidden layer of size 512 with ReLU activation\n",
+    "model.add(keras.layers.Dense(512, activation='relu'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Adding an output layer of size 10 with softmax activation\n",
+    "model.add(keras.layers.Dense(10, activation='softmax'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#Compiling the model using the SGD optimizer\n",
+    "model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1/10\n",
+      "60000/60000 [==============================] - 13s 221us/step - loss: 0.2341 - acc: 0.9349\n",
+      "Epoch 2/10\n",
+      "60000/60000 [==============================] - 14s 230us/step - loss: 0.2259 - acc: 0.9367\n",
+      "Epoch 3/10\n",
+      "60000/60000 [==============================] - 14s 225us/step - loss: 0.2181 - acc: 0.9385\n",
+      "Epoch 4/10\n",
+      "60000/60000 [==============================] - 10s 171us/step - loss: 0.2111 - acc: 0.9408\n",
+      "Epoch 5/10\n",
+      "60000/60000 [==============================] - 10s 168us/step - loss: 0.2044 - acc: 0.9425\n",
+      "Epoch 6/10\n",
+      "60000/60000 [==============================] - 14s 237us/step - loss: 0.1981 - acc: 0.9440\n",
+      "Epoch 7/10\n",
+      "60000/60000 [==============================] - 14s 235us/step - loss: 0.1922 - acc: 0.9455\n",
+      "Epoch 8/10\n",
+      "60000/60000 [==============================] - 14s 231us/step - loss: 0.1866 - acc: 0.9474\n",
+      "Epoch 9/10\n",
+      "60000/60000 [==============================] - 14s 238us/step - loss: 0.1814 - acc: 0.9490\n",
+      "Epoch 10/10\n",
+      "60000/60000 [==============================] - 14s 239us/step - loss: 0.1764 - acc: 0.9506\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "#Training the model\n",
+    "model.fit(X_train, y_train, epochs=10, batch_size=32, verbose=1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "10000/10000 [==============================] - 1s 64us/step\n",
+      "Loss: 0.18251839239299297\n",
+      "Accuracy: 94.69999999999999%\n"
+     ]
+    }
+   ],
+   "source": [
+    "#Evaluating the model on the test dataset\n",
+    "val_loss, val_acc = model.evaluate(X_test, y_test)\n",
+    "print(\"Loss: \" + str(val_loss))\n",
+    "print(\"Accuracy: \" + str(100*val_acc) + \"%\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}