diff --git a/your-code/Solution Challenge 2.PNG b/your-code/Solution Challenge 2.PNG
new file mode 100644
index 0000000..fb8fffa
Binary files /dev/null and b/your-code/Solution Challenge 2.PNG differ
diff --git a/your-code/challenge-1.ipynb b/your-code/challenge-1.ipynb
index 2487c5f..37dc9b8 100644
--- a/your-code/challenge-1.ipynb
+++ b/your-code/challenge-1.ipynb
@@ -34,11 +34,336 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " o | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 x x x x o o x o o True\n",
+ "1 x x x x o o o x o True\n",
+ "2 x x x x o o o o x True\n",
+ "3 x x x x o o o b b True\n",
+ "4 x x x x o o b o b True"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# your code here\n",
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "df = pd.read_csv('tic-tac-toe.csv')\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 2 2 2 2 1 1 2 1 1 True\n",
+ "1 2 2 2 2 1 1 1 2 1 True\n",
+ "2 2 2 2 2 1 1 1 1 2 True\n",
+ "3 2 2 2 2 1 1 1 0 0 True\n",
+ "4 2 2 2 2 1 1 0 1 0 True"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
- "# your code here"
+ "#Convert the categorical values to numeric in all columns\n",
+ "from sklearn.preprocessing import LabelEncoder\n",
+ "le = LabelEncoder()\n",
+ "columns = ['TL', 'TM', 'TR', 'ML', 'MM', 'MR', 'BL', 'BM', 'BR']\n",
+ "for c in columns:\n",
+ " df[c] = le.fit_transform(df[c])\n",
+ "df.head(5)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "x (958, 9)\n",
+ "y (958,)\n"
+ ]
+ }
+ ],
+ "source": [
+ "#Separate the inputs and output.\n",
+ "X = df.drop(columns=['class'])\n",
+ "y = df['class']\n",
+ "\n",
+ "print('x ',X.shape)\n",
+ "print('y ',y.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 1.00322257 1.08495342 1.00322257 ... 1.00322257 -0.16731812\n",
+ " -0.28682739]\n",
+ " [ 1.00322257 1.08495342 1.00322257 ... -0.28682739 1.08495342\n",
+ " -0.28682739]\n",
+ " [ 1.00322257 1.08495342 1.00322257 ... -0.28682739 -0.16731812\n",
+ " 1.00322257]\n",
+ " ...\n",
+ " [-0.28682739 1.08495342 -0.28682739 ... 1.00322257 -0.16731812\n",
+ " 1.00322257]\n",
+ " [-0.28682739 1.08495342 -0.28682739 ... 1.00322257 -0.16731812\n",
+ " 1.00322257]\n",
+ " [-0.28682739 -0.16731812 1.00322257 ... -0.28682739 1.08495342\n",
+ " 1.00322257]]\n",
+ "0 True\n",
+ "1 True\n",
+ "2 True\n",
+ "3 True\n",
+ "4 True\n",
+ " ... \n",
+ "953 False\n",
+ "954 False\n",
+ "955 False\n",
+ "956 False\n",
+ "957 False\n",
+ "Name: class, Length: 958, dtype: bool\n"
+ ]
+ }
+ ],
+ "source": [
+ "#Normalize the input data\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "stdr = StandardScaler().fit(X)\n",
+ "X = stdr.transform(X)\n",
+ "print(X)\n",
+ "print(y)\n",
+ "#print('x ',X.shape)\n"
]
},
{
@@ -60,11 +385,162 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "(718, 9)\n",
+ "(718,)\n",
+ "(240, 9)\n",
+ "(240,)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# your code here\n",
+ "# Split the training and test data.\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X,y)\n",
+ "\n",
+ "print(X_train.shape)\n",
+ "print(y_train.shape)\n",
+ "print(X_test.shape)\n",
+ "print(y_test.shape)\n",
+ "#print(y_test)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "from tensorflow.keras.models import Sequential\n",
+ "from tensorflow.keras.layers import Dense\n",
+ "from tensorflow.keras.utils import to_categorical"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/30\n",
+ "90/90 [==============================] - 0s 933us/step - loss: 1.2226 - accuracy: 0.5947\n",
+ "Epoch 2/30\n",
+ "90/90 [==============================] - 0s 911us/step - loss: 0.5855 - accuracy: 0.7228\n",
+ "Epoch 3/30\n",
+ "90/90 [==============================] - 0s 900us/step - loss: 0.5220 - accuracy: 0.7632\n",
+ "Epoch 4/30\n",
+ "90/90 [==============================] - 0s 878us/step - loss: 0.4768 - accuracy: 0.7967\n",
+ "Epoch 5/30\n",
+ "90/90 [==============================] - 0s 911us/step - loss: 0.4309 - accuracy: 0.8259\n",
+ "Epoch 6/30\n",
+ "90/90 [==============================] - 0s 923us/step - loss: 0.3948 - accuracy: 0.8343\n",
+ "Epoch 7/30\n",
+ "90/90 [==============================] - 0s 889us/step - loss: 0.3610 - accuracy: 0.8719\n",
+ "Epoch 8/30\n",
+ "90/90 [==============================] - 0s 900us/step - loss: 0.3320 - accuracy: 0.8802\n",
+ "Epoch 9/30\n",
+ "90/90 [==============================] - 0s 900us/step - loss: 0.3068 - accuracy: 0.8858\n",
+ "Epoch 10/30\n",
+ "90/90 [==============================] - 0s 878us/step - loss: 0.2822 - accuracy: 0.8900\n",
+ "Epoch 11/30\n",
+ "90/90 [==============================] - 0s 889us/step - loss: 0.2708 - accuracy: 0.9039\n",
+ "Epoch 12/30\n",
+ "90/90 [==============================] - 0s 900us/step - loss: 0.2450 - accuracy: 0.9081\n",
+ "Epoch 13/30\n",
+ "90/90 [==============================] - 0s 889us/step - loss: 0.2305 - accuracy: 0.9220\n",
+ "Epoch 14/30\n",
+ "90/90 [==============================] - 0s 889us/step - loss: 0.2062 - accuracy: 0.9276\n",
+ "Epoch 15/30\n",
+ "90/90 [==============================] - 0s 922us/step - loss: 0.1763 - accuracy: 0.9401\n",
+ "Epoch 16/30\n",
+ "90/90 [==============================] - 0s 956us/step - loss: 0.1627 - accuracy: 0.9540\n",
+ "Epoch 17/30\n",
+ "90/90 [==============================] - 0s 956us/step - loss: 0.1436 - accuracy: 0.9596\n",
+ "Epoch 18/30\n",
+ "90/90 [==============================] - ETA: 0s - loss: 0.1300 - accuracy: 0.96 - 0s 911us/step - loss: 0.1310 - accuracy: 0.9652\n",
+ "Epoch 19/30\n",
+ "90/90 [==============================] - 0s 956us/step - loss: 0.1140 - accuracy: 0.9721\n",
+ "Epoch 20/30\n",
+ "90/90 [==============================] - 0s 967us/step - loss: 0.1001 - accuracy: 0.9735\n",
+ "Epoch 21/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0890 - accuracy: 0.9791\n",
+ "Epoch 22/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0804 - accuracy: 0.9916\n",
+ "Epoch 23/30\n",
+ "90/90 [==============================] - 0s 933us/step - loss: 0.0711 - accuracy: 0.9903\n",
+ "Epoch 24/30\n",
+ "90/90 [==============================] - 0s 867us/step - loss: 0.0613 - accuracy: 0.9944\n",
+ "Epoch 25/30\n",
+ "90/90 [==============================] - 0s 900us/step - loss: 0.0533 - accuracy: 0.9972\n",
+ "Epoch 26/30\n",
+ "90/90 [==============================] - 0s 933us/step - loss: 0.0482 - accuracy: 0.9972\n",
+ "Epoch 27/30\n",
+ "90/90 [==============================] - 0s 922us/step - loss: 0.0428 - accuracy: 0.9986\n",
+ "Epoch 28/30\n",
+ "90/90 [==============================] - 0s 911us/step - loss: 0.0378 - accuracy: 0.9986\n",
+ "Epoch 29/30\n",
+ "90/90 [==============================] - 0s 889us/step - loss: 0.0308 - accuracy: 1.0000\n",
+ "Epoch 30/30\n",
+ "90/90 [==============================] - 0s 900us/step - loss: 0.0289 - accuracy: 1.0000\n",
+ "8/8 [==============================] - 0s 1000us/step - loss: 0.2764 - accuracy: 0.8917\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "[0.2763564884662628, 0.8916666507720947]"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#Create a `Sequential` model\n",
+ "\n",
+ "# Build the model.\n",
+    "model = Sequential([ # Sequential: a plain linear stack of layers\n",
+    "    Dense(64, activation='relu', input_shape=(9,)), # 9 board cells as input; Dense = fully connected (every neuron links to all neurons of the next layer)\n",
+ " Dense(64, activation='relu'), # choosing relu instead of sigmoid, this is somewhat common\n",
+    "    Dense(10, activation='softmax'), # softmax output layer; note: only 2 units are needed for this binary (win / no-win) target\n",
+ "])\n",
+ "\n",
+ "# Compile the model.\n",
+ "model.compile(\n",
+ " optimizer='adam', #here we could use stochastic gradient descent, but adam is a de facto standard\n",
+    "    loss='sparse_categorical_crossentropy', # suitable loss when class labels are integer-encoded (here bool -> 0/1)\n",
+ " metrics=['accuracy'],\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Train the model.\n",
+ "#tmodel = model.fit(\n",
+ "# X_train,\n",
+ "# to_categorical(y_train), # just to make sure the outputs are not considered numeric (because, ya know, they are numbers...)\n",
+ "# epochs=5, # go 5 times through the whole dataset\n",
+ "# #batch_size=32, # send 32 images at a time before you tweak the network again, to make it faster\n",
+ "#)\n",
+ "\n",
+ "model.fit(X_train,y_train, batch_size=8, epochs=30)\n",
+ "\n",
+ "\n",
+ "# Evaluate the model.\n",
+ "#model.evaluate(\n",
+ "# X_test,\n",
+ "# to_categorical(y_test)\n",
+ "#)\n",
+ "model.evaluate(X_test,y_test)"
]
},
{
@@ -78,11 +554,28 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 8,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[0 1 0 1 1]\n",
+ "655 False\n",
+ "616 True\n",
+ "701 False\n",
+ "471 True\n",
+ "35 True\n",
+ "Name: class, dtype: bool\n"
+ ]
+ }
+ ],
"source": [
- "# your code here"
+ "# your code here\n",
+ "predictions = model.predict(X_test[:5])\n",
+ "print(np.argmax(predictions, axis=1))\n",
+ "print(y_test[:5])"
]
},
{
@@ -104,11 +597,131 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 15,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/30\n",
+ "90/90 [==============================] - 0s 989us/step - loss: 0.6878 - accuracy: 0.6630\n",
+ "Epoch 2/30\n",
+ "90/90 [==============================] - 0s 978us/step - loss: 0.5339 - accuracy: 0.7549\n",
+ "Epoch 3/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.4507 - accuracy: 0.8078\n",
+ "Epoch 4/30\n",
+ "90/90 [==============================] - 0s 978us/step - loss: 0.3945 - accuracy: 0.8440\n",
+ "Epoch 5/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.3078 - accuracy: 0.8872\n",
+ "Epoch 6/30\n",
+ "90/90 [==============================] - 0s 989us/step - loss: 0.2402 - accuracy: 0.9136\n",
+ "Epoch 7/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.2259 - accuracy: 0.9053\n",
+ "Epoch 8/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.1288 - accuracy: 0.9568\n",
+ "Epoch 9/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.1374 - accuracy: 0.9499\n",
+ "Epoch 10/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0861 - accuracy: 0.9652\n",
+ "Epoch 11/30\n",
+ "90/90 [==============================] - 0s 989us/step - loss: 0.0929 - accuracy: 0.9763\n",
+ "Epoch 12/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.1621 - accuracy: 0.9526\n",
+ "Epoch 13/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0628 - accuracy: 0.9889\n",
+ "Epoch 14/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0689 - accuracy: 0.9805\n",
+ "Epoch 15/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0686 - accuracy: 0.9763\n",
+ "Epoch 16/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0145 - accuracy: 0.9958\n",
+ "Epoch 17/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0118 - accuracy: 0.9944\n",
+ "Epoch 18/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 0.0015 - accuracy: 1.0000\n",
+ "Epoch 19/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 3.8732e-04 - accuracy: 1.0000\n",
+ "Epoch 20/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 2.7205e-04 - accuracy: 1.0000\n",
+ "Epoch 21/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 2.0975e-04 - accuracy: 1.0000\n",
+ "Epoch 22/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 1.6800e-04 - accuracy: 1.0000\n",
+ "Epoch 23/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 1.3843e-04 - accuracy: 1.0000\n",
+ "Epoch 24/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 1.1636e-04 - accuracy: 1.0000\n",
+ "Epoch 25/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 9.9200e-05 - accuracy: 1.0000\n",
+ "Epoch 26/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 8.6527e-05 - accuracy: 1.0000\n",
+ "Epoch 27/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 7.6115e-05 - accuracy: 1.0000\n",
+ "Epoch 28/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 6.7418e-05 - accuracy: 1.0000\n",
+ "Epoch 29/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 6.0127e-05 - accuracy: 1.0000\n",
+ "Epoch 30/30\n",
+ "90/90 [==============================] - 0s 1ms/step - loss: 5.3862e-05 - accuracy: 1.0000\n",
+ "8/8 [==============================] - 0s 1ms/step - loss: 0.0561 - accuracy: 0.9833\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "[0.056050438433885574, 0.9833333492279053]"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
- "# your code here"
+ "# your code here\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "#Create a `Sequential` model\n",
+ "\n",
+ "\n",
+ "# I added another layer\n",
+ "# Build the model.\n",
+    "model = Sequential([ # Sequential: a plain linear stack of layers\n",
+    "    Dense(64, activation='relu', input_shape=(9,)), # 9 board cells as input; Dense = fully connected (every neuron links to all neurons of the next layer)\n",
+ " Dense(64, activation='relu'), # choosing relu instead of sigmoid, this is somewhat common\n",
+ " Dense(64, activation='relu'), \n",
+    "    Dense(10, activation='softmax'), # softmax output layer; note: only 2 units are needed for this binary (win / no-win) target\n",
+ "])\n",
+ "\n",
+ "opt = tf.keras.optimizers.Adam(learning_rate=0.01)\n",
+ "\n",
+ "\n",
+ "# Compile the model.\n",
+ "model.compile(\n",
+ " optimizer=opt, #here we could use stochastic gradient descent, but adam is a de facto standard\n",
+    "    loss='sparse_categorical_crossentropy', # suitable loss when class labels are integer-encoded (here bool -> 0/1)\n",
+ " metrics=['accuracy'],\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Train the model.\n",
+ "#tmodel = model.fit(\n",
+ "# X_train,\n",
+ "# to_categorical(y_train), # just to make sure the outputs are not considered numeric (because, ya know, they are numbers...)\n",
+ "# epochs=5, # go 5 times through the whole dataset\n",
+ "# #batch_size=32, # send 32 images at a time before you tweak the network again, to make it faster\n",
+ "#)\n",
+ "\n",
+ "model.fit(X_train,y_train, batch_size=8, epochs=30)\n",
+ "\n",
+ "\n",
+ "# Evaluate the model.\n",
+ "#model.evaluate(\n",
+ "# X_test,\n",
+ "# to_categorical(y_test)\n",
+ "#)\n",
+ "model.evaluate(X_test,y_test)"
]
},
{
@@ -120,11 +733,12 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
- "# your answer here"
+ "# your answer here\n",
+    "# increasing the number of layers and raising the learning rate to 0.01 (up from Adam's 0.001 default)"
]
}
],
@@ -144,7 +758,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.6.10"
}
},
"nbformat": 4,