diff --git a/your-code/bonus.ipynb b/your-code/bonus.ipynb
index 92e4a72..6664abc 100644
--- a/your-code/bonus.ipynb
+++ b/your-code/bonus.ipynb
@@ -144,7 +144,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.7.6"
}
},
"nbformat": 4,
diff --git a/your-code/challenge-1.ipynb b/your-code/challenge-1.ipynb
index 2487c5f..8c2bf08 100644
--- a/your-code/challenge-1.ipynb
+++ b/your-code/challenge-1.ipynb
@@ -34,11 +34,670 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " o | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 x x x x o o x o o True\n",
+ "1 x x x x o o o x o True\n",
+ "2 x x x x o o o o x True\n",
+ "3 x x x x o o o b b True\n",
+ "4 x x x x o o b o b True"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "\n",
+ "data = pd.read_csv('tic-tac-toe.csv')\n",
+ "data.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(958, 10)"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "data.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "TL 0\n",
+ "TM 0\n",
+ "TR 0\n",
+ "ML 0\n",
+ "MM 0\n",
+ "MR 0\n",
+ "BL 0\n",
+ "BM 0\n",
+ "BR 0\n",
+ "class 0\n",
+ "dtype: int64"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "data.isnull().sum()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "TL object\n",
+ "TM object\n",
+ "TR object\n",
+ "ML object\n",
+ "MM object\n",
+ "MR object\n",
+ "BL object\n",
+ "BM object\n",
+ "BR object\n",
+ "class bool\n",
+ "dtype: object"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "data.dtypes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "x 418\n",
+ "o 335\n",
+ "b 205\n",
+ "Name: TL, dtype: int64 \n",
+ "\n",
+ "x 378\n",
+ "o 330\n",
+ "b 250\n",
+ "Name: TM, dtype: int64 \n",
+ "\n",
+ "x 418\n",
+ "o 335\n",
+ "b 205\n",
+ "Name: TR, dtype: int64 \n",
+ "\n",
+ "x 378\n",
+ "o 330\n",
+ "b 250\n",
+ "Name: ML, dtype: int64 \n",
+ "\n",
+ "x 458\n",
+ "o 340\n",
+ "b 160\n",
+ "Name: MM, dtype: int64 \n",
+ "\n",
+ "x 378\n",
+ "o 330\n",
+ "b 250\n",
+ "Name: MR, dtype: int64 \n",
+ "\n",
+ "x 418\n",
+ "o 335\n",
+ "b 205\n",
+ "Name: BL, dtype: int64 \n",
+ "\n",
+ "x 378\n",
+ "o 330\n",
+ "b 250\n",
+ "Name: BM, dtype: int64 \n",
+ "\n",
+ "x 418\n",
+ "o 335\n",
+ "b 205\n",
+ "Name: BR, dtype: int64 \n",
+ "\n",
+ "True 626\n",
+ "False 332\n",
+ "Name: class, dtype: int64 \n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# print value counts for all the columns in the dataframe in a readable way\n",
+ "\n",
+ "for col in data.columns:\n",
+ " print(data[col].value_counts(), \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " b | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " o | \n",
+ " b | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 x x x x o o x o o 1\n",
+ "1 x x x x o o o x o 1\n",
+ "2 x x x x o o o o x 1\n",
+ "3 x x x x o o o b b 1\n",
+ "4 x x x x o o b o b 1"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# turn boolean in class into numeric values\n",
+ "# another method: \n",
+ "\n",
+ "data[\"class\"] = np.where(data[\"class\"], 1, 0)\n",
+ "\n",
+ "# now \"True\" is 1 and \"False\" is 0\n",
+ "data.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " -1 | \n",
+ " -1 | \n",
+ " 0 | \n",
+ " -1 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 1 1 1 1 -1 -1 1 -1 -1 1\n",
+ "1 1 1 1 1 -1 -1 -1 1 -1 1\n",
+ "2 1 1 1 1 -1 -1 -1 -1 1 1\n",
+ "3 1 1 1 1 -1 -1 -1 0 0 1\n",
+ "4 1 1 1 1 -1 -1 0 -1 0 1"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# do the same of all the other columns\n",
+ "# np.where() method\n",
+ "# 'x' is now 1 and 'o' is now -1\n",
+ "# 'b' is now 0\n",
+ "\n",
+ "for col in data.columns:\n",
+ " if(col != \"class\"):\n",
+ " data[col] = np.where(data[col]==\"x\", 1,\n",
+ " np.where(data[col]==\"o\", -1, 0))\n",
+ " \n",
+ "data.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "TL int64\n",
+ "TM int64\n",
+ "TR int64\n",
+ "ML int64\n",
+ "MM int64\n",
+ "MR int64\n",
+ "BL int64\n",
+ "BM int64\n",
+ "BR int64\n",
+ "class int64\n",
+ "dtype: object"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "data.dtypes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " 1 418\n",
+ "-1 335\n",
+ " 0 205\n",
+ "Name: TL, dtype: int64 \n",
+ "\n",
+ " 1 378\n",
+ "-1 330\n",
+ " 0 250\n",
+ "Name: TM, dtype: int64 \n",
+ "\n",
+ " 1 418\n",
+ "-1 335\n",
+ " 0 205\n",
+ "Name: TR, dtype: int64 \n",
+ "\n",
+ " 1 378\n",
+ "-1 330\n",
+ " 0 250\n",
+ "Name: ML, dtype: int64 \n",
+ "\n",
+ " 1 458\n",
+ "-1 340\n",
+ " 0 160\n",
+ "Name: MM, dtype: int64 \n",
+ "\n",
+ " 1 378\n",
+ "-1 330\n",
+ " 0 250\n",
+ "Name: MR, dtype: int64 \n",
+ "\n",
+ " 1 418\n",
+ "-1 335\n",
+ " 0 205\n",
+ "Name: BL, dtype: int64 \n",
+ "\n",
+ " 1 378\n",
+ "-1 330\n",
+ " 0 250\n",
+ "Name: BM, dtype: int64 \n",
+ "\n",
+ " 1 418\n",
+ "-1 335\n",
+ " 0 205\n",
+ "Name: BR, dtype: int64 \n",
+ "\n",
+ "1 626\n",
+ "0 332\n",
+ "Name: class, dtype: int64 \n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# check value counts again\n",
+ "\n",
+ "for col in data.columns:\n",
+ " print(data[col].value_counts(), \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "# Separate inputs and output\n",
+ "# Pass only arrays of values\n",
+ "\n",
+ "# X = data.drop('class', axis=1)\n",
+ "# y = data['class']\n",
+ "\n",
+ "X = data.drop('class', axis=1).values\n",
+ "y = data['class'].values\n"
]
},
{
@@ -60,11 +719,107 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Split the training and test data\n",
+ "\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from keras.models import Sequential\n",
+ "from keras.layers import Dense\n",
+ "from keras.utils import to_categorical\n",
+ "\n",
+ "model = Sequential([\n",
+ " Dense(64, activation='relu', input_shape=(9,)),\n",
+ " Dense(64, activation='relu'),\n",
+ " Dense(2, activation='softmax')])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "model.compile(\n",
+ " optimizer='adam',\n",
+ " loss='sparse_categorical_crossentropy',\n",
+ " metrics=['accuracy'])\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/5\n",
+ "766/766 [==============================] - 1s 764us/step - loss: 0.6232 - accuracy: 0.6775\n",
+ "Epoch 2/5\n",
+ "766/766 [==============================] - 0s 114us/step - loss: 0.5398 - accuracy: 0.7441\n",
+ "Epoch 3/5\n",
+ "766/766 [==============================] - 0s 91us/step - loss: 0.4977 - accuracy: 0.7637\n",
+ "Epoch 4/5\n",
+ "766/766 [==============================] - 0s 116us/step - loss: 0.4617 - accuracy: 0.7820\n",
+ "Epoch 5/5\n",
+ "766/766 [==============================] - 0s 116us/step - loss: 0.4213 - accuracy: 0.8225\n"
+ ]
+ }
+ ],
+ "source": [
+ "history = model.fit(X_train, y_train, epochs=5, batch_size=32)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "192/192 [==============================] - 0s 506us/step\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "[0.4149470975001653, 0.8229166865348816]"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model.evaluate(X_test, y_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# save the model\n",
+ "\n",
+ "model.save_weights('tic-tac-toe.model')"
]
},
{
@@ -78,11 +833,26 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 42,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "87.18 %\n"
+ ]
+ }
+ ],
"source": [
- "# your code here"
+ "\n",
+ "model.load_weights('tic-tac-toe.model')\n",
+ "\n",
+ "_, test_rows, _, test_labels = train_test_split(X_test, y_test, test_size=0.2, random_state=42)\n",
+ "\n",
+ "y_pred = model.predict(test_rows)\n",
+ "\n",
+ "print(round((np.argmax(y_pred, axis=1)==test_labels).sum() * 100 / test_labels.shape[0], 2),'%')\n"
]
},
{
@@ -104,11 +874,160 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 48,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "2 layers; 0.0001 learning rate; 6 epochs; [0.5882300734519958, 0.6927083134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.0001 learning rate; 8 epochs; [0.6038756966590881, 0.7291666865348816] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.0001 learning rate; 10 epochs; [0.5836875935395559, 0.7447916865348816] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.001 learning rate; 6 epochs; [0.5960065921147665, 0.6875] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.001 learning rate; 8 epochs; [0.5889786630868912, 0.6927083134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.001 learning rate; 10 epochs; [0.5797113378842672, 0.7291666865348816] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.01 learning rate; 6 epochs; [0.5916754007339478, 0.7395833134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.01 learning rate; 8 epochs; [0.5951681236426035, 0.703125] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "2 layers; 0.01 learning rate; 10 epochs; [0.5815596381823221, 0.7395833134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 3ms/step\n",
+ "3 layers; 0.0001 learning rate; 6 epochs; [0.5917025705178579, 0.6979166865348816] \n",
+ "\n",
+ "192/192 [==============================] - 0s 3ms/step\n",
+ "3 layers; 0.0001 learning rate; 8 epochs; [0.5955702463785807, 0.6927083134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 3ms/step\n",
+ "3 layers; 0.0001 learning rate; 10 epochs; [0.5581065913041433, 0.7239583134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 2ms/step\n",
+ "3 layers; 0.001 learning rate; 6 epochs; [0.6025189856688181, 0.6927083134651184] \n",
+ "\n",
+ "192/192 [==============================] - 0s 3ms/step\n",
+ "3 layers; 0.001 learning rate; 8 epochs; [0.5807664394378662, 0.6875] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "3 layers; 0.001 learning rate; 10 epochs; [0.5705332656701406, 0.7395833134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "3 layers; 0.01 learning rate; 6 epochs; [0.6014077365398407, 0.6614583134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "3 layers; 0.01 learning rate; 8 epochs; [0.5647442440191904, 0.7083333134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "3 layers; 0.01 learning rate; 10 epochs; [0.5693403085072836, 0.7239583134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.0001 learning rate; 6 epochs; [0.6196440855662028, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.0001 learning rate; 8 epochs; [0.6024164060751597, 0.6927083134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.0001 learning rate; 10 epochs; [0.5663572549819946, 0.703125] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.001 learning rate; 6 epochs; [0.5773806472619375, 0.7083333134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.001 learning rate; 8 epochs; [0.5967416763305664, 0.65625] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.001 learning rate; 10 epochs; [0.5899525980154673, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.01 learning rate; 6 epochs; [0.6040208141009012, 0.6666666865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 5ms/step\n",
+ "4 layers; 0.01 learning rate; 8 epochs; [0.5946759680906931, 0.671875] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "4 layers; 0.01 learning rate; 10 epochs; [0.5864456097284952, 0.6927083134651184] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.0001 learning rate; 6 epochs; [0.6122024158636729, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.0001 learning rate; 8 epochs; [0.61072638630867, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.0001 learning rate; 10 epochs; [0.6052527825037638, 0.671875] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.001 learning rate; 6 epochs; [0.6322441597779592, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.001 learning rate; 8 epochs; [0.5726757148901621, 0.6875] \n",
+ "\n",
+ "192/192 [==============================] - 1s 4ms/step\n",
+ "5 layers; 0.001 learning rate; 10 epochs; [0.6185800135135651, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.01 learning rate; 6 epochs; [0.6193112730979919, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.01 learning rate; 8 epochs; [0.6069303750991821, 0.6510416865348816] \n",
+ "\n",
+ "192/192 [==============================] - 1s 3ms/step\n",
+ "5 layers; 0.01 learning rate; 10 epochs; [0.5859572688738505, 0.6510416865348816] \n",
+ "\n"
+ ]
+ }
+ ],
"source": [
- "# your code here"
+ "\n",
+ "from keras import optimizers as op\n",
+ "\n",
+ "n_layers = [2,3,4,5]\n",
+ "\n",
+ "learning_rate = [0.0001, 0.001, 0.01]\n",
+ "\n",
+ "n_epochs = [6,8,10]\n",
+ "\n",
+ "for nl in n_layers:\n",
+ " for lr in learning_rate:\n",
+ " for ne in n_epochs:\n",
+ " \n",
+ " # initialize optimizer\n",
+    "            # initialize optimizer with the learning rate from the grid search\n",
+    "            # (was hard-coded to 0.01, which made the lr loop a no-op and the\n",
+    "            # printed \"learning rate\" labels misleading)\n",
+    "            adam_opt = op.SGD(lr=lr)\n",
+ " \n",
+ " # define layers\n",
+ " layers = [Dense(64, activation='relu') for i in range(nl - 1)]\n",
+ " layers.append(Dense(2, activation='softmax'))\n",
+ " \n",
+ " # create model with layers\n",
+ " model = Sequential(layers)\n",
+ " \n",
+ " # compile model with optimizer\n",
+ " model.compile(\n",
+ " optimizer=adam_opt,\n",
+ " loss='sparse_categorical_crossentropy',\n",
+ " metrics=['accuracy'])\n",
+ " \n",
+ " # fit model with epochs\n",
+ " history = model.fit(X_train, y_train, epochs=ne, batch_size=32, verbose=0)\n",
+ " \n",
+ " # print results\n",
+ " print(nl, \"layers;\", lr, \"learning rate;\", ne, \"epochs;\", model.evaluate(X_test, y_test), \"\\n\")\n",
+ " "
]
},
{
@@ -124,7 +1043,16 @@
"metadata": {},
"outputs": [],
"source": [
- "# your answer here"
+ "\n",
+ "\n",
+ "'''\n",
+ "There is no correlation between adding more epochs and improvements to performance.\n",
+ "\n",
+ "Overfitting?\n",
+ "\n",
+ "The model accuracy gets worse after a point.\n",
+ "\n",
+ "'''"
]
}
],
@@ -144,7 +1072,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.7.7"
}
},
"nbformat": 4,
diff --git a/your-code/challenge-2.md b/your-code/challenge-2.md
index 9d98968..aa366c5 100644
--- a/your-code/challenge-2.md
+++ b/your-code/challenge-2.md
@@ -26,3 +26,29 @@ After you're done, submit a screenshot of your Playground including the followin
* Test and training loss
**Do not google for the end solution!**
+
+## Solution:
+
+Task 1: The model as given combines our two input features into a single neuron. Will this model learn any nonlinearities? Run it to confirm your guess.
+
+No, it is set to Linear.
+
+Task 2: Try increasing the number of neurons in the hidden layer from 1 to 2, and also try changing from a Linear activation to a nonlinear activation like ReLU. Can you create a model that can learn nonlinearities? Can it model the data effectively?
+
+Yes to question 1 (it can learn nonlinearities), and no to question 2 (it does not model the data effectively).
+
+Task 3: Try increasing the number of neurons in the hidden layer from 2 to 3, using a nonlinear activation like ReLU. Can it model the data effectively? How does model quality vary from run to run?
+
+It may model the data, but quality varies from run to run
+
+Task 4: Continue experimenting by adding or removing hidden layers and neurons per layer. Also feel free to change learning rates, regularization, and other learning settings. What is the smallest number of neurons and layers you can use that gives test loss of 0.177 or lower?
+
+A complex model may overfit and perform worse than a simpler one.
+
+
+**TensorFlow Playground**
+Test loss <0.05, spiral
+
+
+
+
diff --git a/your-code/screenshot_celina.png b/your-code/screenshot_celina.png
new file mode 100644
index 0000000..1f30a37
Binary files /dev/null and b/your-code/screenshot_celina.png differ
diff --git a/your-code/tic-tac-toe.model b/your-code/tic-tac-toe.model
new file mode 100644
index 0000000..87b55de
Binary files /dev/null and b/your-code/tic-tac-toe.model differ