diff --git a/your-code/bonus.ipynb b/your-code/bonus.ipynb
index 92e4a72..8908693 100644
--- a/your-code/bonus.ipynb
+++ b/your-code/bonus.ipynb
@@ -144,9 +144,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.8.3"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/your-code/challenge-1.ipynb b/your-code/challenge-1.ipynb
index 2487c5f..8770c84 100644
--- a/your-code/challenge-1.ipynb
+++ b/your-code/challenge-1.ipynb
@@ -34,11 +34,539 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " o | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " x | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " x | \n",
+ " o | \n",
+ " o | \n",
+ " b | \n",
+ " o | \n",
+ " b | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 x x x x o o x o o True\n",
+ "1 x x x x o o o x o True\n",
+ "2 x x x x o o o o x True\n",
+ "3 x x x x o o o b b True\n",
+ "4 x x x x o o b o b True"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd \n",
+ "tic_tac_toe = pd.read_csv('tic-tac-toe.csv')\n",
+ "tic_tac_toe.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(958, 10)"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tic_tac_toe.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tic_tac_toe.describe"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "TL object\n",
+ "TM object\n",
+ "TR object\n",
+ "ML object\n",
+ "MM object\n",
+ "MR object\n",
+ "BL object\n",
+ "BM object\n",
+ "BR object\n",
+ "class bool\n",
+ "dtype: object"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tic_tac_toe.dtypes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.DataFrame(tic_tac_toe)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 288 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 234 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 549 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 217 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 126 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "288 2 0 0 1 2 1 1 2 2 True\n",
+ "234 2 0 2 1 1 2 0 1 2 True\n",
+ "549 0 1 2 1 0 1 2 2 2 True\n",
+ "217 2 1 0 0 2 1 2 1 2 True\n",
+ "126 2 1 2 1 2 1 1 2 2 True"
+ ]
+ },
+ "execution_count": 44,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.sample(5)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Index(['TL', 'TM', 'TR', 'ML', 'MM', 'MR', 'BL', 'BM', 'BR', 'class'], dtype='object')"
+ ]
+ },
+ "execution_count": 45,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.columns"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "X = df[['TL', 'TM', 'TR', 'ML', 'MM', 'MR', 'BL', 'BM', 'BR']]\n",
+ "y = df['class']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "from sklearn import preprocessing \n",
+ "label = preprocessing.LabelEncoder()\n",
+ "\n",
+ "# Assigning numerical values to the DataFrame \n",
+ "for x in X:\n",
+ " label.fit(df[x])\n",
+ " df[x] = label.transform(df[x])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " TL | \n",
+ " TM | \n",
+ " TR | \n",
+ " ML | \n",
+ " MM | \n",
+ " MR | \n",
+ " BL | \n",
+ " BM | \n",
+ " BR | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " TL TM TR ML MM MR BL BM BR class\n",
+ "0 2 2 2 2 1 1 2 1 1 True\n",
+ "1 2 2 2 2 1 1 1 2 1 True\n",
+ "2 2 2 2 2 1 1 1 1 2 True\n",
+ "3 2 2 2 2 1 1 1 0 0 True\n",
+ "4 2 2 2 2 1 1 0 1 0 True"
+ ]
+ },
+ "execution_count": 48,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#df = pd.get_dummies(tic_tac_toe, columns = ['class'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Applying standardization to my dataset\n",
+ "mm_scaler = preprocessing.StandardScaler()\n",
+ "X = mm_scaler.fit_transform(X)"
]
},
{
@@ -60,11 +588,194 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Split the training and test data.\n",
+ "\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X,y, random_state = 0)\n",
+ "\n",
+ "X_train = mm_scaler.fit_transform(X_train)\n",
+ "X_test = mm_scaler.transform(X_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "#Create a Sequential model.\n",
+ "from sklearn.neural_network import MLPClassifier\n",
+ "mlp = MLPClassifier(hidden_layer_sizes= (100,200) ,max_iter=20, alpha=1e-4,\n",
+ " solver='sgd', verbose=10, random_state=1,\n",
+ " learning_rate_init=.1)\n",
+ "\n",
+ "#Add several layers to your model. \n",
+ "#Make sure you use ReLU as the activation function for the middle layers. \n",
+ "#Use Softmax for the output layer because each output has a\n",
    "#single label and all the label probabilities add up to 1.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 152,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Iteration 1, loss = 0.66827589\n",
+ "Iteration 2, loss = 0.62438257\n",
+ "Iteration 3, loss = 0.58806163\n",
+ "Iteration 4, loss = 0.56598247\n",
+ "Iteration 5, loss = 0.54282279\n",
+ "Iteration 6, loss = 0.52110810\n",
+ "Iteration 7, loss = 0.49368803\n",
+ "Iteration 8, loss = 0.46570875\n",
+ "Iteration 9, loss = 0.43649043\n",
+ "Iteration 10, loss = 0.40478960\n",
+ "Iteration 11, loss = 0.37263589\n",
+ "Iteration 12, loss = 0.34475488\n",
+ "Iteration 13, loss = 0.31672139\n",
+ "Iteration 14, loss = 0.29666126\n",
+ "Iteration 15, loss = 0.27633654\n",
+ "Iteration 16, loss = 0.26303131\n",
+ "Iteration 17, loss = 0.24032215\n",
+ "Iteration 18, loss = 0.21942277\n",
+ "Iteration 19, loss = 0.20499752\n",
+ "Iteration 20, loss = 0.18970273\n",
+ "Training set score: 0.938719\n",
+ "Test set score: 0.845833\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/Users/jossuebangos/opt/anaconda3/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py:614: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (20) reached and the optimization hasn't converged yet.\n",
+ " warnings.warn(\n"
+ ]
+ }
+ ],
+ "source": [
+ "mlp.fit(X_train, y_train)\n",
+ "\n",
+ "print(\"Training set score: %f\" % mlp.score(X_train, y_train))\n",
+ "print(\"Test set score: %f\" % mlp.score(X_test, y_test))\n",
+ "\n",
+ "# The loss is an estimation of the error in current iteration of the model\n",
    "# This model optimizes the log-loss function using LBFGS or stochastic gradient descent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 153,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " class | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " True | \n",
+ "
\n",
+ " \n",
+ " | ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " | 953 | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " | 954 | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " | 955 | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " | 956 | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ " | 957 | \n",
+ " False | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
958 rows × 1 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " class\n",
+ "0 True\n",
+ "1 True\n",
+ "2 True\n",
+ "3 True\n",
+ "4 True\n",
+ ".. ...\n",
+ "953 False\n",
+ "954 False\n",
+ "955 False\n",
+ "956 False\n",
+ "957 False\n",
+ "\n",
+ "[958 rows x 1 columns]"
+ ]
+ },
+ "execution_count": 153,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "testing_y_real = pd.DataFrame(y)\n",
+ "testing_y_real"
]
},
{
@@ -76,13 +787,51 @@
"Now load your saved model and use it to make predictions on a few random rows in the test dataset. Check if the predictions are correct."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow.keras as keras\n",
+ "import tensorflow as tf"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "NotFittedError",
+ "evalue": "This MLPClassifier instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mNotFittedError\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m#Make Predictionn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmlp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mtesting_y_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mtest\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/opt/anaconda3/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, X)\u001b[0m\n\u001b[1;32m 1031\u001b[0m \u001b[0mThe\u001b[0m \u001b[0mpredicted\u001b[0m \u001b[0mclasses\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1032\u001b[0m \"\"\"\n\u001b[0;32m-> 1033\u001b[0;31m \u001b[0mcheck_is_fitted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1034\u001b[0m \u001b[0my_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_pass_fast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1035\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/opt/anaconda3/lib/python3.8/site-packages/sklearn/utils/validation.py\u001b[0m in \u001b[0;36minner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mall_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;31m# extra_args > 0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/opt/anaconda3/lib/python3.8/site-packages/sklearn/utils/validation.py\u001b[0m in \u001b[0;36mcheck_is_fitted\u001b[0;34m(estimator, attributes, msg, all_or_any)\u001b[0m\n\u001b[1;32m 1039\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1040\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1041\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mNotFittedError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m'name'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mestimator\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1042\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1043\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mNotFittedError\u001b[0m: This MLPClassifier instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator."
+ ]
+ }
+ ],
+ "source": [
    "#Make Prediction \n",
+ "predictions = mlp.predict(X[:100])\n",
+ "testing_y_pred = pd.DataFrame(predictions)\n",
+ "test"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "testing_y_real = pd.DataFrame(y)\n",
+ "testing_y_real"
]
},
{
@@ -108,7 +857,18 @@
"metadata": {},
"outputs": [],
"source": [
- "# your code here"
+ "x_test = tf.keras.utils.normalize(x_test, axis=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tf.keras.optimizers.Adam(\n",
+ " learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,\n",
+ " name='Adam')"
]
},
{
@@ -144,9 +904,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.3"
+ "version": "3.8.3"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}