diff --git a/README.md b/README.md index 1b3b751..2c1b624 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ -# Machine Learning from Scratch +# Machine Learning from Scratch. This is the code repository for my [Machine Learning from Scratch youtube playlist](https://www.youtube.com/watch?v=4PHI11lX11I&list=PLP3ANEJKF1TzOz3hwOoRclgRFVi8A76k2) -## 01 Linear Regression using Least Squares +## 01 Linear Regression using Least Squares. [Check out the tutorial video](https://www.youtube.com/watch?v=kR6tBAq16ng&t=2s) -## 02 Linear Regression using Gradient Descent +## 02 Linear Regression using Gradient Descent. [Check out the tutorial video](https://www.youtube.com/watch?v=4PHI11lX11I&t=2s) [Check out the medium post](https://towardsdatascience.com/linear-regression-using-gradient-descent-97a6c8700931) -## 03 Linear Regression in 2 minutes +## 03 Linear Regression in 2 minutes. [Check out the medium post](https://towardsdatascience.com/linear-regression-in-6-lines-of-python-5e1d0cd05b8d) diff --git a/movie.py b/movie.py new file mode 100644 index 0000000..2f33013 --- /dev/null +++ b/movie.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat May 12 00:18:03 2018 +@author: jishn +""" +#Code which trains an ANN classifier on the Churn_Modelling dataset (customer churn prediction). 
+import numpy as np +import matplotlib.pyplot as plt +import pandas as pd + +# Importing the dataset +dataset = pd.read_csv('Churn_Modelling.csv') +X = dataset.iloc[:, 3:13].values +y = dataset.iloc[:, 13].values + +# Encoding categorical data +from sklearn.preprocessing import LabelEncoder, OneHotEncoder +labelencoder_X_1 = LabelEncoder() +X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) +labelencoder_X_2 = LabelEncoder() +X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) +onehotencoder = OneHotEncoder(categorical_features = [1]) +X = onehotencoder.fit_transform(X).toarray() +X = X[:, 1:] + +# Splitting the dataset into the Training set and Test set +from sklearn.model_selection import train_test_split +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) + +# Feature Scaling +from sklearn.preprocessing import StandardScaler +sc = StandardScaler() +X_train = sc.fit_transform(X_train) +X_test = sc.transform(X_test) + + +import keras +from keras.models import Sequential +from keras.layers import Dense + +## used to initialise the different layers +classifier=Sequential() +## adding the first layer; init specifies initial weight values close to 0 +## activation fn is Rectifier +## here the number of nodes is the average of the input and output dimensions +## the best method is k-fold cross validation +classifier.add(Dense(output_dim=6,init='uniform',activation='relu',input_dim=11)) + +classifier.add(Dense(output_dim=6,init='uniform',activation='relu')) + + +classifier.add(Dense(output_dim=1,init='uniform',activation='sigmoid')) + +# Compiling the ANN +classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) + +# Fitting the ANN to the Training set +classifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 100) + +# Part 3 - Making the predictions and evaluating the model + +# Predicting the Test set results +y_pred = classifier.predict(X_test) +y_pred = (y_pred > 0.5) + +# Making the 
Confusion Matrix +from sklearn.metrics import confusion_matrix +cm = confusion_matrix(y_test, y_pred) + +print("Accuracy:",((cm[0][0]+cm[1][1])/cm.sum()))  # correct predictions / total test samples (was hard-coded 2000, valid only for test_size=0.2 of 10000 rows)