# classification_random_forest.py
import re

import numpy as np
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.datasets import load_files
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split

from dataset import DATASET

# The NLTK corpora used below must be available locally, e.g. via
# nltk.download('stopwords') and nltk.download('wordnet').

movie_data = load_files(DATASET.root)
X, y = movie_data.data, movie_data.target
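# load_files expects one subdirectory per class under DATASET.root (assumed
# layout, e.g. neg/ and pos/); .data holds the raw documents as bytes and
# .target the integer class labels.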
documents = []

# A WordNet lemmatizer (not a stemmer): it maps tokens to dictionary lemmas.
lemmatizer = WordNetLemmatizer()
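# Example: lemmatizer.lemmatize('movies') -> 'movie' (tokens are treated as nouns by default).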
for sen in range(0, len(X)):
    # Remove all special characters
    document = re.sub(r'\W', ' ', str(X[sen]))

    # Remove single characters surrounded by whitespace
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)

    # Remove a single character at the start of the document
    document = re.sub(r'^[a-zA-Z]\s+', ' ', document)

    # Collapse multiple spaces into one
    document = re.sub(r'\s+', ' ', document, flags=re.I)

    # Remove the leading "b" left over from the bytes-to-str conversion
    document = re.sub(r'^b\s+', '', document)

    # Convert to lowercase
    document = document.lower()

    # Lemmatize each token
    document = document.split()
    document = [lemmatizer.lemmatize(word) for word in document]
    document = ' '.join(document)

    documents.append(document)

vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7,
                             stop_words=stopwords.words('english'))
X = vectorizer.fit_transform(documents)
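# X is now a sparse document-term matrix of raw counts: one row per document,
# at most 1500 columns; e.g. print(X.shape) to verify.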
# Optional: inspect the most frequent terms in the vocabulary (left disabled).
# sum_words = X.sum(axis=0)
# words_freq = [(word, sum_words[0, idx]) for word, idx in vectorizer.vocabulary_.items()]
# words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
# print(words_freq[:30])
keywords = ['scene', 'good', 'make', 'story', 'would']
feature_names = list(vectorizer.get_feature_names_out())
# Alternative weighting scheme (left disabled): a per-term weight vector built
# from a word_weights dict mapping term -> weight.
# weights = np.ones(len(feature_names))
# for key, value in word_weights.items():
#     weights[feature_names.index(key)] = value
weight_factor = 5
for keyword in keywords:
    position = vectorizer.vocabulary_[keyword]
    print("counts")
    print(X[0:10, position])
    print("weighted counts")
    # Boost the keyword's column in the count matrix
    X[:, position] *= weight_factor
    print(X[0:10, position])
    print("----------------------------------------------------------------------")

tfidfconverter = TfidfTransformer()
# TfidfTransformer accepts the sparse count matrix directly; densify only for
# the classifier.
X = tfidfconverter.fit_transform(X).toarray()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

classifier = RandomForestClassifier(n_estimators=1000, random_state=0)
classifier.fit(X_train, y_train)

y_pred = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
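
# Hedged sketch: classifying a new, unseen review with the fitted pipeline.
# new_doc below is a hypothetical example; real input should also go through
# the same regex cleanup and lemmatization as the training documents.
# new_doc = "a good story with a memorable final scene"
# counts = vectorizer.transform([new_doc])
# for keyword in keywords:
#     counts[:, vectorizer.vocabulary_[keyword]] *= weight_factor
# features = tfidfconverter.transform(counts).toarray()
# print(classifier.predict(features))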