-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
94 lines (73 loc) · 3.1 KB
/
app.py
File metadata and controls
94 lines (73 loc) · 3.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Flatten, Dense
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
# --- Load and prepare the emotion dataset ---
# train.txt is semicolon-separated "text;label" with NO header row, so pass
# header=None + names=: the original call consumed the first sample as the
# header and then overwrote the column names, silently dropping one row.
data = pd.read_csv("train.txt", sep=';', header=None, names=["Text", "Emotions"])
print(data.head())

texts = data["Text"].tolist()
labels = data["Emotions"].tolist()

# Tokenize the text and pad every sequence to the longest one so the model
# receives a fixed-size integer matrix.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
max_length = max(len(seq) for seq in sequences)
padded_sequences = pad_sequences(sequences, maxlen=max_length)

# Encode the string labels to integers, then one-hot encode them for the
# categorical-crossentropy loss used when training the model.
label_encoder = LabelEncoder()
labels = label_encoder.fit_transform(labels)
one_hot_labels = tf.keras.utils.to_categorical(labels)

# Hold out 20% for evaluation; fix random_state so the split (and therefore
# the reported metrics) is reproducible across runs.
xtrain, xtest, ytrain, ytest = train_test_split(padded_sequences,
                                                one_hot_labels,
                                                test_size=0.2,
                                                random_state=42)
# Bag-of-embeddings classifier: embed each token id, flatten the padded
# sequence into one vector, and classify through a single hidden layer.
# Output width equals the number of emotion classes (one-hot vector length).
model = Sequential([
    Embedding(input_dim=len(tokenizer.word_index) + 1,
              output_dim=128, input_length=max_length),
    Flatten(),
    Dense(units=128, activation="relu"),
    Dense(units=len(one_hot_labels[0]), activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(xtrain, ytrain, epochs=10, batch_size=32, validation_data=(xtest, ytest))
# --- Evaluate the trained model on the held-out split ---
# Both targets and softmax outputs are one-hot/probability rows; argmax
# recovers the integer class index for the sklearn metric functions.
y_true = np.argmax(ytest, axis=1)
y_pred = model.predict(xtest)
y_pred_class = np.argmax(y_pred, axis=1)

accuracy = accuracy_score(y_true, y_pred_class)
# The three weighted scores share the same call shape — compute them together.
precision, recall, f1 = (
    scorer(y_true, y_pred_class, average='weighted')
    for scorer in (precision_score, recall_score, f1_score)
)
conf_matrix = confusion_matrix(y_true, y_pred_class)

print(f"Accuracy: {accuracy}")
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1 Score: {f1}")
print(f"Confusion Matrix:\n{conf_matrix}")
from transformers import BertTokenizer, BertModel
import torch

# --- Standalone demo: extract contextual BERT embeddings for one sentence ---
# NOTE(review): these assignments rebind `tokenizer` and `model`, shadowing
# the Keras objects created above — intentional only if the sections of this
# script are independent; confirm before reusing the earlier names below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

# Tokenize the input text into PyTorch tensors ('pt').
text = "Your input text here"
inputs = tokenizer(text, return_tensors='pt')

# Inference only: no_grad() disables gradient tracking.
# (Fixed: the `with` body had lost its indentation, which is a syntax error.)
with torch.no_grad():
    outputs = model(**inputs)
# last_hidden_state holds one contextual vector per input token.
embeddings = outputs.last_hidden_state
import streamlit as st
from transformers import pipeline

# --- Streamlit front-end: classify user text with a pretrained model ---
# The pipeline downloads/caches the model on first use; loading it at module
# level means it is constructed once per Streamlit session process.
classifier = pipeline('sentiment-analysis', model='bhadresh-savani/bert-base-uncased-emotion')

# Minimal UI: one text area and a button that triggers classification.
st.title("Text Emotion Classification")
user_input = st.text_area("Enter text here:")
if st.button("Classify"):
    # (Fixed: the `if` body had lost its indentation, which is a syntax error.)
    result = classifier(user_input)
    st.write(result)