Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1 +1,46 @@
# Virtual Assistant (P19)
## Overview
Welcome to the Virtual Assistant project! This Python-based virtual assistant leverages various libraries, including OpenCV, PyWhatKit, PyAudio, and more, to perform face detection, object detection, automated email generation, and playing music on YouTube. This README file provides essential information to get you started and use the virtual assistant effectively.

## Features
### Face Detection: The virtual assistant can detect faces in images and videos using the OpenCV library. This feature enhances the security and interaction capabilities of the assistant.

### Object Detection: Utilizing OpenCV's object detection capabilities, the virtual assistant can identify and classify objects in images or video streams. This is useful for a variety of applications, including surveillance and analysis.

### Automated Email Generation: The virtual assistant can compose emails with a locally hosted LLM and send them over SMTP using Python's smtplib. This feature is handy for sending predefined messages or automated responses.

### Playing Music on YouTube: With the help of PyWhatKit and other libraries, the virtual assistant can search for and play music on YouTube. Users can specify their preferences or choose from predefined playlists.

## Prerequisites
Before using the virtual assistant, ensure you have the following prerequisites installed:
Python 3.x
Required libraries (install using pip install -r requirements.txt):
OpenCV
PyWhatKit
PyAudio
Other necessary libraries

## Usage
### Face Detection:
Activate face detection by specifying the appropriate command or trigger phrase.
The virtual assistant will use the device's camera or process a specified image or video file.

### Object Detection:
Use the command or trigger phrase to initiate object detection.
The assistant will analyze the input image or video stream and provide information about detected objects.

### Automated Email Generation:
Provide input or specify triggers to compose and send emails automatically.

### Playing Music on YouTube:
Request music playback by providing the song name, artist, or genre.
The assistant will use PyWhatKit to search for the music on YouTube and play it.

## Customization
Feel free to customize the virtual assistant to suit your needs. You can modify the behavior, add new features, or integrate additional libraries as required.

## Contributing
Contributions are welcome! If you find bugs or want to add new features, please open an issue or submit a pull request.

# Happy coding with your virtual assistant!
108 changes: 108 additions & 0 deletions app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers


def send_email(sender_email, receiver_email, subject, body):
    """Send a plain-text email through Gmail's SMTP server over STARTTLS.

    Args:
        sender_email: Gmail address used both to authenticate and as the
            From: header.
        receiver_email: Destination email address.
        subject: Subject line of the message.
        body: Plain-text body of the message.

    Raises:
        KeyError: if the GMAIL_APP_PASSWORD environment variable is unset.
        smtplib.SMTPException: on connection, authentication, or send failure.
    """
    # SECURITY FIX: the original hard-coded a live Gmail app password in
    # source. Read it from the environment instead; the leaked credential
    # must be revoked and rotated.
    password = os.environ["GMAIL_APP_PASSWORD"]

    # Build the MIME message before opening a socket so we fail fast on
    # bad input without leaving a half-open connection.
    msg = MIMEMultipart()
    msg['From'] = sender_email
    msg['To'] = receiver_email
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain'))

    # Ensure the SMTP session is closed even if login/send raises.
    smtp_server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        smtp_server.starttls()
        smtp_server.login(sender_email, password)
        smtp_server.sendmail(sender_email, receiver_email, msg.as_string())
    finally:
        smtp_server.quit()




# Function to get the response back
def getLLMResponse(form_input, email_sender, email_recipient, email_style):
    """Generate an email draft with a locally hosted Llama-2 chat model.

    Args:
        form_input: Topic/content the email should cover.
        email_sender: Name to use as the email's sender.
        email_recipient: Name to use as the email's recipient.
        email_style: Writing style, e.g. 'Formal' or 'Neutral'.

    Returns:
        The model-generated email text (str).
    """
    # CTransformers wraps GGML-quantized models (Llama, GPT4All-J, MPT,
    # Falcon, ...) so a quantized Llama-2-7B-Chat can run on CPU.
    # Quantization (16-bit floats -> 8-bit ints) shrinks the model and
    # makes CPU inference practical while largely preserving quality.
    # Model file: https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
    llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
                        model_type='llama',
                        # Low temperature for near-deterministic drafts.
                        config={'max_new_tokens': 256,
                                'temperature': 0.01},
                        # SECURITY FIX: the original hard-coded a live
                        # Hugging Face token here. Read it from the
                        # environment; the leaked token must be revoked.
                        access_token=os.getenv("HF_ACCESS_TOKEN")
                        )

    # Template for building the PROMPT (kept byte-identical: changing it
    # would change model output).
    template = """
    Write a email with {style} style and includes topic :{email_topic}.\n\nSender: {sender}\nRecipient: {recipient}
    \n\nEmail Text:

    """

    # Creating the final PROMPT
    prompt = PromptTemplate(
        input_variables=["style", "email_topic", "sender", "recipient"],
        template=template, )

    # Generating the response using LLM
    response = llm(
        prompt.format(email_topic=form_input, sender=email_sender, recipient=email_recipient, style=email_style))
    print(response)

    return response


st.set_page_config(page_title="Generate Emails",
                   page_icon='📧',
                   layout='centered',
                   initial_sidebar_state='collapsed')
st.header("Generate Emails 📧")

form_input = st.text_area('Enter the email topic', height=275)

# Creating columns for the UI - To receive inputs from user
col1, col2, col3 = st.columns([10, 10, 5])
with col1:
    email_sender = st.text_input('Sender Name')
with col2:
    email_recipient = st.text_input('Recipient Name')
with col3:
    email_style = st.selectbox('Writing Style',
                               ('Formal', 'Appreciating', 'Not Satisfied', 'Neutral'),
                               index=0)

# The recipient address must be rendered BEFORE the send button is
# handled: the original created this text_input inside the send branch,
# so it could never contain a value on the run that handled the click.
receiver_email = st.text_input("Please enter your email address: ")

submit = st.button("Generate")
send_button = st.button("Send Email")

# BUG FIX: Streamlit re-runs this script top-to-bottom on every widget
# interaction, so the original `while True:` loop spun forever whenever
# neither button was pressed, and `generate_response` raised NameError
# if "Send Email" was clicked before "Generate". Persist the draft in
# st.session_state across reruns instead.
if submit:
    st.session_state['generated_email'] = getLLMResponse(
        form_input, email_sender, email_recipient, email_style)

# Show the current draft (if any) on every rerun.
if 'generated_email' in st.session_state:
    st.write(st.session_state['generated_email'])

if send_button:
    if 'generated_email' not in st.session_state:
        st.error("Please generate an email before sending it.")
    elif not receiver_email:
        st.error("Please enter the recipient's email address.")
    else:
        sender_email = "mysteryverse9@gmail.com"
        subject = "Test email from Python"
        body = st.session_state['generated_email']
        send_email(sender_email, receiver_email, subject, body)
        st.success("Email Sent Successfully!")
52 changes: 52 additions & 0 deletions dataset.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import cv2, os, time

# Haar cascade used for face detection; change the path if the file
# lives elsewhere on your machine.
haar_file = 'haarcascade_frontalface_default.xml'
# All of the face images will be stored under this directory.
datasets = "dataset"
# Change the name below when creating a dataset for a new person.
name = 'priyanshu'

path = os.path.join(datasets, name)
# Create the per-person sub-folder if it doesn't already exist
# (makedirs also creates the parent 'dataset' dir if missing).
os.makedirs(path, exist_ok=True)

# Size (width, height) every saved face image is resized to.
(width, height) = (640, 480)

face_cascade = cv2.CascadeClassifier(haar_file)
cap = cv2.VideoCapture(0)
# Reports whether the camera was successfully opened.
print("Webcam is open? ", cap.isOpened())
# Give the camera a moment to warm up before grabbing frames.
time.sleep(2)

# Number of face pictures to take. Having too many images might slow
# down training later; adjust as needed.
NUM_IMAGES = 300
count = 1
print("Taking pictures...")
# BUG FIX: the original used `while count < 300` with count starting at
# 1, so only 299 images were saved; use <= to save exactly NUM_IMAGES.
while count <= NUM_IMAGES:
    ret, frame = cap.read()
    if not ret:
        # ROBUSTNESS FIX: the original busy-spun forever when the camera
        # returned no frame; stop cleanly instead.
        print("Failed to read from webcam; stopping.")
        break
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale(source, scale_factor, min_neighbours)
    faces = face_cascade.detectMultiScale(img_gray, 1.3, 4)
    for (x, y, w, h) in faces:
        # Draw a rectangle around the face so the user can see the ROI
        # (region of interest) that will be saved.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Crop the detected face from the grayscale frame.
        face = img_gray[y:y + h, x:x + w]
        # Resize the crop to the fixed size defined above.
        face_resize = cv2.resize(face, (width, height))
        # Save images numbered 1..NUM_IMAGES.
        cv2.imwrite('%s/%s.png' % (path, count), face_resize)
        count += 1
    cv2.imshow('Face Capturing', frame)
    if cv2.waitKey(1) == ord("q"):
        break
print("Your face has been created.")
cap.release()
cv2.destroyAllWindows()
53 changes: 53 additions & 0 deletions face_detect.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import cv2
import os
import numpy as np

# Haar cascade for detection and the directory of training images
# produced by dataset.py (one sub-folder per person).
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'dataset/'

# Walk the dataset: collect each grayscale image, an integer label per
# person, and an id -> name mapping for display.
(images, labels, names, id) = ([], [], {}, 0)
for (subdir, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = os.path.join(subjectpath, filename)
            images.append(cv2.imread(path, 0))  # 0 = load as grayscale
            labels.append(int(id))
        id = id + 1

(images, labels) = [np.array(lists) for lists in [images, labels]]

# LBPH recognizer from the opencv-contrib 'face' submodule.
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, labels)

face_cascade = cv2.CascadeClassifier(haar_file)
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        # ROBUSTNESS FIX: stop cleanly if the camera yields no frame
        # instead of looping forever.
        break
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale(source_image, scale, min_neighbours)
    faces = face_cascade.detectMultiScale(img_gray, 1.3, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # BUG FIX: the original sliced columns with x:x + h (height used
        # as the horizontal extent), distorting the crop whenever the
        # detection box wasn't square. Width belongs on the column axis.
        face = img_gray[y:y + h, x:x + w]
        # Match the 640x480 size the dataset images were captured at.
        face_resize = cv2.resize(face, (640, 480))
        # predict() returns (label, confidence); LOWER confidence means
        # a better match for LBPH.
        prediction = model.predict(face_resize)
        if prediction[1] < 25:
            person_name = names[prediction[0]]
            cv2.putText(frame, '%s' % (person_name), (x + 5, (y + 25 + h)),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, (20, 185, 20), 2)
        else:
            cv2.putText(frame, "Unknown", (x + 5, (y + 25 + h)),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, (65, 65, 255), 2)

    cv2.imshow("Face Recognition", frame)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
Loading