-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathspeak.py
More file actions
130 lines (114 loc) · 4.53 KB
/
speak.py
File metadata and controls
130 lines (114 loc) · 4.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import os
import streamlit as st
import google.generativeai as genai
import speech_recognition as sr
from gtts import gTTS
import tempfile
# Configure the Gemini API. Read the key from the environment instead of
# hard-coding it: the original passed api_key="" which can never authenticate,
# and `os` was imported but unused.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))

# Generation parameters shared by every request in this session.
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 40,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

# Initialize the model with a system instruction scoping it to AI Q&A.
model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
    system_instruction="You are an AI assistant answering questions about artificial intelligence. Respond concisely and clearly.",
)

# Seed the chat with a short AI-related exchange so follow-up questions
# have conversational context from the first user turn.
chat_session = model.start_chat(
    history=[
        {
            "role": "user",
            "parts": [
                "What is artificial intelligence?",
            ],
        },
        {
            "role": "model",
            "parts": [
                "Artificial intelligence (AI) refers to the simulation of human intelligence in machines designed to think and act like humans. It involves learning, reasoning, problem-solving, perception, and language understanding.",
            ],
        },
        {
            "role": "user",
            "parts": [
                "What are the types of machine learning?",
            ],
        },
        {
            "role": "model",
            "parts": [
                "Machine learning can be classified into three main types: supervised learning, unsupervised learning, and reinforcement learning. Each has its own approach and application in AI tasks.",
            ],
        },
    ]
)

# Shared recognizer instance used by listen_to_audio().
recognizer = sr.Recognizer()
def listen_to_audio():
    """Capture one utterance from the default microphone and transcribe it.

    Returns:
        The recognized text, or an empty string when the speech could not be
        understood or the Google recognition service was unreachable.
    """
    with sr.Microphone() as source:
        print("Listening for your input...")
        # Calibrate the energy threshold against background noise before
        # listening, so speech detection works in noisy rooms.
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
        try:
            print("Recognizing your speech...")
            user_input = recognizer.recognize_google(audio)
            print(f"You said: {user_input}")
            return user_input
        except sr.UnknownValueError:
            # Audio was captured but contained no recognizable speech.
            print("Sorry, I couldn't understand your speech.")
            return ""
        except sr.RequestError:
            # The recognition API could not be reached (network/quota issue).
            print("Sorry, there was an issue with the speech recognition service.")
            return ""
def generate_speech(text):
    """Synthesize *text* with gTTS and return the path of the saved MP3 file.

    The file is created with delete=False so it outlives this function; the
    caller (or the OS temp-dir cleanup) is responsible for removing it.
    """
    tts = gTTS(text)
    # Give the file an .mp3 suffix so audio players and st.audio can
    # identify the format from the filename (the original had no extension).
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmpfile:
        # Close our handle first so gTTS can write to the path on all
        # platforms (Windows forbids a second open of a held temp file).
        tmpfile.close()
        tts.save(tmpfile.name)
    return tmpfile.name
# ---- Streamlit UI ----
st.title("Voice Assistant with AI")

# One button press captures a single voice query, answers it, and speaks back.
if st.button("Start Listening"):
    user_input = listen_to_audio()
    if user_input:
        # Forward the transcribed question to the chat model.
        response = chat_session.send_message(user_input)
        bot_response = response.text

        # Render the exchange as a two-bubble chat layout: user's text on
        # the right, bot's reply on the left. The original had stray '#'
        # characters inside this HTML string (the bot bubble was
        # "commented out" with '#' that rendered as literal text and broke
        # the markup) — removed here so both bubbles display correctly.
        st.markdown(f"""
<div style="display: flex; flex-direction: column; margin-bottom: 10px;">
    <!-- User's input on the right -->
    <div style="display: flex; justify-content: flex-end; margin-bottom: 10px;">
        <div style="background-color: #f1f1f1; padding: 10px; border-radius: 5px; max-width: 80%; word-wrap: break-word;">
            <strong>You 🧑 :</strong> {user_input}
        </div>
    </div>
    <!-- Bot's response on the left -->
    <div style="display: flex; justify-content: flex-start; margin-bottom: 10px;">
        <div style="background-color: #e0e0e0; padding: 10px; border-radius: 5px; max-width: 80%; word-wrap: break-word;">
            <strong>Bot 🤖:</strong> {bot_response}
        </div>
    </div>
</div>
""", unsafe_allow_html=True)

        # Speak the reply and embed a playable audio widget for it.
        bot_audio_path = generate_speech(bot_response)
        st.audio(bot_audio_path, format="audio/mp3")

# Halts execution of the rest of this script run when pressed.
if st.button('Exit'):
    st.stop()