diff --git a/frontend/app/(tabs)/home.tsx b/frontend/app/(tabs)/home.tsx
index 3ce8af08..d637d0a1 100644
--- a/frontend/app/(tabs)/home.tsx
+++ b/frontend/app/(tabs)/home.tsx
@@ -5,111 +5,115 @@ import {
   ExpoSpeechRecognitionModule,
   useSpeechRecognitionEvent,
 } from "expo-speech-recognition";
-import { Text, TextInput, TouchableOpacity, Image, Modal } from 'react-native';
-import { Animated, Easing } from 'react-native';
-import * as Speech from 'expo-speech';
-
-interface WebSocketEvent {
-  data: string;
-}
-
-interface SpeechRecognitionErrorEvent {
-  error: string;
-  message: string;
+import { Text, TextInput, TouchableOpacity, Image, Modal } from "react-native";
+import { Animated, Easing } from "react-native";
+import * as Speech from "expo-speech";
+// Note: the Node SDK's play() helper needs Node APIs; playback below calls the REST API directly
+import { ElevenLabsClient, play } from "elevenlabs";
+import { Audio } from "expo-av";
+import * as FileSystem from "expo-file-system";
+import { Buffer } from "buffer";
+
+// Shape of messages pushed by the backend over the WebSocket
+interface InstructionMessage {
+  type: string;
+  data: {
+    instruction?: string;
+    message?: string;
+  };
 }
 
-interface SpeechRecognitionResultEvent {
-  results: {
-    transcript?: string;
-  }[];
-}
+// Ref type for the WebSocket connection
+type WebSocketType = WebSocket | null;
 
 export default function HomeScreen() {
-  const [speechQueue, setSpeechQueue] = useState<string[]>([]);
-  const ws = useRef<WebSocket | null>(null); // WebSocket reference with proper type
-  const [recognizing, setRecognizing] = useState(true);
+  // Lock to prevent concurrent playback
+  const audioLock = useRef(false);
+
+  // Speak the initial navigation message, then pause briefly before
+  // turn-by-turn instructions start arriving
+  const enqueueInitialNavigation = async (destination: string) => {
+    const initialMessage = `Starting navigation to ${destination}`;
+    await speakResponse(initialMessage);
+    await new Promise((resolve) => setTimeout(resolve, 500));
+  };
+
+  const audioQueue = useRef<string[]>([]); // Queue of pending TTS requests
+  const isPlayingRef = useRef(false); // Track whether audio is currently playing
+
+  // ElevenLabs TTS helper; the returned promise resolves once playback finishes
+  const speakWithElevenLabs = async (text: string): Promise<void> => {
+    if (!text) return;
+    let path = "";
+    try {
+      const voiceId = "21m00Tcm4TlvDq8ikWAM";
+      const url = `https://api.elevenlabs.io/v1/text-to-speech/${voiceId}`;
+      const response = await fetch(url, {
+        method: "POST",
+        headers: {
+          // Never commit API keys; Expo inlines EXPO_PUBLIC_* env vars at build time (assumed var name)
+          "xi-api-key": process.env.EXPO_PUBLIC_ELEVENLABS_API_KEY ?? "",
+          "Content-Type": "application/json",
+          "Accept": "audio/mpeg",
+        },
+        body: JSON.stringify({
+          text,
+          model_id: "eleven_multilingual_v2",
+          output_format: "mp3_44100_128",
+        }),
+      });
+
+      if (!response.ok) {
+        throw new Error(`ElevenLabs API error: ${await response.text()}`);
+      }
+
+      // Persist the MP3 to a temp file so expo-av can play it from a URI
+      const arrayBuffer = await response.arrayBuffer();
+      path = `${FileSystem.documentDirectory}speech-${Date.now()}.mp3`;
+      await FileSystem.writeAsStringAsync(path, Buffer.from(arrayBuffer).toString("base64"), {
+        encoding: FileSystem.EncodingType.Base64,
+      });
+
+      const { sound } = await Audio.Sound.createAsync({ uri: path }, { shouldPlay: true });
+      isPlayingRef.current = true;
+
+      // Resolve only when playback has actually finished
+      await new Promise<void>((resolve) => {
+        sound.setOnPlaybackStatusUpdate((status) => {
+          if (status.isLoaded && status.didJustFinish) {
+            sound.unloadAsync();
+            isPlayingRef.current = false;
+            resolve();
+          }
+        });
+      });
+    } catch (error) {
+      console.error("Error in speakWithElevenLabs:", error);
+      isPlayingRef.current = false;
+      throw error;
+    } finally {
+      if (path) {
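+        // Best-effort cleanup: remove the temp MP3 even if playback failed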
+        FileSystem.deleteAsync(path).catch((err) => console.warn("File cleanup failed:", err));
+      }
+    }
+  };
+
+  const ws = useRef<WebSocketType>(null);
+  const [recognizing, setRecognizing] = useState(true); // Start with true to unmute
   const [transcript, setTranscript] = useState("");
   const [backendResponse, setBackendResponse] = useState("");
   const [hasPermission, setHasPermission] = useState(false);
   const [loading, setLoading] = useState(false);
-  const [isSpeaking, setIsSpeaking] = useState(false); // Track if TTS is active
-  const [destination, setDestination] = useState(""); // Store the destination input
+  const [isSpeaking, setIsSpeaking] = useState(false);
+  const [destination, setDestination] = useState("");
 
   const handleInputChange = (text: string) => {
     setDestination(text); // Update destination state when user types
   };
 
   const handleSubmitDestination = () => {
-    // Handle submitting the destination (e.g., send it to backend or use it in the app)
     console.log("Destination entered:", destination);
-  };
-
-  const processSpeechQueue = () => {
-    if (speechQueue.length === 0) return;
-
-    // Get the next item from the queue
-    const textToSpeak = speechQueue[0];
-
-    // Make sure we stop listening before speaking to prevent feedback
-    stopListening();
-
-    // Set speaking state
-    setIsSpeaking(true);
-
-    // Speak the text
-    Speech.speak(textToSpeak, {
-      rate: 0.8,
-      pitch: 1.0,
-      language: "en-US",
-      onDone: () => {
-        // Update queue by removing the item we just processed
-        setSpeechQueue(prevQueue => prevQueue.slice(1));
-        setIsSpeaking(false);
-
-        // Add a small delay before restarting listening
-        // to ensure TTS has completely finished
-        setTimeout(() => {
-          if (speechQueue.length === 0) {
-            startListening();
-          }
-        }, 500);
-      },
-      onStopped: () => {
-        // Update queue by removing the item we just processed
-        setSpeechQueue(prevQueue => prevQueue.slice(1));
-        setIsSpeaking(false);
-
-        // Add a small delay before restarting listening
-        setTimeout(() => {
-          if (speechQueue.length === 0) {
-            startListening();
-          }
-        }, 500);
-      },
-      onError: (error) => {
-        console.error("Speech error:", error);
-        // Update queue by removing the item we just processed
-        setSpeechQueue(prevQueue => prevQueue.slice(1));
-        setIsSpeaking(false);
-
-        // Add a small delay before restarting listening
-        setTimeout(() => {
-          if (speechQueue.length === 0) {
-            startListening();
-          }
-        }, 500);
-      }
-    });
-  };
-
-  // Modified useEffect to handle the speech queue
-  useEffect(() => {
-    // Only process the queue if we're not currently speaking and there are items in the queue
-    if (!isSpeaking && speechQueue.length > 0) {
-      processSpeechQueue();
+    if (destination.trim()) {
+      startNavigation(destination);
     }
-  }, [isSpeaking, speechQueue]);
+  };
 
+  // WebSocket setup
   useEffect(() => {
     ws.current = new WebSocket("ws://localhost:8000/");
@@ -117,46 +121,35 @@ export default function HomeScreen() {
       console.log("WebSocket connected");
     };
 
-    ws.current.onmessage = (event: WebSocketEvent) => {
-      console.log("WebSocket message received:", event.data);
-
+    ws.current.onmessage = async (event: WebSocketMessageEvent) => {
       try {
-        // Parse the message data
-        const parsedData = JSON.parse(event.data);
-        console.log("Parsed WebSocket data:", parsedData);
-
+        const parsedData = JSON.parse(event.data as string) as InstructionMessage;
         let textToSpeak = "";
 
-        // Check message type and extract the appropriate text to speak
-        if (parsedData.type === "instruction") {
-          console.log("Instruction received:", parsedData.data);
+        if (parsedData.type === "approachingTurn") {
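+          // Example payload (assumed): { type: "approachingTurn", data: { instruction: "Turn left on Main St" } }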
+          textToSpeak = parsedData.data.instruction || "";
-        }
-        else if (parsedData.type === "approachingTurn") {
-          console.log("Approaching turn:", parsedData.data);
-          // Extract relevant info from approaching turn data
-          textToSpeak = parsedData.data.instruction || "";
-        }
-        else if (parsedData.type === "complete") {
-          console.log("Navigation complete:", parsedData.data);
+        } else if (parsedData.type === "complete") {
           textToSpeak = parsedData.data.message || "You have reached your destination";
         }
 
-        // Update the UI with the extracted text
-        setBackendResponse(textToSpeak || JSON.stringify(parsedData));
-
-        // Instead of speaking immediately, add the text to the speech queue
         if (textToSpeak) {
-          setSpeechQueue(prevQueue => [...prevQueue, textToSpeak]);
+          // Wait for any queued or in-flight audio (e.g. the initial message) to finish
+          while (audioQueue.current.length > 0 || isPlayingRef.current) {
+            await new Promise((resolve) => setTimeout(resolve, 100));
+          }
+
+          stopListening();
+          setBackendResponse(textToSpeak);
+          await speakResponse(textToSpeak);
         }
       } catch (error) {
         console.error("Error processing WebSocket message:", error);
-        setBackendResponse(event.data); // Fallback to raw data
+        setBackendResponse(String(event.data)); // Fall back to the raw payload
       }
     };
 
-    ws.current.onerror = (error: Event) => {
-      console.error("WebSocket Error:", error);
+    ws.current.onerror = (ev: Event) => {
+      console.error("WebSocket Error:", ev);
     };
 
     ws.current.onclose = () => {
@@ -164,25 +157,58 @@ export default function HomeScreen() {
     };
 
     return () => {
-      if (ws.current) {
-        ws.current.close();
-      }
+      if (ws.current) ws.current.close();
     };
   }, []);
 
-  // Check permissions on component mount
-  useEffect(() => {
-    checkPermissions();
-    startListening();
-  }, []);
+  // Centralized speaking path: enqueue the text and drain the queue sequentially
+  // under a lock so only one utterance plays at a time
+  const speakResponse = async (text: string) => {
+    if (!text) return;
+
+    audioQueue.current.push(text);
+    if (audioLock.current) return; // Another call is already draining the queue
+
+    audioLock.current = true;
 
-  // Handler for text-to-speech with muting - REMOVED
-  // Since we now handle text-to-speech directly in the WebSocket onmessage handler
+    while (audioQueue.current.length > 0) {
+      const nextText = audioQueue.current[0];
+      if (recognizing) stopListening(); // Mute the mic so TTS doesn't feed back into recognition
+      setIsSpeaking(true);
+
+      try {
+        await speakWithElevenLabs(nextText);
+        audioQueue.current.shift();
+      } catch (error) {
+        console.error("TTS error:", error);
+        audioQueue.current.shift(); // Drop the failed item so the queue keeps moving
+      } finally {
+        setIsSpeaking(false);
+      }
+
+      await new Promise((resolve) => setTimeout(resolve, 100));
+    }
 
-  const handleTextToSpeech = () => {
-    const thingToSay = transcript;
-    Speech.speak(thingToSay);
+    audioLock.current = false;
+
+    // Queue drained; resume listening shortly
+    setTimeout(startListening, 500);
   };
 
+  // Check permissions and start listening on mount
+  useEffect(() => {
+    const initialize = async () => {
+      // State set by checkPermissions() is not visible until the next render,
+      // so read (and if needed request) the permission status directly
+      let { granted } = await ExpoSpeechRecognitionModule.getPermissionsAsync();
+      if (!granted) {
+        ({ granted } = await ExpoSpeechRecognitionModule.requestPermissionsAsync());
+      }
+      setHasPermission(granted);
+      if (granted) startListening();
+    };
+    initialize();
+  }, []);
 
   const checkPermissions = async () => {
     const { status, granted } = await ExpoSpeechRecognitionModule.getPermissionsAsync();
@@ -203,25 +229,21 @@ export default function HomeScreen() {
   // Event listeners for speech recognition
   useSpeechRecognitionEvent("start", () => setRecognizing(true));
   useSpeechRecognitionEvent("end", () => setRecognizing(false));
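+  // Live transcripts land in React state; the debounced effect below forwards them to the backend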
-  useSpeechRecognitionEvent("result", (event: SpeechRecognitionResultEvent) => {
+  useSpeechRecognitionEvent("result", (event) => {
     const speechResult = event.results[0]?.transcript || "";
     setTranscript(speechResult);
     console.log("Speech to Text Result:", speechResult);
   });
-  useSpeechRecognitionEvent("error", (event: SpeechRecognitionErrorEvent) => {
+  useSpeechRecognitionEvent("error", (event) => {
     console.log("Error:", event.error, "Message:", event.message);
   });
 
   const startListening = async () => {
-    // Don't start listening if TTS is active
-    if (isSpeaking) return;
+    if (isSpeaking || !hasPermission) return; // Don't start while TTS is active or without permission
 
-    if (!hasPermission) {
-      await requestPermissions();
-    }
-
-    console.log("Starting speech recognition...");
     ExpoSpeechRecognitionModule.start({
       lang: "en-US",
       interimResults: true,
@@ -230,19 +252,9 @@ export default function HomeScreen() {
       requiresOnDeviceRecognition: false,
       addsPunctuation: false,
       contextualStrings: [
-        "weather",
-        "temperature",
-        "city",
-        "weather in",
-        "ai",
-        "chat",
-        "conversation",
-        "start navigation",
-        "navigate to",
-        "take me to",
-        "directions to",
-        "how do I get to",
-        "route to"
+        "weather", "temperature", "city", "weather in", "ai", "chat", "conversation",
+        "start navigation", "navigate to", "take me to", "directions to",
+        "how do I get to", "route to"
       ],
     });
     setRecognizing(true);
@@ -261,96 +273,93 @@ export default function HomeScreen() {
     }
   };
 
-  // Function to send the transcript to the backend
+  // Kick off turn-by-turn navigation on the backend, then speak the initial message
+  const startNavigation = async (dest: string) => {
+    try {
+      setLoading(true);
+      const target = dest || "2831 W 15th St Ste 200, Plano, Tx 75075"; // Fallback destination
+      console.log(`Starting navigation to: ${target}`);
+
+      const response = await axios.post("http://localhost:8000/startSimulationDirections", {
+        destination: target,
+      });
+
+      console.log("Navigation started:", response.data);
+      setBackendResponse(`Starting navigation to ${target}`);
+      await enqueueInitialNavigation(target);
+    } catch (error) {
+      console.error("Error starting navigation:", error);
+      setBackendResponse("Sorry, there was an error starting navigation.");
+      await speakResponse("Sorry, there was an error starting navigation.");
+    } finally {
+      setLoading(false);
+    }
+  };
+
+  // Send the transcript to the backend
   const sendSpeechToBackend = async () => {
-    if (!transcript) return;
+    if (!transcript || isSpeaking) return;
 
     try {
       setLoading(true);
-
-      // Check if transcript contains navigation keywords
-      const navigationKeywords = ["start navigation", "navigate to", "take me to", "directions to", "how do I get to", "route to"];
+      const navigationKeywords = [
+        "start navigation", "navigate to", "take me to", "directions to",
+        "how do I get to", "route to"
+      ];
       const isNavigationRequest = navigationKeywords.some(keyword =>
         transcript.toLowerCase().includes(keyword)
       );
 
-      // Extract destination from the transcript
-      let destination = "";
+      let extractedDestination = "";
       if (isNavigationRequest) {
-        // Parse to extract destination
         for (const keyword of navigationKeywords) {
           if (transcript.toLowerCase().includes(keyword)) {
-            destination = transcript.toLowerCase().split(keyword)[1].trim();
+            extractedDestination = transcript.toLowerCase().split(keyword)[1]?.trim() || "";
             break;
           }
         }
-      }
-
-      // If not currently speaking
-      if (!isSpeaking) {
-        // Choose the appropriate endpoint based on the command type
-        if (isNavigationRequest && destination) {
-          // Get current location (in a real app, you'd use geolocation)
-          // For this example, we'll use a hardcoded origin
-          const origin = "2800 Waterview Pkwy, Richardson, TX 75080"; // Default origin
-          const destination = "2831 W 15th St Ste 200, Plano, Tx 75075"
-          console.log(`Starting navigation to: ${destination}`);
-
-          // Call navigation-specific endpoint
-          const response = await axios.post('http://localhost:8000/startSimulationDirections', {
-            destination: destination
-          });
-
-          console.log("Navigation started:", response.data);
-          setBackendResponse(`Starting navigation to ${destination}`);
-        } else {
-          // Use the general command endpoint for other requests
-          const response = await axios.post('http://localhost:8000/command', {
-            userInput: transcript,
-            sessionId: 'unique-session-id',
-          });
-
-          console.log("Backend response:", response.data);
-
-          if (response.data && response.data.response) {
-            setBackendResponse(response.data.response);
-          } else {
-            setBackendResponse("No response received");
-          }
-        }
+        await startNavigation(extractedDestination);
+      } else {
+        const response = await axios.post("http://localhost:8000/command", {
+          userInput: transcript,
+          sessionId: "unique-session-id",
+        });
+        console.log("Backend response:", response.data);
+        const responseText = response.data.response || "No response received";
+        setBackendResponse(responseText);
+        speakResponse(responseText);
       }
     } catch (error) {
       console.error("Error sending speech to backend:", error);
       setBackendResponse("Sorry, there was an error processing your request.");
+      speakResponse("Sorry, there was an error processing your request.");
     } finally {
       setLoading(false);
    }
  };
 
-  // Modified useEffect with debouncing to prevent multiple API calls
-  // Modified useEffect for sending speech to backend
+  // Debounce transcript changes so we don't fire a request for every interim result
   useEffect(() => {
-    let debounceTimer = null;
+    const debounceTimer = setTimeout(() => {
+      if (transcript && !isSpeaking) sendSpeechToBackend();
+    }, 1000);
 
-    // Only process speech when we have transcript AND we're not speaking
-    if (transcript && !isSpeaking && recognizing) {
-      // Clear any existing timer
-      if (debounceTimer) clearTimeout(debounceTimer);
+    return () => clearTimeout(debounceTimer);
+  }, [transcript]);
 
-      // Set a new timer
-      debounceTimer = setTimeout(() => {
-        sendSpeechToBackend();
-      }, 1000); // 1 second delay to stabilize transcript
-    }
-
-    // Cleanup function
-    return () => {
-      if (debounceTimer) clearTimeout(debounceTimer);
-    };
-  }, [transcript, isSpeaking, recognizing]);
 
   return (
+    <TouchableOpacity
+      onPress={() => {
+        // Test a voice - change the identifier to test different voices
+        speakWithElevenLabs("This is a test of the text to speech functionality using this voice");
+      }}
+    >
+      <Text>Test Voice</Text>
+    </TouchableOpacity>
+      {/* Top-left section with logo and app name */}
       CoDriver
+      {/* Main container to align the content */}
@@ -370,6 +380,7 @@ export default function HomeScreen() {
       />
+      {/* Text container for song title */}
@@ -377,6 +388,7 @@ export default function HomeScreen() {
         The Color Violet · Tory Lanez
+      {/* Bluetooth icon + BeatSpill+ in a row */}
+      {/* Second Bluetooth icon */}
+
+      {/* Background AI-themed blob image */}
+      {/* Destination Button at the bottom */}
       {/* Left Section: Icon */}
@@ -429,6 +445,7 @@ export default function HomeScreen() {
       />
+      {/* Right Section: Text Input */}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
@@ ... @@
+    "node_modules/elevenlabs": {
+      ...
+    },
+    "node_modules/elevenlabs/node_modules/execa": {
+      ...
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sindresorhus/execa?sponsor=1"
+      }
+    },
+    "node_modules/elevenlabs/node_modules/form-data": {
+      "version": "4.0.2",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", + "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/elevenlabs/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/elevenlabs/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/elevenlabs/node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/elevenlabs/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/elevenlabs/node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/elevenlabs/node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/elevenlabs/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, "node_modules/emittery": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", @@ -9731,7 +9895,6 @@ "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "license": "MIT", - "peer": true, "engines": { "node": ">=0.8.x" } @@ 
@@ -10864,6 +11027,24 @@
         "node": ">= 6"
       }
     },
+    "node_modules/form-data-encoder": {
+      "version": "4.0.2",
+      "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-4.0.2.tgz",
+      "integrity": "sha512-KQVhvhK8ZkWzxKxOr56CPulAhH3dobtuQ4+hNQ+HekH/Wp5gSOafqRAeTphQUJAIk0GBvHZgJ2ZGRWd5kphMuw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 18"
+      }
+    },
+    "node_modules/formdata-node": {
+      "version": "6.0.3",
+      "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-6.0.3.tgz",
+      "integrity": "sha512-8e1++BCiTzUno9v5IZ2J6bv4RU+3UKDmqWUQD0MIMVCd9AdhWkO1gw57oo1mNEX1dMq2EGI+FbWz4B92pscSQg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 18"
+      }
+    },
     "node_modules/freeport-async": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/freeport-async/-/freeport-async-2.0.0.tgz",
@@ -15903,6 +16084,15 @@
         "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
       }
     },
+    "node_modules/process": {
+      "version": "0.11.10",
+      "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+      "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.6.0"
+      }
+    },
     "node_modules/progress": {
       "version": "2.0.3",
       "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
@@ -16012,6 +16202,21 @@
         "qrcode-terminal": "bin/qrcode-terminal.js"
       }
     },
+    "node_modules/qs": {
+      "version": "6.14.0",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz",
+      "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "side-channel": "^1.1.0"
+      },
+      "engines": {
+        "node": ">=0.6"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/query-string": {
       "version": "7.1.3",
       "resolved": "https://registry.npmjs.org/query-string/-/query-string-7.1.3.tgz",
@@ -17987,7 +18192,6 @@
       "version": "1.3.0",
       "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
       "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
-      "devOptional": true,
       "license": "MIT",
       "dependencies": {
         "safe-buffer": "~5.2.0"
@@ -19231,6 +19435,12 @@
         "punycode": "^2.1.0"
       }
     },
+    "node_modules/url-join": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
+      "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
+      "license": "MIT"
+    },
     "node_modules/url-parse": {
       "version": "1.5.10",
       "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
@@ -19501,6 +19711,30 @@
         "node": ">=10"
       }
     },
+    "node_modules/whatwg-url-without-unicode/node_modules/buffer": {
+      "version": "5.7.1",
+      "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
+      "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "base64-js": "^1.3.1",
+        "ieee754": "^1.1.13"
+      }
+    },
     "node_modules/whatwg-url-without-unicode/node_modules/webidl-conversions": {
       "version": "5.0.0",
       "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-5.0.0.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index 99789545..d0d60cb5 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -22,7 +22,9 @@
     "@react-navigation/stack": "^7.1.2",
     "assemblyai": "^4.9.0",
     "axios": "^1.8.2",
+    "buffer": "^6.0.3",
     "dotenv": "^16.4.7",
+    "elevenlabs": "^1.56.1",
     "expo": "~52.0.38",
     "expo-audio": "^0.3.5",
     "expo-av": "^15.0.2",