diff --git a/api/index.ts b/api/index.ts
index 0c16808..8de72d7 100644
--- a/api/index.ts
+++ b/api/index.ts
@@ -51,7 +51,12 @@ io.on("connection", (socket) => {
console.log("New client connected:", socket.id);
// Receive prompt along with its unique ID from the client
- socket.on("send_prompt", async ({ id, prompt }) => {
+ socket.on("send_prompt", async ({ id, prompt, mock }) => {
+ // if (mock) {
+ // socket.emit("ai_response", suggestionMocks);
+ // console.log({ action: "returned mock data", data: suggestionMocks });
+ // return;
+ // }
try {
const response = await openai.chat.completions.create({
model: "gpt-4",
@@ -60,10 +65,10 @@ io.on("connection", (socket) => {
{
role: "system",
content:
- "You are overhearing a game master running a Tabletop RPG game, briefly predict and provide creative ideas for what the game master might say next. You are limited to 20 tokens",
+ "You are overhearing a game master running a Tabletop RPG game, briefly predict and provide creative ideas for what the game master might say next. You are limited to 30 tokens.",
},
],
- max_tokens: 20,
+ max_tokens: 30,
temperature: 0.8,
});
diff --git a/api/types/apiTypes.d.ts b/api/types/apiTypes.d.ts
index 961819b..8e54296 100644
--- a/api/types/apiTypes.d.ts
+++ b/api/types/apiTypes.d.ts
@@ -1,8 +1,14 @@
+import { SendPromptData, SuggestionObj } from "./suggestionTypes";
+
export interface ServerToClientEvents {
noArg: () => void;
basicEmit: (a: number, b: string, c: Buffer) => void;
withAck: (d: string, callback: (e: number) => void) => void;
- ai_response: (data: AiResponseData) => void;
+  ai_response: (data: {
+    id: string;
+    response: string | null;
+  }) => void;
+ // ai_response: (data: SuggestionObj[]) => void;
}
export interface ClientToServerEvents {
@@ -18,13 +24,3 @@ export interface SocketData {
name: string;
age: number;
}
-
-export interface SendPromptData {
- id: string;
- prompt: string;
-}
-
-export interface AiResponseData {
- id: string;
- response: string | null;
-}
diff --git a/api/types/enums.ts b/api/types/enums.ts
new file mode 100644
index 0000000..0df4a61
--- /dev/null
+++ b/api/types/enums.ts
@@ -0,0 +1,8 @@
+export enum SuggestionCategory {
+ rules = "Rules",
+ items = "Items",
+ monsters = "Monsters",
+ events = "Events",
+ scene = "Scene Description",
+ dialogue = "Dialogue",
+}
diff --git a/api/types/suggestionMocks.ts b/api/types/suggestionMocks.ts
new file mode 100644
index 0000000..6f6e4ef
--- /dev/null
+++ b/api/types/suggestionMocks.ts
@@ -0,0 +1,26 @@
+import { SuggestionObj } from "./suggestionTypes";
+
+import { SuggestionCategory } from "./enums";
+
+export const suggestionMocks: SuggestionObj[] = [
+ {
+ id: "number1",
+ category: SuggestionCategory.dialogue,
+ suggestion:
+ 'Peasant says: "You should go see the magistrate, he was mentioning he needed help."',
+ relevancyScore: 80,
+ },
+ {
+ id: "number2",
+ category: SuggestionCategory.monsters,
+ suggestion:
+ "A blast shakes the town. A Minotaur has blasted through the wall, and he's coming at you. Roll for initiation!",
+ relevancyScore: 85,
+ },
+ {
+ id: "number3",
+ category: SuggestionCategory.items,
+    suggestion: "You find a bag of holding in the chest.",
+ relevancyScore: 60,
+ },
+];
diff --git a/api/types/suggestionTypes.d.ts b/api/types/suggestionTypes.d.ts
new file mode 100644
index 0000000..6decf6e
--- /dev/null
+++ b/api/types/suggestionTypes.d.ts
@@ -0,0 +1,19 @@
+import { SuggestionCategory } from "./enums";
+
+export interface SuggestionObj {
+ id: string;
+ category: SuggestionCategory;
+ relevancyScore: number;
+ suggestion: string | null;
+}
+
+export interface SendPromptData {
+ id: string;
+ prompt: string;
+ mock?: boolean;
+}
+
+export interface AiResponseData {
+ id: string;
+ response: string | null;
+}
diff --git a/vite-client/package.json b/vite-client/package.json
index 41c549d..5e302c0 100644
--- a/vite-client/package.json
+++ b/vite-client/package.json
@@ -10,7 +10,13 @@
"preview": "vite preview"
},
"dependencies": {
+ "@emotion/react": "^11.14.0",
+ "@emotion/styled": "^11.14.0",
+ "@formkit/auto-animate": "^0.8.2",
+ "@mui/icons-material": "^6.4.7",
+ "@mui/material": "^6.4.7",
"axios": "^1.7.9",
+ "lodash": "^4.17.21",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"socket.io-client": "^4.8.1",
@@ -26,6 +32,7 @@
"eslint-plugin-react-hooks": "^5.0.0",
"eslint-plugin-react-refresh": "^0.4.18",
"globals": "^15.14.0",
+ "prettier": "^3.5.3",
"vite": "^6.1.0"
}
}
diff --git a/vite-client/src/App.css b/vite-client/src/App.css
index b9d355d..8bc1d0c 100644
--- a/vite-client/src/App.css
+++ b/vite-client/src/App.css
@@ -1,10 +1,3 @@
-#root {
- max-width: 1280px;
- margin: 0 auto;
- padding: 2rem;
- text-align: center;
-}
-
.logo {
height: 6em;
padding: 1.5em;
@@ -35,6 +28,9 @@
.card {
padding: 2em;
+ border-radius: 0.4em;
+ box-shadow: 5px 5px 14px rgba(0, 0, 0, 0.4);
+ background-color: #272b2b;
}
.read-the-docs {
diff --git a/vite-client/src/App.jsx b/vite-client/src/App.jsx
index a165690..769132b 100644
--- a/vite-client/src/App.jsx
+++ b/vite-client/src/App.jsx
@@ -1,111 +1,21 @@
-import { useEffect, useState, useRef } from 'react';
-import { io } from 'socket.io-client';
-import useSpeechRecognition from './hooks/useSpeechRecognition';
-import { v4 as uuidv4 } from 'uuid'; // For unique IDs
-
-const socket = io('http://localhost:5000', { withCredentials: true });
+import "./App.css";
+import SuggestionsPage from "./pages/SuggestionsPage.jsx";
+import { CssBaseline, ThemeProvider } from "@mui/material";
+import { theme } from "./theme.js";
+import { SocketProvider } from "./providers/SocketProvider.jsx";
+import { SuggestionsProvider } from "./providers/SuggestionsProvider.jsx";
function App() {
- const [responses, setResponses] = useState([]);
- const { transcript, listening, startListening, stopListening } = useSpeechRecognition();
- const responseBoxRef = useRef(null);
- const latencyStartTimes = useRef({}); // Store start times by request ID
-
- useEffect(() => {
- socket.on('ai_response', ({ id, response }) => {
- const endTime = Date.now();
- const startTime = latencyStartTimes.current[id] || endTime;
- const latency = ((endTime - startTime) / 1000).toFixed(2);
-
- setResponses((prevResponses) =>
- prevResponses.map((entry) =>
- entry.id === id ? { ...entry, response, latency } : entry
- )
- );
-
- delete latencyStartTimes.current[id];
- });
-
- return () => socket.off('ai_response');
- }, []);
-
- useEffect(() => {
- if (transcript.trim() !== '') {
- const id = uuidv4(); // Unique ID for each request
- latencyStartTimes.current[id] = Date.now(); // Start time per request
-
- setResponses((prevResponses) => [
- ...prevResponses,
- { id, prompt: transcript, response: '', latency: null },
- ]);
-
- socket.emit('send_prompt', { id, prompt: transcript });
- }
- }, [transcript]);
-
- useEffect(() => {
- // Auto-scroll when new responses arrive
- if (responseBoxRef.current) {
- responseBoxRef.current.scrollTop = responseBoxRef.current.scrollHeight;
- }
- }, [responses]);
-
- return (
-
-
D&D AI Assistant Demo
-
(Browser Speech + WebSocket + OpenAI)
-
-
-
-
Live Transcript:
-
{transcript || '🎤 Say something...'}
-
-
- {/* 🎯 FINAL SCROLLBOX FIX - Height limited + scrollable */}
-
-
AI Responses (Newest at Bottom):
-
- {responses.length === 0 ? (
-
AI responses will appear here.
- ) : (
- responses.map((entry) => (
-
-
- 📝 Prompt: {entry.prompt || 'N/A'}
-
-
- 🤖 Response:{' '}
- {entry.response || (
- Processing...
- )}
-
-
- ⏱️ Latency:{' '}
- {entry.latency !== null
- ? `${entry.latency} seconds`
- : 'Calculating...'}
-
-
- ))
- )}
-
-
-
- );
+ return (
+
+
+
+
+
+
+
+
+ );
}
-export default App;
+export default App;
diff --git a/vite-client/src/components/LoadingBars.jsx b/vite-client/src/components/LoadingBars.jsx
new file mode 100644
index 0000000..a0b3de6
--- /dev/null
+++ b/vite-client/src/components/LoadingBars.jsx
@@ -0,0 +1,38 @@
+import React from 'react';
+import {SvgIcon} from "@mui/material";
+
+export function LoadingBarsIcon(props){
+ return (
+
+ {/* credit: cog icon from https://heroicons.com */}
+
+
+ )
+
+}
\ No newline at end of file
diff --git a/vite-client/src/components/SuggestionControlContainer.jsx b/vite-client/src/components/SuggestionControlContainer.jsx
new file mode 100644
index 0000000..35d8d7b
--- /dev/null
+++ b/vite-client/src/components/SuggestionControlContainer.jsx
@@ -0,0 +1,81 @@
+import React from "react";
+import { Button, Grid2, Paper, Typography } from "@mui/material";
+import StopIcon from "@mui/icons-material/Stop";
+import AutoAwesomeIcon from "@mui/icons-material/AutoAwesome";
+import * as PropTypes from "prop-types";
+import { useAutoAnimate } from "@formkit/auto-animate/react";
+
+export default function SuggestionControlContainer(props) {
+ const [parent] = useAutoAnimate();
+
+ const buttonStyles = {
+ transition: "all 0.8s ease",
+ "&:focus": {
+ outline: "none",
+ },
+ };
+
+ return (
+
+
+
+
+ {props.show
+ ? "Searching vast distances..."
+ : 'To get suggestions, click "Begin Scrying"'}
+
+
+
+
+
+
+
+ );
+}
+
+SuggestionControlContainer.propTypes = {
+ show: PropTypes.bool.isRequired,
+ listen: PropTypes.func.isRequired,
+};
diff --git a/vite-client/src/components/SuggestionItem.jsx b/vite-client/src/components/SuggestionItem.jsx
new file mode 100644
index 0000000..b6f6a93
--- /dev/null
+++ b/vite-client/src/components/SuggestionItem.jsx
@@ -0,0 +1,39 @@
+import React from "react";
+import { alpha, Grid2, Typography } from "@mui/material";
+import AutoAwesomeIcon from "@mui/icons-material/AutoAwesome";
+import CommentIcon from "@mui/icons-material/Comment";
+import * as PropTypes from "prop-types";
+
+export default function SuggestionItem(props) {
+ return (
+
+
+
+
+
+
+ {props.response.response}
+
+
+
+
+
+
+
+ {props.response.prompt}
+
+
+
+ );
+}
+
+SuggestionItem.propTypes = { response: PropTypes.any };
diff --git a/vite-client/src/components/SuggestionsContainer.jsx b/vite-client/src/components/SuggestionsContainer.jsx
new file mode 100644
index 0000000..17dd38e
--- /dev/null
+++ b/vite-client/src/components/SuggestionsContainer.jsx
@@ -0,0 +1,55 @@
+import React, { useMemo } from "react";
+import { useAutoAnimate } from "@formkit/auto-animate/react";
+import { Grid2, Typography } from "@mui/material";
+import _ from "lodash";
+import { LoadingBarsIcon } from "./LoadingBars.jsx";
+import SuggestionItem from "./SuggestionItem.jsx";
+import * as PropTypes from "prop-types";
+import { useSuggestions } from "../providers/SuggestionsProvider.jsx";
+
+export function SuggestionsContainer(props) {
+ const [parent] = useAutoAnimate();
+ const { suggestions } = useSuggestions();
+ const nonEmptySuggestions = useMemo(
+ () => suggestions?.filter((r) => !_.isEmpty(r.response)),
+ [suggestions],
+ );
+
+ return (
+
+ {(suggestions?.[0]?.prompt !== props.delayedTranscript ||
+ _.isEmpty(suggestions?.[0]?.response)) &&
+ !_.isEmpty(props.delayedTranscript) && (
+
+
+
+ {props.delayedTranscript}
+
+
+
+
+
+
+ )}
+
+ {nonEmptySuggestions?.map((response) => (
+
+ ))}
+
+ );
+}
+
+SuggestionsContainer.propTypes = {
+ delayedTranscript: PropTypes.string,
+};
diff --git a/vite-client/src/hooks/useSpeechRecognition.js b/vite-client/src/hooks/useSpeechRecognition.js
index a1476fe..2fb89cf 100644
--- a/vite-client/src/hooks/useSpeechRecognition.js
+++ b/vite-client/src/hooks/useSpeechRecognition.js
@@ -1,59 +1,87 @@
-import { useState, useEffect, useRef } from 'react';
+import { useEffect, useRef, useState } from "react";
const useSpeechRecognition = () => {
- const [transcript, setTranscript] = useState('');
- const [listening, setListening] = useState(false);
- const recognitionRef = useRef(null);
-
- useEffect(() => {
- const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
- const recognition = new SpeechRecognition();
- recognitionRef.current = recognition;
-
- recognition.continuous = true;
- recognition.interimResults = true;
- recognition.lang = 'en-US';
-
- recognition.onresult = (event) => {
- const currentTranscript = Array.from(event.results)
- .map((result) => result[0])
- .map((result) => result.transcript)
- .join('');
- setTranscript(currentTranscript);
- };
-
- recognition.onerror = (event) => {
- console.error('Speech recognition error:', event.error);
- };
-
- recognition.onend = () => {
- if (listening) {
- recognition.start();
- }
- };
-
- return () => {
- recognition.stop();
- };
- }, []);
-
- const startListening = () => {
- setTranscript('');
- setListening(true);
- if (recognitionRef.current) {
- recognitionRef.current.start();
- }
+ const [transcript, setTranscript] = useState("");
+ const [listening, setListening] = useState(false);
+ const recognitionRef = useRef(null);
+
+ useEffect(() => {
+ const SpeechRecognition =
+ window.SpeechRecognition || window.webkitSpeechRecognition;
+ const recognition = new SpeechRecognition();
+ recognitionRef.current = recognition;
+
+ recognition.continuous = true;
+ recognition.interimResults = true;
+ recognition.lang = "en-US";
+
+ recognition.onresult = (event) => {
+ const currentTranscript = Array.from(event.results)
+ .map((result) => result[0])
+ .map((result) => result.transcript)
+ .join("");
+ setTranscript(currentTranscript);
+ };
+
+ recognition.onerror = (event) => {
+ if (event.error === "no-speech") {
+ console.log("No speech detected. Restarting recognition...");
+ restartListening();
+ return;
+ }
+ if (event.error === "aborted") {
+ return;
+ }
+ console.error("Speech recognition error:", event.error);
};
- const stopListening = () => {
- setListening(false);
- if (recognitionRef.current) {
- recognitionRef.current.onend = null;
- recognitionRef.current.stop();
- }
+ recognition.onend = () => {
+ if (listening) {
+ recognition.start();
+ }
};
- return { transcript, listening, startListening, stopListening };
+ return () => {
+ recognition.stop();
+ };
+ }, []);
+
+ const startListening = () => {
+ setTranscript("");
+ setListening(true);
+ if (recognitionRef.current) {
+ recognitionRef.current.start();
+ }
+ };
+
+ const stopListening = () => {
+ setListening(false);
+ if (recognitionRef.current) {
+ recognitionRef.current.onend = null;
+ recognitionRef.current.stop();
+ }
+ };
+ const clearTranscript = () => {
+ setTranscript("");
+ recognitionRef.current.abort();
+ restartListening();
+ };
+
+ const restartListening = () => {
+ recognitionRef.current.stop();
+ setTimeout(() => {
+ recognitionRef.current.start();
+ }, 500);
+ };
+
+ return {
+ transcript,
+ listening,
+ startListening,
+ stopListening,
+ restartListening,
+ clearTranscript,
+ };
};
-export default useSpeechRecognition;
+export default useSpeechRecognition;
diff --git a/vite-client/src/index.css b/vite-client/src/index.css
index 6119ad9..f9242df 100644
--- a/vite-client/src/index.css
+++ b/vite-client/src/index.css
@@ -5,7 +5,8 @@
color-scheme: light dark;
color: rgba(255, 255, 255, 0.87);
- background-color: #242424;
+  /*background-color: #012020;*/
+
font-synthesis: none;
text-rendering: optimizeLegibility;
@@ -23,10 +24,6 @@ a:hover {
}
body {
- margin: 0;
- display: flex;
- place-items: center;
- min-width: 320px;
min-height: 100vh;
}
diff --git a/vite-client/src/pages/SuggestionsPage.jsx b/vite-client/src/pages/SuggestionsPage.jsx
new file mode 100644
index 0000000..0447664
--- /dev/null
+++ b/vite-client/src/pages/SuggestionsPage.jsx
@@ -0,0 +1,101 @@
+import React, { useCallback, useEffect, useState } from "react";
+import { Container, Grid2, Typography } from "@mui/material";
+import useSpeechRecognition from "../hooks/useSpeechRecognition.js";
+import _ from "lodash";
+import SuggestionControlContainer from "../components/SuggestionControlContainer.jsx";
+import { SuggestionsContainer } from "../components/SuggestionsContainer.jsx";
+import { useSuggestions } from "../providers/SuggestionsProvider.jsx";
+
+function SuggestionsPage() {
+ const { nonEmptySuggestions, getNewSuggestion } = useSuggestions();
+ const [isRecording, setIsRecording] = useState(false);
+ const [showContent, setShowContent] = useState(false);
+ const [delayedTranscript, setDelayedTranscript] = useState("");
+
+ const {
+ transcript,
+ listening,
+ startListening,
+ stopListening,
+ clearTranscript,
+ } = useSpeechRecognition();
+
+ useEffect(() => {
+ setTimeout(() => {
+ setShowContent(isRecording);
+ }, 450);
+ return () => {};
+ }, [isRecording]);
+
+ useEffect(() => {
+ if (isRecording !== listening) {
+ setIsRecording(listening);
+ }
+ }, [listening]);
+
+ useEffect(() => {
+ if (nonEmptySuggestions?.[0]?.prompt === transcript) {
+ clearTranscript();
+ setDelayedTranscript("");
+ }
+ }, [nonEmptySuggestions]);
+
+ const debouncedSendTranscript = useCallback(
+ _.debounce((transcript) => getNewSuggestion(transcript), 2000, {
+ trailing: true,
+ }),
+ [],
+ );
+
+ const debouncedStartListening = useCallback(
+ _.debounce(startListening, 500),
+ [],
+ );
+
+ const debouncedStopListening = useCallback(
+ _.debounce(stopListening, 500),
+ [],
+ );
+
+ useEffect(() => {
+ if (!_.isEmpty(transcript)) {
+ setDelayedTranscript(transcript);
+ debouncedSendTranscript(transcript);
+ }
+ }, [transcript]);
+
+ // useEffect(() => {
+ // // Auto-scroll when new responses arrive
+ // // if (responseBoxRef.current) {
+ // // responseBoxRef.current.scrollTop = responseBoxRef.current.scrollHeight;
+ // // }
+ // // console.log({ current_responses: responses });
+ // }, [responses]);
+
+ const handleListen = () => {
+ const newRecordingValue = !listening;
+ if (!newRecordingValue) {
+ debouncedStopListening();
+ setIsRecording(newRecordingValue);
+ } else {
+ debouncedStartListening();
+ setIsRecording(newRecordingValue);
+ }
+ };
+
+ return (
+
+
+
+
+ Scrying.AI
+
+
+
+
+
+
+ );
+}
+
+export default SuggestionsPage;
diff --git a/vite-client/src/providers/SocketProvider.jsx b/vite-client/src/providers/SocketProvider.jsx
new file mode 100644
index 0000000..4d95e93
--- /dev/null
+++ b/vite-client/src/providers/SocketProvider.jsx
@@ -0,0 +1,75 @@
+import {
+ createContext,
+ useCallback,
+ useContext,
+ useEffect,
+ useRef,
+ useState,
+} from "react";
+import { io } from "socket.io-client";
+
+const SOCKET_SERVER_URL = "http://localhost:5000";
+
+const SocketContext = createContext(null);
+
+export const SocketProvider = ({ children }) => {
+ const socketRef = useRef(null);
+ const [isConnected, setIsConnected] = useState(false);
+
+ useEffect(() => {
+ socketRef.current = io(SOCKET_SERVER_URL);
+
+ socketRef.current.on("connect", () => {
+ setIsConnected(true);
+ console.log("Connected to socket:", socketRef.current.id);
+ });
+
+ socketRef.current.on("disconnect", () => {
+ setIsConnected(false);
+ console.log("Disconnected from socket");
+ });
+
+ return () => {
+ socketRef.current.disconnect();
+ };
+ }, []);
+
+ const emitEvent = useCallback(
+ (event, data) => {
+ if (socketRef.current) {
+ socketRef.current.emit(event, data);
+ }
+ },
+ [socketRef.current],
+ );
+
+ const listenEvent = useCallback(
+ (event, callback) => {
+ if (socketRef.current) {
+ socketRef.current.on(event, callback);
+ }
+ },
+ [socketRef.current],
+ );
+
+ const removeEventListener = useCallback(
+ (event) => {
+ if (socketRef.current) {
+ socketRef.current.off(event);
+ }
+ },
+ [socketRef.current],
+ );
+
+ return (
+
+ {children}
+
+ );
+};
+
+export const useSocket = () => {
+ return useContext(SocketContext);
+};
diff --git a/vite-client/src/providers/SuggestionsProvider.jsx b/vite-client/src/providers/SuggestionsProvider.jsx
new file mode 100644
index 0000000..8904c3c
--- /dev/null
+++ b/vite-client/src/providers/SuggestionsProvider.jsx
@@ -0,0 +1,83 @@
+import {
+ createContext,
+ useContext,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from "react";
+import { v4 as uuidv4 } from "uuid";
+import { useSocket } from "./SocketProvider.jsx";
+import _ from "lodash";
+
+const SuggestionsContext = createContext(null);
+
+export const SuggestionsProvider = ({ children }) => {
+ const { isConnected, emitEvent, listenEvent, removeEventListener } =
+ useSocket();
+ const [suggestions, setSuggestions] = useState([]);
+ const latencyStartTimes = useRef({}); // Store start times by request ID
+
+ const nonEmptySuggestions = useMemo(
+ () => suggestions?.filter((r) => !_.isEmpty(r.response)),
+ [suggestions],
+ );
+
+ useEffect(() => {
+ if (listenEvent) {
+ listenEvent("ai_response", handleSuggestionMessage);
+ }
+
+ return () => {
+ removeEventListener("ai_response");
+ };
+ }, [listenEvent, removeEventListener]);
+
+ const prepareTranscriptForPrompt = (transcript) => {
+ if (transcript.trim() !== "") {
+ const id = uuidv4(); // Unique ID for each request
+ latencyStartTimes.current[id] = Date.now(); // Start time per request
+ setSuggestions((prevResponses) => [
+ { id, prompt: transcript, response: "", latency: null },
+ ...prevResponses,
+ ]);
+ return { id, prompt: transcript, mock: true };
+ }
+ };
+
+ const getNewSuggestion = (transcript) => {
+ const promptRequestData = prepareTranscriptForPrompt(transcript);
+ console.log("SEND PROMPT: ", promptRequestData);
+ emitEvent("send_prompt", promptRequestData);
+ };
+
+ const handleSuggestionMessage = ({ id, response }) => {
+ console.log("RECEIVED MESSAGE: ", response);
+ const endTime = Date.now();
+ const startTime = latencyStartTimes.current[id] || endTime;
+ const latency = ((endTime - startTime) / 1000).toFixed(2);
+
+ setSuggestions((prevResponses) =>
+ prevResponses.map((entry) =>
+ entry.id === id ? { ...entry, response, latency } : entry,
+ ),
+ );
+ };
+
+ return (
+
+ {children}
+
+ );
+};
+
+export const useSuggestions = () => {
+ return useContext(SuggestionsContext);
+};
diff --git a/vite-client/src/theme.js b/vite-client/src/theme.js
new file mode 100644
index 0000000..87d7093
--- /dev/null
+++ b/vite-client/src/theme.js
@@ -0,0 +1,34 @@
+import { createTheme } from "@mui/material";
+
+export const theme = createTheme({
+ palette: {
+ mode: "dark",
+ primary: {
+ light: "#abeded",
+ main: "#57dbdb",
+ dark: "#24a8a8",
+ contrastText: "#041515",
+ },
+ background: {
+ default: "#0d2020",
+ paper: "rgb(30, 30, 30)",
+ },
+ },
+ typography: {
+ caption: {
+ fontStyle: "italic",
+ fontSize: "1rem",
+ color: "#9e9e9e",
+ },
+ },
+ components: {
+ MuiCssBaseline: {
+ styleOverrides: {
+ body: {
+ backgroundColor: (theme) => theme.palette.background.default,
+ backgroundImage: `linear-gradient(135deg, #112a2a 10%, #091717 75%, #060f0f)`,
+ },
+ },
+ },
+ },
+});