From faa94903b7cc3e95551baaf39b53c6f5942c2535 Mon Sep 17 00:00:00 2001 From: NASA <65771915+nodeblackbox@users.noreply.github.com> Date: Sun, 6 Oct 2024 02:01:41 +0100 Subject: [PATCH] Changes to the back end kind of obsolete --- backend/apiDocsReadme.MD | 669 --- backend/app/0.21.0 | 3 + backend/cogenesis-backend/Dockerfile | 22 +- .../PROJECT_DOCUMENTATION.md | 1282 +++++ .../PROJECT_DOCUMENTATIONV2.md | 0 backend/cogenesis-backend/README.md | 1090 ----- backend/cogenesis-backend/app/api/gravrag.py | 51 +- backend/cogenesis-backend/app/core/config.py | 3 +- .../cogenesis-backend/app/core/security.py | 11 +- backend/cogenesis-backend/app/main.py | 10 +- .../cogenesis-backend/app/models/gravrag.py | 11 +- .../cogenesis-backend/app/services/gravrag.py | 54 +- backend/cogenesis-backend/docker-compose.yml | 3 +- backend/cogenesis-backend/makeReadMe.py | 93 +- backend/cogenesis-backend/requirements.txt | 25 +- .../cogenesis-backend/tests/test_gravrag.py | 65 +- .../agentmanagement/PROJECT_DOCUMENTATION.md | 1697 +++++++ .../src/app/api/agentmanagement/makeReadMe.py | 53 +- frontend/src/app/dashboardV11/.cursorrules | 4132 +++++++++++++++++ frontend/src/app/dashboardV11/page.jsx | 2096 +++++++++ .../app/dashboardV11/useQuantumNexusRAG.js | 140 + frontend/src/app/dashboardV11/useRAG.js | 92 + .../APIEndpointManager.jsx | 0 .../JSONEditor.jsx | 0 .../PayloadTester.jsx | 0 .../QdrantManager.jsx | 0 26 files changed, 9707 insertions(+), 1895 deletions(-) create mode 100644 backend/app/0.21.0 create mode 100644 backend/cogenesis-backend/PROJECT_DOCUMENTATION.md create mode 100644 backend/cogenesis-backend/PROJECT_DOCUMENTATIONV2.md create mode 100644 frontend/src/app/api/agentmanagement/PROJECT_DOCUMENTATION.md create mode 100644 frontend/src/app/dashboardV11/.cursorrules create mode 100644 frontend/src/app/dashboardV11/page.jsx create mode 100644 frontend/src/app/dashboardV11/useQuantumNexusRAG.js create mode 100644 frontend/src/app/dashboardV11/useRAG.js rename 
frontend/src/components/{APITestV5 copy => APITestV6}/APIEndpointManager.jsx (100%) rename frontend/src/components/{APITestV5 copy => APITestV6}/JSONEditor.jsx (100%) rename frontend/src/components/{APITestV5 copy => APITestV6}/PayloadTester.jsx (100%) rename frontend/src/components/{APITestV5 copy => APITestV6}/QdrantManager.jsx (100%) diff --git a/backend/apiDocsReadme.MD b/backend/apiDocsReadme.MD index 43f89d6..e69de29 100644 --- a/backend/apiDocsReadme.MD +++ b/backend/apiDocsReadme.MD @@ -1,669 +0,0 @@ -# API Documentation - -Welcome to the comprehensive API documentation for our project. This guide provides detailed information on the available endpoints, including example payloads and usage examples in **cURL**, **JavaScript**, and **Python**. Whether you're a frontend developer, backend engineer, or just getting started, this documentation will help you integrate seamlessly with our APIs. - -## Table of Contents - -- [Neural Resources API](#neural-resources-api) - - [route_query](#route_query) - - [set_api_key](#set_api_key) - - [available_models](#available_models) - - [create_message](#create_message) -- [AgentChef API](#agentchef-api) - - [collect_data](#collect_data) - - [structure_data](#structure_data) - - [augment_data](#augment_data) - - [push_to_huggingface](#push_to-huggingface) -- [GravRAG API](#gravrag-api) - - [create_memory](#create_memory) - - [recall_memory](#recall_memory) - - [prune_memories](#prune_memories) - ---- - -## Neural Resources API - -Base URL: `http://localhost:8000/neural_resources` - -The Neural Resources API provides endpoints to interact with various neural network resources, manage API keys, and handle message creation. - -### route_query - -- **Endpoint:** `/route_query` -- **Method:** `POST` -- **Description:** Routes a query through the neural resources to get a response based on the provided content. - -#### Request Payload - -```json -{ - "content": "Hello, how are you?" 
-} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/neural_resources/route_query \ - -H "Content-Type: application/json" \ - -d '{"content": "Hello, how are you?"}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/neural_resources/route_query', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ content: 'Hello, how are you?' }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/neural_resources/route_query" -payload = {"content": "Hello, how are you?"} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -### set_api_key - -- **Endpoint:** `/set_api_key` -- **Method:** `POST` -- **Description:** Sets the API key for a specified provider. 
- -#### Request Payload - -```json -{ - "provider": "openai", - "api_key": "test_key" -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/neural_resources/set_api_key \ - -H "Content-Type: application/json" \ - -d '{"provider": "openai", "api_key": "test_key"}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/neural_resources/set_api_key', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ provider: 'openai', api_key: 'test_key' }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/neural_resources/set_api_key" -payload = {"provider": "openai", "api_key": "test_key"} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -### available_models - -- **Endpoint:** `/available_models` -- **Method:** `GET` -- **Description:** Retrieves a list of available neural network models. - -#### cURL Example - -```bash -curl -X GET http://localhost:8000/neural_resources/available_models -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/neural_resources/available_models') - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/neural_resources/available_models" - -response = requests.get(url) -print(response.json()) -``` - ---- - -### create_message - -- **Endpoint:** `/create_message/{provider}/{model}` -- **Method:** `POST` -- **Description:** Creates a message using the specified provider and model. 
- -#### URL Parameters - -- `provider`: The name of the provider (e.g., `openai`) -- `model`: The model name (e.g., `gpt-3.5-turbo`) - -#### Request Payload - -```json -{ - "content": "What is AI?" -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/neural_resources/create_message/openai/gpt-3.5-turbo \ - -H "Content-Type: application/json" \ - -d '{"content": "What is AI?"}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -const provider = 'openai'; -const model = 'gpt-3.5-turbo'; -const url = `http://localhost:8000/neural_resources/create_message/${provider}/${model}`; - -fetch(url, { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ content: 'What is AI?' }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -provider = "openai" -model = "gpt-3.5-turbo" -url = f"http://localhost:8000/neural_resources/create_message/{provider}/{model}" -payload = {"content": "What is AI?"} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -## AgentChef API - -Base URL: `http://localhost:8000/agentchef` - -The AgentChef API facilitates data collection, structuring, augmentation, and integration with Hugging Face repositories. - -### collect_data - -- **Endpoint:** `/collect_data` -- **Method:** `POST` -- **Description:** Collects data from specified sources based on the query. 
- -#### Request Payload - -```json -{ - "source_type": "arxiv", - "query": "machine learning", - "max_results": 5 -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/agentchef/collect_data \ - -H "Content-Type: application/json" \ - -d '{"source_type": "arxiv", "query": "machine learning", "max_results": 5}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/agentchef/collect_data', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - source_type: 'arxiv', - query: 'machine learning', - max_results: 5 - }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/agentchef/collect_data" -payload = { - "source_type": "arxiv", - "query": "machine learning", - "max_results": 5 -} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -### structure_data - -- **Endpoint:** `/structure_data` -- **Method:** `POST` -- **Description:** Structures raw data based on a specified template. 
- -#### Request Payload - -```json -{ - "data": [ - { - "title": "Test", - "content": "This is a test content" - } - ], - "template_name": "instruction_input_output" -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/agentchef/structure_data \ - -H "Content-Type: application/json" \ - -d '{"data": [{"title": "Test", "content": "This is a test content"}], "template_name": "instruction_input_output"}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/agentchef/structure_data', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - data: [{ title: 'Test', content: 'This is a test content' }], - template_name: 'instruction_input_output' - }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/agentchef/structure_data" -payload = { - "data": [{"title": "Test", "content": "This is a test content"}], - "template_name": "instruction_input_output" -} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -### augment_data - -- **Endpoint:** `/augment_data` -- **Method:** `POST` -- **Description:** Augments structured data by generating additional samples. 
- -#### Request Payload - -```json -{ - "input_file": "structured_data.parquet", - "num_samples": 3, - "agent_name": "openai" -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/agentchef/augment_data \ - -H "Content-Type: application/json" \ - -d '{"input_file": "structured_data.parquet", "num_samples": 3, "agent_name": "openai"}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/agentchef/augment_data', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - input_file: 'structured_data.parquet', - num_samples: 3, - agent_name: 'openai' - }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/agentchef/augment_data" -payload = { - "input_file": "structured_data.parquet", - "num_samples": 3, - "agent_name": "openai" -} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -### push_to_huggingface - -- **Endpoint:** `/push_to_huggingface` -- **Method:** `POST` -- **Description:** Pushes augmented data to a Hugging Face repository. 
- -#### Request Payload - -```json -{ - "file_path": "augmented_data.parquet", - "repo_id": "test/dataset", - "token": "hf_test_token" -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/agentchef/push_to_huggingface \ - -H "Content-Type: application/json" \ - -d '{"file_path": "augmented_data.parquet", "repo_id": "test/dataset", "token": "hf_test_token"}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/agentchef/push_to_huggingface', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - file_path: 'augmented_data.parquet', - repo_id: 'test/dataset', - token: 'hf_test_token' - }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/agentchef/push_to_huggingface" -payload = { - "file_path": "augmented_data.parquet", - "repo_id": "test/dataset", - "token": "hf_test_token" -} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -## GravRAG API - -Base URL: `http://localhost:8000/gravrag/api/memory` - -The GravRAG API manages memory operations, including creation, recall, and pruning of memories. - -### create_memory - -- **Endpoint:** `/memory/create` -- **Method:** `POST` -- **Description:** Creates a new memory entry with associated metadata. 
- -#### Request Payload - -```json -{ - "content": "This is a test memory", - "metadata": { - "objective_id": "obj_1", - "task_id": "task_1" - } -} -``` - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/gravrag/api/memory/create \ - -H "Content-Type: application/json" \ - -d '{"content": "This is a test memory", "metadata": {"objective_id": "obj_1", "task_id": "task_1"}}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/gravrag/api/memory/create', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - content: 'This is a test memory', - metadata: { - objective_id: 'obj_1', - task_id: 'task_1' - } - }) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/gravrag/api/memory/create" -payload = { - "content": "This is a test memory", - "metadata": { - "objective_id": "obj_1", - "task_id": "task_1" - } -} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -### recall_memory - -- **Endpoint:** `/memory/recall` -- **Method:** `GET` -- **Description:** Recalls memories based on a query. - -#### Query Parameters - -- `query`: The query string to search for relevant memories. 
- -#### Example URL - -``` -http://localhost:8000/gravrag/api/memory/recall?query=test memory -``` - -#### cURL Example - -```bash -curl -G http://localhost:8000/gravrag/api/memory/recall \ - --data-urlencode "query=test memory" -``` - -#### JavaScript Example (using Fetch API) - -```javascript -const query = 'test memory'; -const url = `http://localhost:8000/gravrag/api/memory/recall?query=${encodeURIComponent(query)}`; - -fetch(url) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/gravrag/api/memory/recall" -params = {"query": "test memory"} - -response = requests.get(url, params=params) -print(response.json()) -``` - ---- - -### prune_memories - -- **Endpoint:** `/memory/prune` -- **Method:** `POST` -- **Description:** Prunes old or irrelevant memories to maintain optimal memory usage. - -#### Request Payload - -```json -{} -``` - -*Note: This endpoint may not require a payload. 
If it does, specify the parameters accordingly.* - -#### cURL Example - -```bash -curl -X POST http://localhost:8000/gravrag/api/memory/prune \ - -H "Content-Type: application/json" \ - -d '{}' -``` - -#### JavaScript Example (using Fetch API) - -```javascript -fetch('http://localhost:8000/gravrag/api/memory/prune', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({}) -}) - .then(response => response.json()) - .then(data => console.log(data)) - .catch(error => console.error('Error:', error)); -``` - -#### Python Example (using requests) - -```python -import requests - -url = "http://localhost:8000/gravrag/api/memory/prune" -payload = {} -headers = {"Content-Type": "application/json"} - -response = requests.post(url, json=payload, headers=headers) -print(response.json()) -``` - ---- - -## Conclusion - -This documentation provides a detailed overview of the available API endpoints, along with example payloads and usage snippets in various programming languages. For any further assistance or questions, please refer to our support channels or consult the API specifications. - ---- - -**Happy Coding! 
🚀** \ No newline at end of file diff --git a/backend/app/0.21.0 b/backend/app/0.21.0 new file mode 100644 index 0000000..e4982a0 --- /dev/null +++ b/backend/app/0.21.0 @@ -0,0 +1,3 @@ +Requirement already satisfied: pydantic in c:\users\nasan\miniconda3\lib\site-packages (1.10.18) +Requirement already satisfied: python-dotenv in c:\users\nasan\miniconda3\lib\site-packages (0.19.0) +Requirement already satisfied: typing-extensions>=4.2.0 in c:\users\nasan\miniconda3\lib\site-packages (from pydantic) (4.12.2) diff --git a/backend/cogenesis-backend/Dockerfile b/backend/cogenesis-backend/Dockerfile index 7efe90c..84e2d6d 100644 --- a/backend/cogenesis-backend/Dockerfile +++ b/backend/cogenesis-backend/Dockerfile @@ -1,11 +1,23 @@ +# Use an official Python runtime as a parent image +FROM python:3.10-slim -FROM python:3.9 +# Set environment variables +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 -WORKDIR /app +# Set work directory +WORKDIR /code -COPY requirements.txt . +# Install dependencies +COPY requirements.txt /code/ +RUN pip install --upgrade pip RUN pip install --no-cache-dir -r requirements.txt -COPY . . +# Copy project +COPY . 
/code/ -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file +# Expose port +EXPOSE 8000 + +# Run the application +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/backend/cogenesis-backend/PROJECT_DOCUMENTATION.md b/backend/cogenesis-backend/PROJECT_DOCUMENTATION.md new file mode 100644 index 0000000..db54609 --- /dev/null +++ b/backend/cogenesis-backend/PROJECT_DOCUMENTATION.md @@ -0,0 +1,1282 @@ +# backend + +## Project Structure + +``` +cogenesis-backend/ +├── .env +├── .gitignore +├── docker-compose.yml +├── Dockerfile +├── makeReadMe.py +├── PROJECT_DOCUMENTATION.md +├── PROJECT_DOCUMENTATIONV2.md +├── README.md +├── requirements.txt +├── app/ +│ ├── main.py +│ ├── api/ +│ │ ├── gravrag.py +│ ├── core/ +│ │ ├── config.py +│ │ ├── security.py +│ ├── models/ +│ │ ├── gravrag.py +│ ├── services/ +│ │ ├── gravrag.py +├── tests/ +│ ├── test_gravrag.py +``` + +## File Contents and Implementation Guidelines + +### `docker-compose.yml` + +#### File Content: +```yaml + +version: '3.9.11' + +services: + api: + build: . 
+ ports: + - "8000:8000" + environment: + - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - GROQ_API_KEY=${GROQ_API_KEY} + - QDRANT_HOST=qdrant + - QDRANT_PORT=6333 + depends_on: + - qdrant + + qdrant: + image: qdrant/qdrant + ports: + - "6333:6333" + volumes: + - qdrant_data:/qdrant/storage + +volumes: + qdrant_data: +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'docker-compose.yml': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. +``` + +### `makeReadMe.py` + +#### File Content: +```python +import os +import sys +import re +import logging + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +def generate_tree(startpath): + tree = [] + for root, dirs, files in os.walk(startpath): + level = root.replace(startpath, '').count(os.sep) + indent = '│ ' * (level - 1) + '├── ' if level > 0 else '' + tree.append(f"{indent}{os.path.basename(root)}/") + subindent = '│ ' * level + '├── ' + for f in files: + tree.append(f"{subindent}{f}") + return '\n'.join(tree) + +def read_file_content(file_path): + try: + with open(file_path, 'r', encoding='utf-8') as file: + content = file.read() + logging.info(f"Successfully read file: {file_path}") + return content + except Exception as e: + logging.error(f"Error reading file {file_path}: {str(e)}") + return f"Error reading file: {str(e)}" + +def extract_function_names(content): + pattern = r'(async\s+)?function\s+(\w+)|const\s+(\w+)\s*=\s*(async\s*)?\(' + matches = re.findall(pattern, content) + return [match[1] or 
match[2] for match in matches if match[1] or match[2]] + +def generate_implementation_prompt(file_path, content): + functions = extract_function_names(content) + relative_path = os.path.relpath(file_path, start=os.getcwd()) + prompt = f"Implement the following functions for the file '{relative_path}':\n\n" + for func in functions: + prompt += f"- {func}\n" + prompt += "\nEnsure that the implementation follows best practices and integrates with the existing project structure." + return prompt + +def get_file_language(file_extension): + language_map = { + '.js': 'javascript', + '.ts': 'typescript', + '.jsx': 'jsx', + '.tsx': 'tsx', + '.py': 'python', + '.md': 'markdown', + '.yml': 'yaml', + '.env': 'plaintext' + } + return language_map.get(file_extension.lower(), 'plaintext') + +def generate_readme(folder_path): + project_name = os.path.basename(os.path.dirname(folder_path)) + readme_content = f"# {project_name}\n\n" + readme_content += "## Project Structure\n\n```\n" + readme_content += generate_tree(folder_path) + readme_content += "\n```\n\n" + readme_content += "## File Contents and Implementation Guidelines\n\n" + + for root, dirs, files in os.walk(folder_path): + for file in files: + file_extension = os.path.splitext(file)[1] + if file_extension in ['.js', '.ts', '.jsx', '.tsx', '.py', '.env', '.md', '.yml']: + file_path = os.path.join(root, file) + relative_path = os.path.relpath(file_path, folder_path) + content = read_file_content(file_path) + + readme_content += f"### `{relative_path}`\n\n" + readme_content += "#### File Content:\n" + language = get_file_language(file_extension) + readme_content += f"```{language}\n" + readme_content += content + readme_content += "\n```\n\n" + + readme_content += "#### Implementation Guidelines:\n" + readme_content += "- Purpose: [Briefly describe the purpose of this file]\n" + readme_content += "- Key Components/Functions:\n" + for func in extract_function_names(content): + readme_content += f" - `{func}`: [Describe the 
purpose and expected behavior]\n" + readme_content += "- Integration Points: [Describe how this file integrates with other parts of the system]\n" + readme_content += "- Data Flow: [Explain the data flow in and out of this file]\n" + readme_content += "- Error Handling: [Describe any specific error handling requirements]\n\n" + + readme_content += "#### Implementation Prompt:\n" + readme_content += "```\n" + readme_content += generate_implementation_prompt(file_path, content) + readme_content += "\n```\n\n" + + return readme_content + +def main(): + if len(sys.argv) > 1: + folder_path = sys.argv[1] + else: + folder_path = os.getcwd() + + if not os.path.isdir(folder_path): + logging.error(f"Error: {folder_path} is not a valid directory") + sys.exit(1) + + logging.info(f"Generating documentation for: {folder_path}") + readme_content = generate_readme(folder_path) + + readme_path = os.path.join(folder_path, "PROJECT_DOCUMENTATION.md") + try: + with open(readme_path, 'w', encoding='utf-8') as readme_file: + readme_file.write(readme_content) + logging.info(f"PROJECT_DOCUMENTATION.md has been generated at {readme_path}") + except Exception as e: + logging.error(f"Error writing documentation file: {str(e)}") + +if __name__ == "__main__": + main() +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'makeReadMe.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + +### `PROJECT_DOCUMENTATION.md` + +#### File Content: +```markdown + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'PROJECT_DOCUMENTATION.md': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. +``` + +### `PROJECT_DOCUMENTATIONV2.md` + +#### File Content: +```markdown +# Next.js 14 API and Agent Management System + +## Project Structure + +``` +cogenesis-backend/ +├── .env +├── .gitignore +├── docker-compose.yml +├── Dockerfile +├── makeReadMe.py +├── PROJECT_DOCUMENTATION.md +├── PROJECT_DOCUMENTATIONV2.md +├── README.md +├── requirements.txt +├── app/ +│ ├── main.py +│ ├── api/ +│ │ ├── gravrag.py +│ ├── core/ +│ │ ├── config.py +│ │ ├── security.py +│ ├── models/ +│ │ ├── gravrag.py +│ ├── services/ +│ │ ├── gravrag.py +├── tests/ +│ ├── test_gravrag.py +``` + +## File Contents and Implementation Guidelines + +### `.env` + +#### File Content: +```javascript + +ANTHROPIC_API_KEY=your_anthropic_api_key_here +OPENAI_API_KEY=your_openai_api_key_here +GROQ_API_KEY=your_groq_api_key_here +QDRANT_HOST=qdrant +QDRANT_PORT=6333 +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file '.env': + + +Ensure that the implementation follows Next.js 14 best 
practices and integrates with the existing project structure. +``` + +### `docker-compose.yml` + +#### File Content: +```javascript + +version: '3.9.11' + +services: + api: + build: . + ports: + - "8000:8000" + environment: + - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - GROQ_API_KEY=${GROQ_API_KEY} + - QDRANT_HOST=qdrant + - QDRANT_PORT=6333 + depends_on: + - qdrant + + qdrant: + image: qdrant/qdrant + ports: + - "6333:6333" + volumes: + - qdrant_data:/qdrant/storage + +volumes: + qdrant_data: +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'docker-compose.yml': + + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `PROJECT_DOCUMENTATION.md` + +#### File Content: +```javascript + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'PROJECT_DOCUMENTATION.md': + + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `PROJECT_DOCUMENTATIONV2.md` + +#### File Content: +```javascript + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'PROJECT_DOCUMENTATIONV2.md': + + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `README.md` + +#### File Content: +```javascript + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'README.md': + + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'PROJECT_DOCUMENTATIONV2.md': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + +### `README.md` + +#### File Content: +```markdown + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'README.md': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. +``` + +### `app\main.py` + +#### File Content: +```python +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from prometheus_client import make_asgi_app + +# Change this import statement +from api import gravrag + +app = FastAPI(title="Cogenesis Backend API") + +# CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Allows all origins + allow_credentials=True, + allow_methods=["*"], # Allows all methods + allow_headers=["*"], # Allows all headers +) + +# Prometheus metrics +metrics_app = make_asgi_app() +app.mount("/metrics", metrics_app) + +# Include routers +app.include_router(gravrag.router, prefix="/gravrag", tags=["GravRag"]) + +@app.get("/") +async def root(): + return {"message": "Welcome to the Cogenesis Backend API"} + +@app.get("/health") +async def health_check(): + return {"status": "healthy"} + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following 
functions for the file 'app\main.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. +``` + +### `app\api\gravrag.py` + +#### File Content: +```python +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel +from typing import Dict, Any, Optional +import logging +from app.models.gravrag import MemoryManager + +router = APIRouter() +logger = logging.getLogger(__name__) +memory_manager = MemoryManager() + +class MemoryRequest(BaseModel): + content: str + metadata: Optional[Dict[str, Any]] = None + +class RecallRequest(BaseModel): + query: str + top_k: Optional[int] = 5 + +class PruneRequest(BaseModel): + gravity_threshold: Optional[float] = 1e-5 + +class RecallWithMetadataRequest(BaseModel): + query: str + metadata: Dict[str, Any] + top_k: Optional[int] = 10 + +class DeleteByMetadataRequest(BaseModel): + metadata: Dict[str, Any] + +@router.post("/create_memory") +async def create_memory(memory_request: MemoryRequest): + if not memory_request.content.strip(): + logger.warning("Memory creation failed: Empty content.") + raise HTTPException(status_code=400, detail="Content cannot be empty.") + + metadata = memory_request.metadata or {} + try: + logger.info(f"Creating memory: '{memory_request.content}' with metadata: {metadata}") + await memory_manager.create_memory(content=memory_request.content, metadata=metadata) + return {"message": "Memory created successfully"} + except Exception as e: + logger.error(f"Error during memory creation: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Error creating memory: {str(e)}") + +@router.post("/recall_memory") +async def recall_memory(recall_request: RecallRequest): + if not recall_request.query.strip(): + logger.warning("Memory recall failed: Empty query.") + raise HTTPException(status_code=400, detail="Query cannot be empty.") + + try: + logger.info(f"Recalling memories for query: '{recall_request.query}' with 
top_k={recall_request.top_k}") + memories = await memory_manager.recall_memory(query_content=recall_request.query, top_k=recall_request.top_k) + if not memories: + return {"message": "No relevant memories found"} + return {"memories": memories} + except Exception as e: + logger.error(f"Error during memory recall: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Error recalling memories: {str(e)}") + +@router.post("/prune_memories") +async def prune_memories(prune_request: PruneRequest): + try: + await memory_manager.prune_memories() + return {"message": "Memory pruning completed successfully"} + except Exception as e: + logger.error(f"Error during memory pruning: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Error pruning memories: {str(e)}") + +@router.post("/purge_memories") +async def purge_memories(): + try: + await memory_manager.purge_all_memories() + return {"message": "All memories have been purged successfully"} + except Exception as e: + logger.error(f"Error purging memories: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Error purging memories: {str(e)}") + +@router.post("/recall_with_metadata") +async def recall_with_metadata(recall_request: RecallWithMetadataRequest): + """ + Recall memories that match query content and metadata criteria. 
+ """ + query = recall_request.query + metadata = recall_request.metadata + top_k = recall_request.top_k or 10 + + if not query.strip(): + raise HTTPException(status_code=400, detail="Query content cannot be empty.") + if not metadata: + raise HTTPException(status_code=400, detail="Metadata cannot be empty.") + + try: + memories = await memory_manager.recall_memory_with_metadata(query_content=query, search_metadata=metadata, top_k=top_k) + + if not memories or "memories" not in memories: + return {"message": "No matching memories found"} + + return memories + except Exception as e: + logger.error(f"Error during metadata recall: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Error recalling memories: {str(e)}") + +@router.post("/delete_by_metadata") +async def delete_by_metadata(delete_request: DeleteByMetadataRequest): + try: + logger.info(f"Deleting memories with metadata: {delete_request.metadata}") + await memory_manager.delete_memories_by_metadata(metadata=delete_request.metadata) + return {"message": "Memory deletion by metadata completed successfully"} + except Exception as e: + logger.error(f"Error deleting memories by metadata: {str(e)}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Error deleting memories: {str(e)}") + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'app\api\gravrag.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + +### `app\core\config.py` + +#### File Content: +```python + +from pydantic import BaseSettings + +class Settings(BaseSettings): + ANTHROPIC_API_KEY: str + OPENAI_API_KEY: str + GROQ_API_KEY: str + QDRANT_HOST: str + QDRANT_PORT: int + JWT_SECRET_KEY: str = "your-secret-key" + JWT_ALGORITHM: str = "HS256" + ACCESS_TOKEN_EXPIRE_MINUTES: int = 30 + + class Config: + env_file = ".env" + +settings = Settings() +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'app\core\config.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. +``` + +### `app\core\security.py` + +#### File Content: +```python + +from datetime import datetime, timedelta +from typing import Optional +from jose import JWTError, jwt +from passlib.context import CryptContext +from fastapi import Depends, HTTPException, status +from fastapi.security import OAuth2PasswordBearer +from pydantic import BaseModel + +from app.core.config import settings + +pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") +oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token") + +class Token(BaseModel): + access_token: str + token_type: str + +class TokenData(BaseModel): + username: Optional[str] = None + +def verify_password(plain_password, hashed_password): + return pwd_context.verify(plain_password, hashed_password) + +def get_password_hash(password): + return pwd_context.hash(password) + +def create_access_token(data: dict, expires_delta: Optional[timedelta] = None): + to_encode = data.copy() + if expires_delta: + expire = datetime.utcnow() + expires_delta + else: + 
expire = datetime.utcnow() + timedelta(minutes=15) + to_encode.update({"exp": expire}) + encoded_jwt = jwt.encode(to_encode, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM) + return encoded_jwt + +async def get_current_user(token: str = Depends(oauth2_scheme)): + credentials_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Could not validate credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + try: + payload = jwt.decode(token, settings.JWT_SECRET_KEY, algorithms=[settings.JWT_ALGORITHM]) + username: str = payload.get("sub") + if username is None: + raise credentials_exception + token_data = TokenData(username=username) + except JWTError: + raise credentials_exception + return token_data +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'app\core\security.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + +### `app\models\gravrag.py` + +#### File Content: +```python + +from pydantic import BaseModel +from typing import List, Dict, Any, Optional + +class MemoryPacket(BaseModel): + vector: List[float] + metadata: Dict[str, Any] + +class MemoryRequest(BaseModel): + content: str + metadata: Optional[Dict[str, Any]] = None + +class RecallRequest(BaseModel): + query: str + top_k: Optional[int] = 5 + +class PruneRequest(BaseModel): + gravity_threshold: Optional[float] = 1e-5 +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'app\models\gravrag.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + +### `app\services\gravrag.py` + +#### File Content: +```python + +from app.core.config import settings +from app.models.gravrag import MemoryPacket +import time +import math +import uuid +import logging +from typing import List, Dict, Any, Optional +from sentence_transformers import SentenceTransformer +from qdrant_client import QdrantClient +from qdrant_client.models import Distance, VectorParams, PointStruct +from sklearn.metrics.pairwise import cosine_similarity +from datetime import datetime + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Gravitational constants and thresholds +GRAVITATIONAL_THRESHOLD = 1e-5 # This can be adjusted based on system requirements + +class MemoryPacket: + def __init__(self, vector: List[float], content: str, metadata: Dict[str, Any]): + self.vector = vector # Semantic vector (numeric representation) + self.content = content # Original content (human-readable text) + self.metadata = metadata or {} + + # Metadata defaults + self.metadata.setdefault("timestamp", time.time()) + self.metadata.setdefault("recall_count", 0) + self.metadata.setdefault("memetic_similarity", self.calculate_memetic_similarity()) + self.metadata.setdefault("semantic_relativity", 1.0) + self.metadata.setdefault("gravitational_pull", self.calculate_gravitational_pull()) + self.metadata.setdefault("spacetime_coordinate", self.calculate_spacetime_coordinate()) + + def calculate_gravitational_pull(self) -> float: + """ + Gravitational pull incorporates vector magnitude, recall count, memetic similarity, and semantic relativity. 
+ """ + vector_magnitude = math.sqrt(sum(x ** 2 for x in self.vector)) + recall_count = self.metadata["recall_count"] + memetic_similarity = self.metadata["memetic_similarity"] + semantic_relativity = self.metadata["semantic_relativity"] + + # Dynamically calculate gravitational pull + gravitational_pull = vector_magnitude * (1 + math.log1p(recall_count)) * memetic_similarity * semantic_relativity + self.metadata["gravitational_pull"] = gravitational_pull + return gravitational_pull + + def calculate_spacetime_coordinate(self) -> float: + """ + Spacetime coordinate is a decaying function of gravitational pull and time. + """ + time_decay_factor = 1 + (time.time() - self.metadata.get("timestamp", time.time())) + spacetime_coordinate = self.metadata["gravitational_pull"] / time_decay_factor + self.metadata["spacetime_coordinate"] = spacetime_coordinate + return spacetime_coordinate + + def update_relevance(self, query_vector: List[float]): + """ + Update relevance when recalling a memory. This recalculates semantic relativity, memetic similarity, + gravitational pull, and spacetime coordinate. + """ + # Recalculate semantic similarity with the query vector (cosine similarity) + self.metadata["semantic_relativity"] = self.calculate_cosine_similarity(self.vector, query_vector) + + # Recalculate memetic similarity based on dynamic contextual information + self.metadata["memetic_similarity"] = self.calculate_memetic_similarity() + + # Update gravitational pull and spacetime coordinate + self.calculate_gravitational_pull() + self.calculate_spacetime_coordinate() + + def calculate_memetic_similarity(self) -> float: + """ + Dynamically calculate memetic similarity based on tags, recurrence, or any other contextual factors. + This example uses a simple Jaccard similarity between tags, but it can be extended with more complex logic. 
+ """ + if "tags" not in self.metadata: + return 1.0 # Default if no tags are present + + # Example: Jaccard similarity between tags and reference tags + tags = set(self.metadata.get("tags", [])) + reference_tags = set(self.metadata.get("reference_tags", [])) # Reference memory or system-level tags + + if not tags or not reference_tags: + return 1.0 # No tags to compare, assume full similarity + + intersection = len(tags.intersection(reference_tags)) + union = len(tags.union(reference_tags)) + + if union == 0: + return 1.0 # Avoid division by zero + + return intersection / union # Jaccard similarity as a placeholder for memetic similarity + + @staticmethod + def calculate_cosine_similarity(vector_a: List[float], vector_b: List[float]) -> float: + """ Calculate cosine similarity between two vectors. """ + dot_product = sum(a * b for a, b in zip(vector_a, vector_b)) + magnitude_a = math.sqrt(sum(a ** 2 for a in vector_a)) + magnitude_b = math.sqrt(sum(b ** 2 for b in vector_b)) + + if magnitude_a == 0 or magnitude_b == 0: + return 0.0 # Avoid division by zero + + return dot_product / (magnitude_a * magnitude_b) + + def to_payload(self) -> Dict[str, Any]: + """ + Convert the memory packet to a Qdrant-compatible payload for storage. + Store the vector and content separately. + """ + return { + "vector": self.vector, # Correctly storing the vector here + "content": self.content, # Storing the original content here + "metadata": self.metadata + } + + @staticmethod + def from_payload(payload: Dict[str, Any]): + """ Recreate a MemoryPacket from a payload, ensuring 'content' is handled correctly. 
""" + vector = payload.get("vector") + content = payload.get("content", "") # Ensure content is present, or provide a default value + metadata = payload.get("metadata", {}) + + # Raise an error if vector is missing, as it is essential for MemoryPacket + if not vector: + raise ValueError("Vector data is missing in payload") + + return MemoryPacket(vector=vector, content=content, metadata=metadata) + + +class MemoryManager: + def __init__(self, qdrant_host="localhost", qdrant_port=6333, collection_name="Mind"): + self.qdrant_client = QdrantClient(host=qdrant_host, port=qdrant_port) + self.collection_name = collection_name + self.model = SentenceTransformer('all-MiniLM-L6-v2') # Semantic vector model + self._setup_collection() + + def _setup_collection(self): + """ + Ensure that the Qdrant collection is set up for vectors with cosine distance. + """ + try: + self.qdrant_client.get_collection(self.collection_name) + logger.info(f"Collection '{self.collection_name}' exists.") + except Exception: + logger.info(f"Creating collection '{self.collection_name}'.") + self.qdrant_client.create_collection( + collection_name=self.collection_name, + vectors_config=VectorParams(size=self.model.get_sentence_embedding_dimension(), distance=Distance.COSINE) + ) + + async def create_memory(self, content: str, metadata: Dict[str, Any]): + """ + Create a memory from content, vectorize it, and store in Qdrant asynchronously. 
+ """ + vector = self.model.encode(content).tolist() + memory_packet = MemoryPacket(vector=vector, content=content, metadata=metadata) + point_id = str(uuid.uuid4()) + + # Insert the memory packet into the Qdrant collection + self.qdrant_client.upsert( + collection_name=self.collection_name, + points=[PointStruct(id=point_id, vector=vector, payload=memory_packet.to_payload())] + ) + logger.info(f"Memory created successfully with ID: {point_id}") + + async def recall_memory(self, query_content: str, top_k: int = 5): + """ Recall a memory based on query content and return the original content along with metadata. """ + query_vector = self.model.encode(query_content).tolist() + + # Perform semantic search with Qdrant (using the query vector and top_k limit) + results = self.qdrant_client.search( + collection_name=self.collection_name, + query_vector=query_vector, + limit=top_k + ) + + # Recreate MemoryPacket objects from the search results + memories = [MemoryPacket.from_payload(hit.payload) for hit in results] + + # Update relevance for each memory + for memory in memories: + memory.update_relevance(query_vector) + + # Rank memories based on combined relevance factors + ranked_memories = sorted( + memories, + key=lambda mem: ( + mem.metadata['semantic_relativity'] * mem.metadata['memetic_similarity'] * mem.metadata['gravitational_pull'] + ), + reverse=True + ) + + # Return original content and metadata for top K results + return [{ + "content": memory.content, # Return the original content + "metadata": memory.metadata + } for memory in ranked_memories[:top_k]] + + async def prune_memories(self): + """ + Prune low relevance memories based on their gravitational pull and spacetime coordinates. 
+ """ + total_points = self.qdrant_client.count(self.collection_name).count + if total_points > 1000000: # Arbitrary limit + points = self.qdrant_client.scroll(self.collection_name, limit=1000) + low_relevance_points = [ + p.id for p in points if p.payload['metadata']['gravitational_pull'] < GRAVITATIONAL_THRESHOLD + ] + if low_relevance_points: + self.qdrant_client.delete(self.collection_name, points_selector=low_relevance_points) + + async def purge_all_memories(self): + """ + Deletes all memories from the Qdrant collection. + """ + try: + # Delete the entire collection (and all memories within it) + self.qdrant_client.delete_collection(self.collection_name) + + # Re-create the collection after purging + self._setup_collection() + logger.info(f"Purged all memories in the collection '{self.collection_name}'.") + except Exception as e: + logger.error(f"Error purging all memories: {str(e)}") + raise e + + async def recall_memory_with_metadata(self, query_content: str, search_metadata: Dict[str, Any], top_k: int = 10): + """ + Recall memories based on query content, and further filter by matching metadata. 
+ """ + try: + # Step 1: Vector search for the top K most relevant memories based on semantic similarity + query_vector = self.model.encode(query_content).tolist() + results = self.qdrant_client.search( + collection_name=self.collection_name, + query_vector=query_vector, + limit=top_k + ) + + # Step 2: Recreate MemoryPacket objects from the search results + memories = [MemoryPacket.from_payload(hit.payload) for hit in results] + + # Step 3: Filter the top K results based on metadata + matching_memories = [] + for memory in memories: + memory_metadata = memory.metadata + + # Check if all search metadata keys/values match the memory metadata + if all(memory_metadata.get(key) == value for key, value in search_metadata.items()): + matching_memories.append({ + "content": memory.content, + "metadata": memory_metadata + }) + + if not matching_memories: + return {"message": "No matching memories found"} + + return {"memories": matching_memories} + + except Exception as e: + logger.error(f"Error recalling memories by metadata: {str(e)}") + raise e + + + async def delete_memories_by_metadata(self, metadata: Dict[str, Any]): + """ + Delete memories where the metadata matches the given metadata criteria. 
+ """ + try: + # Scroll through all memories in the collection + scroll_result = self.qdrant_client.scroll(self.collection_name, limit=1000) + + # Check if result is a list of points, otherwise handle it as a tuple + if isinstance(scroll_result, tuple): + points = scroll_result[0] + else: + points = scroll_result + + # List to store the IDs of memories to be deleted + memories_to_delete = [] + + for point in points: + if isinstance(point, dict) and 'payload' in point: + point_metadata = point['payload']['metadata'] + + # Check if the point's metadata matches the provided metadata + if all(point_metadata.get(key) == value for key, value in metadata.items()): + memories_to_delete.append(point["id"]) + + # Delete the memories that match the metadata criteria + if memories_to_delete: + self.qdrant_client.delete(self.collection_name, points_selector=memories_to_delete) + logger.info(f"Deleted {len(memories_to_delete)} memories matching the metadata.") + else: + logger.info("No memories found matching the specified metadata.") + except Exception as e: + logger.error(f"Error deleting memories by metadata: {str(e)}") + raise e + +memory_manager = MemoryManager() +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `of`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'app\services\gravrag.py': + +- of + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + +### `tests\test_gravrag.py` + +#### File Content: +```python +import pytest +from fastapi.testclient import TestClient +from app.main import app +import logging + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +client = TestClient(app) + +@pytest.fixture +def mock_memory_manager(mocker): + return mocker.patch('app.services.gravrag.MemoryManager') + +# Test for creating a memory +def test_create_memory(mock_memory_manager): + mock_memory_manager.return_value.create_memory.return_value = None + + payload = { + "content": "This is a test memory", + "metadata": { + "objective_id": "obj_123", + "task_id": "task_123", + "tags": ["test", "example"] + } + } + + response = client.post("/gravrag/create_memory", json=payload) + logger.info(f"Create Memory Response: {response.status_code}") + assert response.status_code == 200 + assert response.json() == {"message": "Memory created successfully"} + +# Test for creating another memory +def test_create_memory_2(mock_memory_manager): + mock_memory_manager.return_value.create_memory.return_value = None + + payload = { + "content": "This is another test memory", + "metadata": { + "objective_id": "obj_567", + "task_id": "task_567", + "tags": ["test", "example"] + } + } + + response = client.post("/gravrag/create_memory", json=payload) + logger.info(f"Create Memory 2 Response: {response.status_code}") + assert response.status_code == 200 + assert response.json() == {"message": "Memory created successfully"} + +# Test for invalid memory creation +def test_create_invalid_memory(mock_memory_manager): + payload = {"content": ""} # Empty content + + response = client.post("/gravrag/create_memory", json=payload) + logger.info(f"Create Invalid Memory Response: {response.status_code}") + assert response.status_code == 400 + assert response.json() == {"detail": "Content cannot be empty."} + +# Test for recalling memory +def test_recall_memory(mock_memory_manager): + 
mock_memory_manager.return_value.recall_memory.return_value = [ + { + "content": "This is a test memory", + "metadata": { + "objective_id": "obj_123", + "task_id": "task_123", + "tags": ["test", "example"], + "gravitational_pull": 0.9, + "memetic_similarity": 1.0, + "semantic_relativity": 1.0, + "timestamp": 1728026867 + } + } + ] + + payload = {"query": "test memory", "top_k": 3} + response = client.post("/gravrag/recall_memory", json=payload) + logger.info(f"Recall Memory Response: {response.status_code}") + assert response.status_code == 200 + assert "memories" in response.json() + +# Test for memory pruning +def test_prune_memories(mock_memory_manager): + mock_memory_manager.return_value.prune_memories.return_value = None + + response = client.post("/gravrag/prune_memories", json={}) + logger.info(f"Prune Memories Response: {response.status_code}") + assert response.status_code == 200 + assert response.json() == {"message": "Memory pruning completed successfully"} + +# Test for memory recall using metadata +def test_recall_memory_with_metadata(mock_memory_manager): + mock_memory_manager.return_value.recall_memory.return_value = [ + { + "content": "This is a test memory", + "metadata": { + "objective_id": "obj_123", + "task_id": "task_123", + "tags": ["test", "example"], + "gravitational_pull": 1.0, + "memetic_similarity": 1.0, + "semantic_relativity": 1.0, + "timestamp": 1728026867 + } + } + ] + + payload = { + "query": "test memory", + "metadata": {"objective_id": "obj_123", "task_id": "task_123"}, + "top_k": 5 + } + response = client.post("/gravrag/recall_with_metadata", json=payload) + logger.info(f"Recall with Metadata Response: {response.status_code}") + assert response.status_code == 200 + assert "memories" in response.json() + +# Test for deleting memory by metadata +def test_delete_by_metadata(mock_memory_manager): + mock_memory_manager.return_value.delete_memories_by_metadata.return_value = None + + payload = {"metadata": {"objective_id": "obj_123", 
"task_id": "task_123"}} + response = client.post("/gravrag/delete_by_metadata", json=payload) + logger.info(f"Delete by Metadata Response: {response.status_code}") + assert response.status_code == 200 + assert response.json() == {"message": "Memory deletion by metadata completed successfully"} + +# Test for purging all memories +def test_purge_memories(mock_memory_manager): + mock_memory_manager.return_value.purge_all_memories.return_value = None + + response = client.post("/gravrag/purge_memories") + logger.info(f"Purge Memories Response: {response.status_code}") + assert response.status_code == 200 + assert response.json() == {"message": "All memories have been purged successfully"} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'tests\test_gravrag.py': + + +Ensure that the implementation follows best practices and integrates with the existing project structure. 
+``` + diff --git a/backend/cogenesis-backend/PROJECT_DOCUMENTATIONV2.md b/backend/cogenesis-backend/PROJECT_DOCUMENTATIONV2.md new file mode 100644 index 0000000..e69de29 diff --git a/backend/cogenesis-backend/README.md b/backend/cogenesis-backend/README.md index 1d908da..e69de29 100644 --- a/backend/cogenesis-backend/README.md +++ b/backend/cogenesis-backend/README.md @@ -1,1090 +0,0 @@ -# Project Structure - -``` -cogenesis-backend/ -├── .env -├── .gitignore -├── docker-compose.yml -├── Dockerfile -├── makeReadMe.py -├── README.md -├── requirements.txt -├── app/ -│ ├── main.py -│ ├── api/ -│ │ ├── gravrag.py -│ │ ├── neural_resources.py -│ ├── core/ -│ │ ├── config.py -│ │ ├── security.py -│ ├── models/ -│ │ ├── gravrag.py -│ │ ├── neural_resources.py -│ ├── services/ -│ │ ├── gravrag.py -│ │ ├── neural_resources.py -├── tests/ -│ ├── test_gravrag.py -│ ├── test_neural_resources.py -``` - -# File Contents - -## `.env` - -``` - -ANTHROPIC_API_KEY=your_anthropic_api_key_here -OPENAI_API_KEY=your_openai_api_key_here -GROQ_API_KEY=your_groq_api_key_here -QDRANT_HOST=qdrant -QDRANT_PORT=6333 -``` - -## `.gitignore` - -``` - -# Python -__pycache__/ -*.py[cod] -*.pyo -*.pyd -.Python -env/ -venv/ -pip-log.txt -pip-delete-this-directory.txt - -# Environments -.env -.venv -env/ -venv/ - -# IDEs -.vscode/ -.idea/ - -# Logs -*.log - -# Docker -.docker/ - -# OS generated files -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db -``` - -## `docker-compose.yml` - -``` - -version: '3.9.11' - -services: - api: - build: . 
- ports: - - "8000:8000" - environment: - - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - GROQ_API_KEY=${GROQ_API_KEY} - - QDRANT_HOST=qdrant - - QDRANT_PORT=6333 - depends_on: - - qdrant - - qdrant: - image: qdrant/qdrant - ports: - - "6333:6333" - volumes: - - qdrant_data:/qdrant/storage - -volumes: - qdrant_data: -``` - -## `Dockerfile` - -``` - -FROM python:3.9 - -WORKDIR /app - -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt - -COPY . . - -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] -``` - -## `makeReadMe.py` - -``` -import os -import sys - -def generate_tree(startpath): - tree = [] - for root, dirs, files in os.walk(startpath): - level = root.replace(startpath, '').count(os.sep) - indent = '│ ' * (level - 1) + '├── ' if level > 0 else '' - tree.append(f"{indent}{os.path.basename(root)}/") - subindent = '│ ' * level + '├── ' - for f in files: - tree.append(f"{subindent}{f}") - return '\n'.join(tree) - -def read_file_content(file_path): - try: - with open(file_path, 'r', encoding='utf-8') as file: - return file.read() - except Exception as e: - return f"Error reading file: {str(e)}" - -def generate_readme(folder_path): - readme_content = f"# Project Structure\n\n```\n{generate_tree(folder_path)}\n```\n\n" - readme_content += "# File Contents\n\n" - - for root, dirs, files in os.walk(folder_path): - for file in files: - file_path = os.path.join(root, file) - relative_path = os.path.relpath(file_path, folder_path) - readme_content += f"## `{relative_path}`\n\n" - readme_content += "```\n" - readme_content += read_file_content(file_path) - readme_content += "\n```\n\n" - - return readme_content - -def main(): - if len(sys.argv) > 1: - folder_path = sys.argv[1] - else: - folder_path = os.getcwd() - - if not os.path.isdir(folder_path): - print(f"Error: {folder_path} is not a valid directory") - sys.exit(1) - - readme_content = generate_readme(folder_path) - - readme_path = 
os.path.join(folder_path, "README.md") - with open(readme_path, 'w', encoding='utf-8') as readme_file: - readme_file.write(readme_content) - - print(f"README.md has been generated at {readme_path}") - -if __name__ == "__main__": - main() -``` - -## `README.md` - -``` - -# Cogenesis Backend API - -This project implements a comprehensive backend API for the Cogenesis system, integrating GravRag memory management and Neural Resources for AI model interactions. - -## Features - -- GravRag memory management (create, recall, prune) -- Neural Resources for interacting with various AI models (Anthropic, OpenAI, Groq, Ollama) -- Authentication and authorization -- Docker support -- Prometheus metrics - -## Getting Started - -### Prerequisites - -- Python 3.9+ -- Docker and Docker Compose (optional) - -### Installation - -1. Clone the repository: - -``` - -git clone https://github.com/your-repo/cogenesis-backend.git -cd cogenesis-backend - -``` - -2. Create a virtual environment and activate it: - -``` - -python -m venv venv -source venv/bin/activate # On Windows, use `venv\\Scripts\\activate` - -``` - -3. Install the dependencies: - -``` - -pip install -r requirements.txt - -``` - -4. Set up environment variables: -Create a `.env` file in the root directory and add the following: - -``` - -ANTHROPIC_API_KEY=your_anthropic_api_key -OPENAI_API_KEY=your_openai_api_key -GROQ_API_KEY=your_groq_api_key -QDRANT_HOST=localhost -QDRANT_PORT=6333 -JWT_SECRET_KEY=your_jwt_secret_key - -``` - -### Running the Application - -#### Using Python - -1. Start the Qdrant server (if not using Docker): - -``` - -qdrant - -``` - -2. Run the FastAPI application: - -``` - -uvicorn app.main:app --reload - -``` - -#### Using Docker - -1. Build and run the Docker containers: - -``` - -docker-compose up --build - -``` - -The API will be available at `http://localhost:8000`. 
- -## API Documentation - -Once the application is running, you can access the Swagger UI documentation at `http://localhost:8000/docs`. - -## Testing - -Run the tests using pytest: - -``` - -pytest - -``` - -## Contributing - -Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct and the process for submitting pull requests. - -## License - -This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details. - -``` - -This comprehensive implementation includes all the components we've discussed, incorporating improvements such as authentication, Docker support, asynchronous operations, error handling, and more. The project structure is organized for scalability and maintainability. - -To run this project: - -1. Set up the environment variables in the `.env` file. -2. Install the dependencies using `pip install -r requirements.txt`. -3. Run the application using `uvicorn app.main:app --reload` or use Docker with `docker-compose up --build`. - -The API will be available at `http://localhost:8000`, and you can access the Swagger UI documentation at `http://localhost:8000/docs`. - -Remember to implement proper error handling, logging, and security measures in a production environment. Also, consider adding more comprehensive tests and documentation as the project evolves. 
-``` - -## `requirements.txt` - -``` - -fastapi==0.68.0 -uvicorn==0.15.0 -pydantic==1.8.2 -python-dotenv==0.19.0 -qdrant-client==0.11.0 -sentence-transformers==2.1.0 -anthropic==0.2.8 -openai==0.27.0 -groq==0.1.0 -requests==2.26.0 -python-jose==3.3.0 -passlib==1.7.4 -bcrypt==3.2.0 -prometheus-client==0.11.0 -``` - -## `app\main.py` - -``` - -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from prometheus_client import make_asgi_app - -from app.api import gravrag, neural_resources - -app = FastAPI(title="Cogenesis Backend API") - -# CORS middleware -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], # Allows all origins - allow_credentials=True, - allow_methods=["*"], # Allows all methods - allow_headers=["*"], # Allows all headers -) - -# Prometheus metrics -metrics_app = make_asgi_app() -app.mount("/metrics", metrics_app) - -# Include routers -app.include_router(gravrag.router, prefix="/gravrag", tags=["GravRag"]) -app.include_router(neural_resources.router, prefix="/neural_resources", tags=["Neural Resources"]) - -@app.get("/") -async def root(): - return {"message": "Welcome to the Cogenesis Backend API"} - -@app.get("/health") -async def health_check(): - return {"status": "healthy"} - -if __name__ == "__main__": - import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8000) -``` - -## `app\api\gravrag.py` - -``` - -from fastapi import APIRouter, HTTPException, Depends -from app.models.gravrag import MemoryRequest, RecallRequest, PruneRequest -from app.services.gravrag import memory_manager -from app.core.security import get_current_user - -router = APIRouter() - -@router.post("/create_memory") -async def create_memory(memory_request: MemoryRequest, current_user: dict = Depends(get_current_user)): - if not memory_request.content.strip(): - raise HTTPException(status_code=400, detail="Content cannot be empty.") - try: - await memory_manager.create_memory(content=memory_request.content, metadata=memory_request.metadata) 
- return {"message": "Memory created successfully"} - except Exception as e: - raise HTTPException(status_code=500, detail=f"Error creating memory: {str(e)}") - -@router.post("/recall_memory") -async def recall_memory(recall_request: RecallRequest, current_user: dict = Depends(get_current_user)): - if not recall_request.query.strip(): - raise HTTPException(status_code=400, detail="Query cannot be empty.") - try: - memories = await memory_manager.recall_memory(query_content=recall_request.query, top_k=recall_request.top_k) - if not memories: - return {"message": "No relevant memories found"} - return {"memories": memories} - except Exception as e: - raise HTTPException(status_code=500, detail=f"Error recalling memories: {str(e)}") - -@router.post("/prune_memories") -async def prune_memories(prune_request: PruneRequest, current_user: dict = Depends(get_current_user)): - try: - await memory_manager.prune_memories() - return {"message": "Memory pruning completed successfully"} - except Exception as e: - raise HTTPException(status_code=500, detail=f"Error pruning memories: {str(e)}") -``` - -## `app\api\neural_resources.py` - -``` - -from fastapi import APIRouter, HTTPException, Depends -from app.models.neural_resources import Message, APIKeyUpdate -from app.services.neural_resources import llm_manager -from app.core.security import get_current_user - -router = APIRouter() - -@router.post("/route_query") -async def route_query(message: Message, current_user: dict = Depends(get_current_user)): - response = llm_manager.route_query(message.content, message.role) - if "error" in response: - raise HTTPException(status_code=500, detail=response["error"]) - return response - -@router.post("/set_api_key") -async def set_api_key(api_key_update: APIKeyUpdate, current_user: dict = Depends(get_current_user)): - try: - llm_manager.set_api_key(api_key_update.provider, api_key_update.api_key) - return {"message": f"API key updated for {api_key_update.provider}"} - except ValueError as 
ve: - raise HTTPException(status_code=400, detail=str(ve)) - -@router.get("/available_models") -async def get_available_models(current_user: dict = Depends(get_current_user)): - models = llm_manager.get_available_models() - return {"available_models": models} - -@router.get("/model_info/{model}") -async def get_model_info(model: str, current_user: dict = Depends(get_current_user)): - model_info = llm_manager.get_model_info(model) - if "error" in model_info: - raise HTTPException(status_code=404, detail=model_info["error"]) - return model_info -``` - -## `app\core\config.py` - -``` - -from pydantic import BaseSettings - -class Settings(BaseSettings): - ANTHROPIC_API_KEY: str - OPENAI_API_KEY: str - GROQ_API_KEY: str - QDRANT_HOST: str - QDRANT_PORT: int - JWT_SECRET_KEY: str = "your-secret-key" - JWT_ALGORITHM: str = "HS256" - ACCESS_TOKEN_EXPIRE_MINUTES: int = 30 - - class Config: - env_file = ".env" - -settings = Settings() -``` - -## `app\core\security.py` - -``` - -from datetime import datetime, timedelta -from typing import Optional -from jose import JWTError, jwt -from passlib.context import CryptContext -from fastapi import Depends, HTTPException, status -from fastapi.security import OAuth2PasswordBearer -from pydantic import BaseModel - -from app.core.config import settings - -pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") -oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token") - -class Token(BaseModel): - access_token: str - token_type: str - -class TokenData(BaseModel): - username: Optional[str] = None - -def verify_password(plain_password, hashed_password): - return pwd_context.verify(plain_password, hashed_password) - -def get_password_hash(password): - return pwd_context.hash(password) - -def create_access_token(data: dict, expires_delta: Optional[timedelta] = None): - to_encode = data.copy() - if expires_delta: - expire = datetime.utcnow() + expires_delta - else: - expire = datetime.utcnow() + timedelta(minutes=15) - 
to_encode.update({"exp": expire}) - encoded_jwt = jwt.encode(to_encode, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM) - return encoded_jwt - -async def get_current_user(token: str = Depends(oauth2_scheme)): - credentials_exception = HTTPException( - status_code=status.HTTP_401_UNAUTHORIZED, - detail="Could not validate credentials", - headers={"WWW-Authenticate": "Bearer"}, - ) - try: - payload = jwt.decode(token, settings.JWT_SECRET_KEY, algorithms=[settings.JWT_ALGORITHM]) - username: str = payload.get("sub") - if username is None: - raise credentials_exception - token_data = TokenData(username=username) - except JWTError: - raise credentials_exception - return token_data -``` - -## `app\models\gravrag.py` - -``` - -from pydantic import BaseModel -from typing import List, Dict, Any, Optional - -class MemoryPacket(BaseModel): - vector: List[float] - metadata: Dict[str, Any] - -class MemoryRequest(BaseModel): - content: str - metadata: Optional[Dict[str, Any]] = None - -class RecallRequest(BaseModel): - query: str - top_k: Optional[int] = 5 - -class PruneRequest(BaseModel): - gravity_threshold: Optional[float] = 1e-5 -``` - -## `app\models\neural_resources.py` - -``` - -from pydantic import BaseModel - -class Message(BaseModel): - content: str - role: str - -class APIKeyUpdate(BaseModel): - provider: str - api_key: str -``` - -## `app\services\gravrag.py` - -``` - -import time -import math -import uuid -import logging -from typing import List, Dict, Any -from sentence_transformers import SentenceTransformer -from qdrant_client import QdrantClient -from qdrant_client.models import Distance, VectorParams, PointStruct - -from app.core.config import settings -from app.models.gravrag import MemoryPacket - -logger = logging.getLogger(__name__) - -GRAVITATIONAL_THRESHOLD = 1e-5 - -class MemoryManager: - def __init__(self): - self.qdrant_client = QdrantClient(host=settings.QDRANT_HOST, port=settings.QDRANT_PORT) - self.collection_name = "Mind" - self.model = 
SentenceTransformer('all-MiniLM-L6-v2') - self._setup_collection() - - def _setup_collection(self): - try: - self.qdrant_client.get_collection(self.collection_name) - logger.info(f"Collection '{self.collection_name}' exists.") - except Exception: - logger.info(f"Creating collection '{self.collection_name}'.") - self.qdrant_client.create_collection( - collection_name=self.collection_name, - vectors_config=VectorParams(size=self.model.get_sentence_embedding_dimension(), distance=Distance.COSINE) - ) - - async def create_memory(self, content: str, metadata: Dict[str, Any]): - vector = self.model.encode(content).tolist() - memory_packet = MemoryPacket(vector=vector, metadata=metadata) - point_id = str(uuid.uuid4()) - - self.qdrant_client.upsert( - collection_name=self.collection_name, - points=[PointStruct(id=point_id, vector=vector, payload=memory_packet.dict())] - ) - logger.info(f"Memory created successfully with ID: {point_id}") - - async def recall_memory(self, query_content: str, top_k: int = 5): - query_vector = self.model.encode(query_content).tolist() - - results = self.qdrant_client.search( - collection_name=self.collection_name, - query_vector=query_vector, - limit=top_k - ) - - memories = [MemoryPacket(**hit.payload) for hit in results] - - for memory in memories: - self._update_relevance(memory, query_vector) - - return [memory.metadata for memory in memories] - - def _update_relevance(self, memory: MemoryPacket, query_vector: List[float]): - memory.metadata["semantic_relativity"] = self._calculate_cosine_similarity(memory.vector, query_vector) - memory.metadata["memetic_similarity"] = self._calculate_memetic_similarity(memory.metadata) - memory.metadata["gravitational_pull"] = self._calculate_gravitational_pull(memory) - memory.metadata["spacetime_coordinate"] = self._calculate_spacetime_coordinate(memory) - - @staticmethod - def _calculate_cosine_similarity(vector_a: List[float], vector_b: List[float]) -> float: - dot_product = sum(a * b for a, b in 
zip(vector_a, vector_b)) - magnitude_a = math.sqrt(sum(a ** 2 for a in vector_a)) - magnitude_b = math.sqrt(sum(b ** 2 for b in vector_b)) - - if magnitude_a == 0 or magnitude_b == 0: - return 0.0 - - return dot_product / (magnitude_a * magnitude_b) - - @staticmethod - def _calculate_memetic_similarity(metadata: Dict[str, Any]) -> float: - tags = set(metadata.get("tags", [])) - reference_tags = set(metadata.get("reference_tags", [])) - - if not tags or not reference_tags: - return 1.0 - - intersection = len(tags.intersection(reference_tags)) - union = len(tags.union(reference_tags)) - - return intersection / union if union > 0 else 1.0 - - @staticmethod - def _calculate_gravitational_pull(memory: MemoryPacket) -> float: - vector_magnitude = math.sqrt(sum(x ** 2 for x in memory.vector)) - recall_count = memory.metadata.get("recall_count", 0) - memetic_similarity = memory.metadata.get("memetic_similarity", 1.0) - semantic_relativity = memory.metadata.get("semantic_relativity", 1.0) - - return vector_magnitude * (1 + math.log1p(recall_count)) * memetic_similarity * semantic_relativity - - @staticmethod - def _calculate_spacetime_coordinate(memory: MemoryPacket) -> float: - time_decay_factor = 1 + (time.time() - memory.metadata.get("timestamp", time.time())) - return memory.metadata["gravitational_pull"] / time_decay_factor - - async def prune_memories(self): - total_points = self.qdrant_client.count(self.collection_name).count - if total_points > 1000000: # Arbitrary limit - points = self.qdrant_client.scroll(self.collection_name, limit=1000) - low_relevance_points = [ - p.id for p in points if p.payload['metadata']['gravitational_pull'] < GRAVITATIONAL_THRESHOLD - ] - if low_relevance_points: - self.qdrant_client.delete( - collection_name=self.collection_name, - points_selector={"points": low_relevance_points} - ) - logger.info(f"Pruned {len(low_relevance_points)} low-relevance memories.") - -memory_manager = MemoryManager() -``` - -## 
`app\services\neural_resources.py` - -``` - -import os -import logging -from typing import List, Dict, Any, Optional -from anthropic import Anthropic -import openai -from groq import Groq -import requests - -from app.core.config import settings - -logger = logging.getLogger(__name__) - -class AIAsset: - def __init__(self, api_key: str): - self.api_key = api_key - - def create_message(self, model: str, role: str, message: str) -> Dict[str, Any]: - raise NotImplementedError - - def get_output_tokens(self, response: Dict[str, Any]) -> int: - raise NotImplementedError - -class AnthropicLLM(AIAsset): - def __init__(self, api_key: str): - super().__init__(api_key) - self.client = Anthropic(api_key=api_key) - logger.info("Anthropic LLM initialized") - - def create_message(self, model: str, role: str, message: str) -> Dict[str, Any]: - logger.debug(f"Creating message for Anthropic model: {model}") - if not message.strip(): - logger.warning("Empty message provided to Anthropic LLM") - return {"error": "Empty message provided"} - try: - response = self.client.messages.create( - model=model, - messages=[{"role": role, "content": message}], - ) - logger.info(f"Successfully created message with Anthropic model: {model}") - return response.model_dump() - except Exception as e: - logger.exception(f"Error creating message for Anthropic: {str(e)}") - return {"error": f"Anthropic failed: {str(e)}"} - - def get_output_tokens(self, response: Dict[str, Any]) -> int: - return response.get('usage', {}).get('output_tokens', 0) - -class OpenAILLM(AIAsset): - def __init__(self, api_key: str): - super().__init__(api_key) - openai.api_key = api_key - logger.info("OpenAI LLM initialized") - - def create_message(self, model: str, role: str, message: str) -> Dict[str, Any]: - logger.debug(f"Creating message for OpenAI model: {model}") - if not message.strip(): - logger.warning("Empty message provided to OpenAI LLM") - return {"error": "Empty message provided"} - try: - response = 
openai.ChatCompletion.create( - model=model, - messages=[{"role": role, "content": message}], - ) - logger.info(f"Successfully created message with OpenAI model: {model}") - return response.to_dict() - except Exception as e: - logger.exception(f"Error creating message for OpenAI: {str(e)}") - return {"error": f"OpenAI failed: {str(e)}"} - - def get_output_tokens(self, response: Dict[str, Any]) -> int: - return response.get('usage', {}).get('completion_tokens', 0) - -class GroqLLM(AIAsset): - def __init__(self, api_key: str): - super().__init__(api_key) - self.client = Groq(api_key=api_key) - logger.info("Groq LLM initialized") - - def create_message(self, model: str, role: str, message: str) -> Dict[str, Any]: - logger.debug(f"Creating message for Groq model: {model}") - if not message.strip(): - logger.warning("Empty message provided to Groq LLM") - return {"error": "Empty message provided"} - try: - response = self.client.chat.completions.create( - model=model, - messages=[{"role": role, "content": message}], - ) - logger.info(f"Successfully created message with Groq model: {model}") - return response.to_dict() - except Exception as e: - logger.exception(f"Error creating message for Groq: {str(e)}") - return {"error": f"Groq failed: {str(e)}"} - - def get_output_tokens(self, response: Dict[str, Any]) -> int: - return response.get('usage', {}).get('completion_tokens', 0) - -class OllamaLLM(AIAsset): - def __init__(self, base_url: str = ""): - super().__init__(api_key="") - self.base_url = base_url - logger.info(f"Ollama LLM initialized with base URL: {base_url}") - - def create_message(self, model: str, role: str, message: str) -> Dict[str, Any]: - logger.debug(f"Creating message for Ollama model: {model}") - if not message.strip(): - logger.warning("Empty message provided to Ollama LLM") - return {"error": "Empty message provided"} - try: - url = f"{self.base_url}/api/generate" - payload = { - "model": model, - "role": role, - "prompt": message, - "stream": False 
- } - response = requests.post(url, json=payload, timeout=30) - response.raise_for_status() - logger.info(f"Successfully created message with Ollama model: {model}") - return response.json() - except requests.RequestException as e: - logger.exception(f"Error creating message for Ollama: {str(e)}") - return {"error": f"Ollama failed: {str(e)}"} - - def get_output_tokens(self, response: Dict[str, Any]) -> int: - return len(response.get('response', '').split()) - -class LLMManager: - def __init__(self): - self.llm_models: Dict[str, AIAsset] = {} - self.overridden_keys: Dict[str, str] = {} - self.models_cache = {} # Cache to store fetched model info - self._initialize_models() - logger.info("LLMManager initialized") - - def _initialize_models(self): - logger.debug("Initializing AI models") - for provider, api_key in self._load_api_keys().items(): - if api_key: - llm_instance = self._create_llm_instance(provider, api_key) - if llm_instance: - self.llm_models[provider] = llm_instance - self.llm_models["ollama"] = OllamaLLM() - logger.info(f"Initialized models: {', '.join(self.llm_models.keys())}") - - def _load_api_keys(self) -> Dict[str, str]: - logger.debug("Loading API keys") - keys = { - "anthropic": self.overridden_keys.get('anthropic', settings.ANTHROPIC_API_KEY), - "openai": self.overridden_keys.get('openai', settings.OPENAI_API_KEY), - "groq": self.overridden_keys.get('groq', settings.GROQ_API_KEY), - } - for provider, key in keys.items(): - if key: - logger.info(f"API key loaded for {provider}") - else: - logger.warning(f"No API key found for {provider}") - return keys - - def _create_llm_instance(self, provider: str, api_key: str) -> Optional[AIAsset]: - logger.debug(f"Creating LLM instance for provider: {provider}") - if provider == "anthropic": - return AnthropicLLM(api_key) - elif provider == "openai": - return OpenAILLM(api_key) - elif provider == "groq": - return GroqLLM(api_key) - else: - logger.warning(f"Unknown provider: {provider}") - return None - - 
def set_api_key(self, provider: str, api_key: str): - logger.info(f"Setting API key for provider: {provider}") - if not provider or not api_key: - logger.error("Invalid provider or API key provided") - raise ValueError("Both provider and api_key must be non-empty strings") - self.overridden_keys[provider] = api_key - self._initialize_models() - - def get_available_models(self) -> List[str]: - models = [] - - # Fetch Ollama models - try: - logger.info("Fetching Ollama models") - ollama_response = requests.get("") - if ollama_response.status_code == 200: - ollama_data = ollama_response.json() - ollama_models = [model['name'] for model in ollama_data.get('models', [])] - models.extend(ollama_models) - else: - logger.error(f"Failed to fetch Ollama models: {ollama_response.status_code}") - except Exception as e: - logger.error(f"Error fetching Ollama models: {str(e)}") - - # Fetch Groq models - try: - logger.info("Fetching Groq models") - groq_api_key = settings.GROQ_API_KEY - groq_response = requests.get( - "", - headers={ - "Authorization": f"Bearer {groq_api_key}", - "Content-Type": "application/json" - } - ) - if groq_response.status_code == 200: - groq_models_data = groq_response.json() - groq_models = groq_models_data.get("data", []) - models.extend([model['id'] for model in groq_models]) - else: - logger.error(f"Failed to fetch Groq models: {groq_response.status_code}") - except Exception as e: - logger.error(f"Error fetching Groq models: {str(e)}") - - logger.debug(f"Available models: {', '.join(models)}") - return models - - def route_query(self, message: str, role: str, model: Optional[str] = None) -> Dict[str, Any]: - logger.info(f"Routing query to {'specified model: ' + model if model else 'default model'}") - if not message.strip(): - logger.warning("Empty message provided to route_query") - return {"error": "Empty message provided"} - - if model: - for provider, llm in self.llm_models.items(): - try: - logger.debug(f"Attempting to create message with 
provider: {provider}, model: {model}") - response = llm.create_message(model, role, message) - if "error" not in response: - logger.info(f"Successfully created message with provider: {provider}, model: {model}") - return response - logger.warning(f"Error with {provider}, model {model}: {response['error']}") - except Exception as e: - logger.exception(f"Unexpected error with {provider}, model {model}: {str(e)}") - - logger.error(f"Specified model {model} is not available or failed for all providers") - return {"error": f"Specified model {model} is not available or failed for all providers"} - - for provider, llm in self.llm_models.items(): - try: - logger.debug(f"Attempting to create message with provider: {provider}") - response = llm.create_message(provider, role, message) - if "error" not in response: - logger.info(f"Successfully created message with provider: {provider}") - return response - logger.warning(f"Error with {provider}: {response['error']}") - except Exception as e: - logger.exception(f"Unexpected error with {provider}: {str(e)}") - - logger.error("No available models could process the request") - return {"error": "No available models could process the request"} - - def get_model_info(self, model: str) -> Dict[str, Any]: - logger.info(f"Retrieving model info for model: {model}") - - # Check if the model info is cached - if model in self.models_cache: - logger.debug(f"Model info for {model} retrieved from cache.") - return self.models_cache[model] - - try: - # Fetch from Ollama - available_models = self.get_available_models() - if model in available_models: - logger.info(f"Fetching model info from Ollama for model: {model}") - url = f"{self.llm_models['ollama'].base_url}/api/models/{model}" - ollama_response = requests.get(url) - if ollama_response.status_code == 200: - model_info = ollama_response.json() - self.models_cache[model] = model_info - logger.debug(f"Fetched Ollama model info: {model_info}") - return model_info - else: - 
logger.error(f"Failed to fetch Ollama model info: {ollama_response.status_code}, Response: {ollama_response.text}") - - # Fetch from Groq - groq_api_key = settings.GROQ_API_KEY - if groq_api_key: - logger.info(f"Fetching model info from Groq for model: {model}") - url = f"" - groq_response = requests.get( - url, - headers={ - "Authorization": f"Bearer {groq_api_key}", - "Content-Type": "application/json" - } - ) - if groq_response.status_code == 200: - model_info = groq_response.json() - self.models_cache[model] = model_info - logger.debug(f"Fetched Groq model info: {model_info}") - return model_info - else: - logger.error(f"Failed to fetch Groq model info: {groq_response.status_code}, Response: {groq_response.text}") - except Exception as e: - logger.error(f"Error fetching model info for {model}: {str(e)}") - return {"error": f"Model {model} not found or failed to retrieve info."} - - logger.error(f"Model {model} not found in any provider.") - return {"error": f"Model {model} not found in any provider."} - -llm_manager = LLMManager() -``` - -## `tests\test_gravrag.py` - -``` - -import pytest -from fastapi.testclient import TestClient -from app.main import app - -client = TestClient(app) - -@pytest.fixture -def mock_memory_manager(mocker): - return mocker.patch('app.services.gravrag.MemoryManager') - -def test_create_memory(mock_memory_manager): - response = client.post("/gravrag/create_memory", json={"content": "Test memory", "metadata": {"key": "value"}}) - assert response.status_code == 200 - assert response.json() == {"message": "Memory created successfully"} - -def test_recall_memory(mock_memory_manager): - mock_memory_manager.return_value.recall_memory.return_value = [{"content": "Test memory"}] - response = client.post("/gravrag/recall_memory", json={"query": "Test query", "top_k": 5}) - assert response.status_code == 200 - assert "memories" in response.json() - -def test_prune_memories(mock_memory_manager): - response = 
client.post("/gravrag/prune_memories", json={"gravity_threshold": 0.5}) - assert response.status_code == 200 - assert response.json() == {"message": "Memory pruning completed successfully"} -``` - -## `tests\test_neural_resources.py` - -``` - -import pytest -from fastapi.testclient import TestClient -from app.main import app - -client = TestClient(app) - -@pytest.fixture -def mock_llm_manager(mocker): - return mocker.patch('app.services.neural_resources.LLMManager') - -def test_route_query(mock_llm_manager): - mock_llm_manager.return_value.route_query.return_value = {"response": "Test response"} - response = client.post("/neural_resources/route_query", json={"content": "Test query", "role": "user"}) - assert response.status_code == 200 - assert "response" in response.json() - -def test_set_api_key(mock_llm_manager): - response = client.post("/neural_resources/set_api_key", json={"provider": "test_provider", "api_key": "test_key"}) - assert response.status_code == 200 - assert response.json() == {"message": "API key updated for test_provider"} - -def test_get_available_models(mock_llm_manager): - mock_llm_manager.return_value.get_available_models.return_value = ["model1", "model2"] - response = client.get("/neural_resources/available_models") - assert response.status_code == 200 - assert "available_models" in response.json() - -def test_get_model_info(mock_llm_manager): - mock_llm_manager.return_value.get_model_info.return_value = {"model": "test_model", "info": "test_info"} - response = client.get("/neural_resources/model_info/test_model") - assert response.status_code == 200 - assert "model" in response.json() -``` - diff --git a/backend/cogenesis-backend/app/api/gravrag.py b/backend/cogenesis-backend/app/api/gravrag.py index fc94cf5..5b26b1d 100644 --- a/backend/cogenesis-backend/app/api/gravrag.py +++ b/backend/cogenesis-backend/app/api/gravrag.py @@ -1,33 +1,34 @@ -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter, HTTPException, 
Depends from pydantic import BaseModel from typing import Dict, Any, Optional import logging -from app.models.gravrag import MemoryManager + +from app.models.gravrag import MemoryRequest, RecallRequest, PruneRequest, RecallWithMetadataRequest, DeleteByMetadataRequest +from app.services.gravrag import memory_manager +from app.core.security import get_current_user router = APIRouter() logger = logging.getLogger(__name__) -memory_manager = MemoryManager() -class MemoryRequest(BaseModel): - content: str - metadata: Optional[Dict[str, Any]] = None +class CreateMemoryResponse(BaseModel): + message: str + +class RecallMemoryResponse(BaseModel): + memories: Optional[list] -class RecallRequest(BaseModel): - query: str - top_k: Optional[int] = 5 +class PruneMemoriesResponse(BaseModel): + message: str -class PruneRequest(BaseModel): - gravity_threshold: Optional[float] = 1e-5 +class PurgeMemoriesResponse(BaseModel): + message: str -class RecallWithMetadataRequest(BaseModel): - query: str - metadata: Dict[str, Any] - top_k: Optional[int] = 10 +class RecallWithMetadataResponse(BaseModel): + memories: Optional[list] -class DeleteByMetadataRequest(BaseModel): - metadata: Dict[str, Any] +class DeleteByMetadataResponse(BaseModel): + message: str -@router.post("/create_memory") +@router.post("/create_memory", response_model=CreateMemoryResponse, dependencies=[Depends(get_current_user)]) async def create_memory(memory_request: MemoryRequest): if not memory_request.content.strip(): logger.warning("Memory creation failed: Empty content.") @@ -42,7 +43,7 @@ async def create_memory(memory_request: MemoryRequest): logger.error(f"Error during memory creation: {str(e)}", exc_info=True) raise HTTPException(status_code=500, detail=f"Error creating memory: {str(e)}") -@router.post("/recall_memory") +@router.post("/recall_memory", response_model=RecallMemoryResponse, dependencies=[Depends(get_current_user)]) async def recall_memory(recall_request: RecallRequest): if not 
recall_request.query.strip(): logger.warning("Memory recall failed: Empty query.") @@ -52,13 +53,13 @@ async def recall_memory(recall_request: RecallRequest): logger.info(f"Recalling memories for query: '{recall_request.query}' with top_k={recall_request.top_k}") memories = await memory_manager.recall_memory(query_content=recall_request.query, top_k=recall_request.top_k) if not memories: - return {"message": "No relevant memories found"} + return {"memories": []} return {"memories": memories} except Exception as e: logger.error(f"Error during memory recall: {str(e)}", exc_info=True) raise HTTPException(status_code=500, detail=f"Error recalling memories: {str(e)}") -@router.post("/prune_memories") +@router.post("/prune_memories", response_model=PruneMemoriesResponse, dependencies=[Depends(get_current_user)]) async def prune_memories(prune_request: PruneRequest): try: await memory_manager.prune_memories() @@ -67,7 +68,7 @@ async def prune_memories(prune_request: PruneRequest): logger.error(f"Error during memory pruning: {str(e)}", exc_info=True) raise HTTPException(status_code=500, detail=f"Error pruning memories: {str(e)}") -@router.post("/purge_memories") +@router.post("/purge_memories", response_model=PurgeMemoriesResponse, dependencies=[Depends(get_current_user)]) async def purge_memories(): try: await memory_manager.purge_all_memories() @@ -76,7 +77,7 @@ async def purge_memories(): logger.error(f"Error purging memories: {str(e)}", exc_info=True) raise HTTPException(status_code=500, detail=f"Error purging memories: {str(e)}") -@router.post("/recall_with_metadata") +@router.post("/recall_with_metadata", response_model=RecallWithMetadataResponse, dependencies=[Depends(get_current_user)]) async def recall_with_metadata(recall_request: RecallWithMetadataRequest): """ Recall memories that match query content and metadata criteria. 
@@ -94,14 +95,14 @@ async def recall_with_metadata(recall_request: RecallWithMetadataRequest): memories = await memory_manager.recall_memory_with_metadata(query_content=query, search_metadata=metadata, top_k=top_k) if not memories or "memories" not in memories: - return {"message": "No matching memories found"} + return {"memories": []} return memories except Exception as e: logger.error(f"Error during metadata recall: {str(e)}", exc_info=True) raise HTTPException(status_code=500, detail=f"Error recalling memories: {str(e)}") -@router.post("/delete_by_metadata") +@router.post("/delete_by_metadata", response_model=DeleteByMetadataResponse, dependencies=[Depends(get_current_user)]) async def delete_by_metadata(delete_request: DeleteByMetadataRequest): try: logger.info(f"Deleting memories with metadata: {delete_request.metadata}") diff --git a/backend/cogenesis-backend/app/core/config.py b/backend/cogenesis-backend/app/core/config.py index 82d49ac..c20bca8 100644 --- a/backend/cogenesis-backend/app/core/config.py +++ b/backend/cogenesis-backend/app/core/config.py @@ -1,4 +1,3 @@ - from pydantic import BaseSettings class Settings(BaseSettings): @@ -14,4 +13,4 @@ class Settings(BaseSettings): class Config: env_file = ".env" -settings = Settings() \ No newline at end of file +settings = Settings() diff --git a/backend/cogenesis-backend/app/core/security.py b/backend/cogenesis-backend/app/core/security.py index 1741fb8..28e95d6 100644 --- a/backend/cogenesis-backend/app/core/security.py +++ b/backend/cogenesis-backend/app/core/security.py @@ -1,4 +1,3 @@ - from datetime import datetime, timedelta from typing import Optional from jose import JWTError, jwt @@ -19,13 +18,13 @@ class Token(BaseModel): class TokenData(BaseModel): username: Optional[str] = None -def verify_password(plain_password, hashed_password): +def verify_password(plain_password: str, hashed_password: str) -> bool: return pwd_context.verify(plain_password, hashed_password) -def get_password_hash(password): 
+def get_password_hash(password: str) -> str: return pwd_context.hash(password) -def create_access_token(data: dict, expires_delta: Optional[timedelta] = None): +def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str: to_encode = data.copy() if expires_delta: expire = datetime.utcnow() + expires_delta @@ -35,7 +34,7 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None): encoded_jwt = jwt.encode(to_encode, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM) return encoded_jwt -async def get_current_user(token: str = Depends(oauth2_scheme)): +async def get_current_user(token: str = Depends(oauth2_scheme)) -> TokenData: credentials_exception = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials", @@ -49,4 +48,4 @@ async def get_current_user(token: str = Depends(oauth2_scheme)): token_data = TokenData(username=username) except JWTError: raise credentials_exception - return token_data \ No newline at end of file + return token_data diff --git a/backend/cogenesis-backend/app/main.py b/backend/cogenesis-backend/app/main.py index a64ead4..4278a45 100644 --- a/backend/cogenesis-backend/app/main.py +++ b/backend/cogenesis-backend/app/main.py @@ -1,16 +1,16 @@ - from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from prometheus_client import make_asgi_app -from app.api import gravrag, neural_resources +from app.api.gravrag import router as gravrag_router +from app.core.config import settings app = FastAPI(title="Cogenesis Backend API") # CORS middleware app.add_middleware( CORSMiddleware, - allow_origins=["*"], # Allows all origins + allow_origins=["*"], # Allows all origins; adjust as needed for production allow_credentials=True, allow_methods=["*"], # Allows all methods allow_headers=["*"], # Allows all headers @@ -21,7 +21,7 @@ app.mount("/metrics", metrics_app) # Include routers -app.include_router(gravrag.router, prefix="/gravrag", 
tags=["GravRag"]) +app.include_router(gravrag_router, prefix="/gravrag", tags=["GravRag"]) @app.get("/") async def root(): @@ -33,4 +33,4 @@ async def health_check(): if __name__ == "__main__": import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/backend/cogenesis-backend/app/models/gravrag.py b/backend/cogenesis-backend/app/models/gravrag.py index 53b9218..712ef63 100644 --- a/backend/cogenesis-backend/app/models/gravrag.py +++ b/backend/cogenesis-backend/app/models/gravrag.py @@ -1,4 +1,3 @@ - from pydantic import BaseModel from typing import List, Dict, Any, Optional @@ -15,4 +14,12 @@ class RecallRequest(BaseModel): top_k: Optional[int] = 5 class PruneRequest(BaseModel): - gravity_threshold: Optional[float] = 1e-5 \ No newline at end of file + gravity_threshold: Optional[float] = 1e-5 + +class RecallWithMetadataRequest(BaseModel): + query: str + metadata: Dict[str, Any] + top_k: Optional[int] = 10 + +class DeleteByMetadataRequest(BaseModel): + metadata: Dict[str, Any] diff --git a/backend/cogenesis-backend/app/services/gravrag.py b/backend/cogenesis-backend/app/services/gravrag.py index ef2d4c1..5d25791 100644 --- a/backend/cogenesis-backend/app/services/gravrag.py +++ b/backend/cogenesis-backend/app/services/gravrag.py @@ -1,4 +1,3 @@ - from app.core.config import settings from app.models.gravrag import MemoryPacket import time @@ -97,6 +96,9 @@ def calculate_memetic_similarity(self) -> float: @staticmethod def calculate_cosine_similarity(vector_a: List[float], vector_b: List[float]) -> float: """ Calculate cosine similarity between two vectors. 
""" + if not vector_a or not vector_b: + return 0.0 + dot_product = sum(a * b for a, b in zip(vector_a, vector_b)) magnitude_a = math.sqrt(sum(a ** 2 for a in vector_a)) magnitude_b = math.sqrt(sum(b ** 2 for b in vector_b)) @@ -130,9 +132,8 @@ def from_payload(payload: Dict[str, Any]): return MemoryPacket(vector=vector, content=content, metadata=metadata) - class MemoryManager: - def __init__(self, qdrant_host="localhost", qdrant_port=6333, collection_name="Mind"): + def __init__(self, qdrant_host: str = settings.QDRANT_HOST, qdrant_port: int = settings.QDRANT_PORT, collection_name: str = "Mind"): self.qdrant_client = QdrantClient(host=qdrant_host, port=qdrant_port) self.collection_name = collection_name self.model = SentenceTransformer('all-MiniLM-L6-v2') # Semantic vector model @@ -167,7 +168,7 @@ async def create_memory(self, content: str, metadata: Dict[str, Any]): ) logger.info(f"Memory created successfully with ID: {point_id}") - async def recall_memory(self, query_content: str, top_k: int = 5): + async def recall_memory(self, query_content: str, top_k: int = 5) -> List[Dict[str, Any]]: """ Recall a memory based on query content and return the original content along with metadata. """ query_vector = self.model.encode(query_content).tolist() @@ -204,15 +205,18 @@ async def prune_memories(self): """ Prune low relevance memories based on their gravitational pull and spacetime coordinates. 
""" - total_points = self.qdrant_client.count(self.collection_name).count - if total_points > 1000000: # Arbitrary limit - points = self.qdrant_client.scroll(self.collection_name, limit=1000) + total_points = self.qdrant_client.count(collection_name=self.collection_name).count + if total_points > 1000000: # Arbitrary limit; adjust as needed + points = self.qdrant_client.scroll(collection_name=self.collection_name, limit=1000) low_relevance_points = [ p.id for p in points if p.payload['metadata']['gravitational_pull'] < GRAVITATIONAL_THRESHOLD ] if low_relevance_points: - self.qdrant_client.delete(self.collection_name, points_selector=low_relevance_points) - + self.qdrant_client.delete(collection_name=self.collection_name, points_selector=low_relevance_points) + logger.info(f"Pruned {len(low_relevance_points)} low-relevance memories.") + else: + logger.info("No low-relevance memories found to prune.") + async def purge_all_memories(self): """ Deletes all memories from the Qdrant collection. @@ -228,7 +232,7 @@ async def purge_all_memories(self): logger.error(f"Error purging all memories: {str(e)}") raise e - async def recall_memory_with_metadata(self, query_content: str, search_metadata: Dict[str, Any], top_k: int = 10): + async def recall_memory_with_metadata(self, query_content: str, search_metadata: Dict[str, Any], top_k: int = 10) -> Dict[str, Any]: """ Recall memories based on query content, and further filter by matching metadata. 
""" @@ -257,7 +261,7 @@ async def recall_memory_with_metadata(self, query_content: str, search_metadata: }) if not matching_memories: - return {"message": "No matching memories found"} + return {"memories": []} return {"memories": matching_memories} @@ -265,35 +269,30 @@ async def recall_memory_with_metadata(self, query_content: str, search_metadata: logger.error(f"Error recalling memories by metadata: {str(e)}") raise e - async def delete_memories_by_metadata(self, metadata: Dict[str, Any]): """ Delete memories where the metadata matches the given metadata criteria. """ try: # Scroll through all memories in the collection - scroll_result = self.qdrant_client.scroll(self.collection_name, limit=1000) + scroll_result = self.qdrant_client.scroll(collection_name=self.collection_name, limit=1000) + + # Check if result is a list of points + points = scroll_result if isinstance(scroll_result, list) else [] - # Check if result is a list of points, otherwise handle it as a tuple - if isinstance(scroll_result, tuple): - points = scroll_result[0] - else: - points = scroll_result - # List to store the IDs of memories to be deleted memories_to_delete = [] for point in points: - if isinstance(point, dict) and 'payload' in point: - point_metadata = point['payload']['metadata'] - - # Check if the point's metadata matches the provided metadata - if all(point_metadata.get(key) == value for key, value in metadata.items()): - memories_to_delete.append(point["id"]) + point_metadata = point.payload.get('metadata', {}) + + # Check if the point's metadata matches the provided metadata + if all(point_metadata.get(key) == value for key, value in metadata.items()): + memories_to_delete.append(point.id) # Delete the memories that match the metadata criteria if memories_to_delete: - self.qdrant_client.delete(self.collection_name, points_selector=memories_to_delete) + self.qdrant_client.delete(collection_name=self.collection_name, points_selector=memories_to_delete) logger.info(f"Deleted 
{len(memories_to_delete)} memories matching the metadata.") else: logger.info("No memories found matching the specified metadata.") @@ -301,4 +300,5 @@ async def delete_memories_by_metadata(self, metadata: Dict[str, Any]): logger.error(f"Error deleting memories by metadata: {str(e)}") raise e -memory_manager = MemoryManager() \ No newline at end of file +# Initialize the MemoryManager instance +memory_manager = MemoryManager() diff --git a/backend/cogenesis-backend/docker-compose.yml b/backend/cogenesis-backend/docker-compose.yml index 573e470..aaac67c 100644 --- a/backend/cogenesis-backend/docker-compose.yml +++ b/backend/cogenesis-backend/docker-compose.yml @@ -1,4 +1,3 @@ - version: '3.9.11' services: @@ -23,4 +22,4 @@ services: - qdrant_data:/qdrant/storage volumes: - qdrant_data: \ No newline at end of file + qdrant_data: diff --git a/backend/cogenesis-backend/makeReadMe.py b/backend/cogenesis-backend/makeReadMe.py index fb52271..d73613e 100644 --- a/backend/cogenesis-backend/makeReadMe.py +++ b/backend/cogenesis-backend/makeReadMe.py @@ -1,5 +1,9 @@ import os import sys +import re +import logging + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') def generate_tree(startpath): tree = [] @@ -15,22 +19,76 @@ def generate_tree(startpath): def read_file_content(file_path): try: with open(file_path, 'r', encoding='utf-8') as file: - return file.read() + content = file.read() + logging.info(f"Successfully read file: {file_path}") + return content except Exception as e: + logging.error(f"Error reading file {file_path}: {str(e)}") return f"Error reading file: {str(e)}" +def extract_function_names(content): + pattern = r'(async\s+)?def\s+(\w+)|class\s+(\w+)' + matches = re.findall(pattern, content) + return [match[1] or match[2] for match in matches if match[1] or match[2]] + +def generate_implementation_prompt(file_path, content): + functions = extract_function_names(content) + relative_path = os.path.relpath(file_path, 
start=os.getcwd()) + prompt = f"Implement the following functions/classes for the file '{relative_path}':\n\n" + for func in functions: + prompt += f"- {func}\n" + prompt += "\nEnsure that the implementation follows best practices and integrates with the existing project structure." + return prompt + +def get_file_language(file_extension): + language_map = { + '.js': 'javascript', + '.ts': 'typescript', + '.jsx': 'jsx', + '.tsx': 'tsx', + '.py': 'python', + '.md': 'markdown', + '.yml': 'yaml', + '.env': 'plaintext' + } + return language_map.get(file_extension.lower(), 'plaintext') + def generate_readme(folder_path): - readme_content = f"# Project Structure\n\n```\n{generate_tree(folder_path)}\n```\n\n" - readme_content += "# File Contents\n\n" + project_name = os.path.basename(os.path.dirname(folder_path)) + readme_content = f"# {project_name}\n\n" + readme_content += "## Project Structure\n\n```\n" + readme_content += generate_tree(folder_path) + readme_content += "\n```\n\n" + readme_content += "## File Contents and Implementation Guidelines\n\n" for root, dirs, files in os.walk(folder_path): for file in files: - file_path = os.path.join(root, file) - relative_path = os.path.relpath(file_path, folder_path) - readme_content += f"## `{relative_path}`\n\n" - readme_content += "```\n" - readme_content += read_file_content(file_path) - readme_content += "\n```\n\n" + file_extension = os.path.splitext(file)[1] + if file_extension in ['.js', '.ts', '.jsx', '.tsx', '.py', '.env', '.md', '.yml']: + file_path = os.path.join(root, file) + relative_path = os.path.relpath(file_path, folder_path) + content = read_file_content(file_path) + + readme_content += f"### `{relative_path}`\n\n" + readme_content += "#### File Content:\n" + language = get_file_language(file_extension) + readme_content += f"```{language}\n" + readme_content += content + readme_content += "\n```\n\n" + + readme_content += "#### Implementation Guidelines:\n" + readme_content += "- Purpose: [Briefly 
describe the purpose of this file]\n" + readme_content += "- Key Components/Functions:\n" + for func in extract_function_names(content): + readme_content += f" - `{func}`: [Describe the purpose and expected behavior]\n" + readme_content += "- Integration Points: [Describe how this file integrates with other parts of the system]\n" + readme_content += "- Data Flow: [Explain the data flow in and out of this file]\n" + readme_content += "- Error Handling: [Describe any specific error handling requirements]\n\n" + + readme_content += "#### Implementation Prompt:\n" + readme_content += "```\n" + readme_content += generate_implementation_prompt(file_path, content) + readme_content += "\n```\n\n" return readme_content @@ -41,16 +99,19 @@ def main(): folder_path = os.getcwd() if not os.path.isdir(folder_path): - print(f"Error: {folder_path} is not a valid directory") + logging.error(f"Error: {folder_path} is not a valid directory") sys.exit(1) + logging.info(f"Generating documentation for: {folder_path}") readme_content = generate_readme(folder_path) - readme_path = os.path.join(folder_path, "README.md") - with open(readme_path, 'w', encoding='utf-8') as readme_file: - readme_file.write(readme_content) - - print(f"README.md has been generated at {readme_path}") + readme_path = os.path.join(folder_path, "PROJECT_DOCUMENTATION.md") + try: + with open(readme_path, 'w', encoding='utf-8') as readme_file: + readme_file.write(readme_content) + logging.info(f"PROJECT_DOCUMENTATION.md has been generated at {readme_path}") + except Exception as e: + logging.error(f"Error writing documentation file: {str(e)}") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/backend/cogenesis-backend/requirements.txt b/backend/cogenesis-backend/requirements.txt index d135c0f..7b3efda 100644 --- a/backend/cogenesis-backend/requirements.txt +++ b/backend/cogenesis-backend/requirements.txt @@ -1,15 +1,12 @@ - -fastapi==0.68.0 -uvicorn==0.15.0 -pydantic==2.0.0 
-python-dotenv==0.19.0 -qdrant-client==0.11.0 -sentence-transformers==2.1.0 -anthropic==0.2.8 -openai==0.27.0 -groq==0.11.0 -requests==2.26.0 +fastapi==0.95.1 +uvicorn==0.22.0 +pydantic==1.10.7 +qdrant-client==1.3.0 +sentence-transformers==2.4.1 +scikit-learn==1.2.2 python-jose==3.3.0 -passlib==1.7.4 -bcrypt==3.2.0 -prometheus-client==0.11.0 \ No newline at end of file +passlib[bcrypt]==1.7.4 +prometheus-client==0.17.0 +pytest==7.2.0 +pytest-asyncio==0.21.0 +httpx==0.24.0 diff --git a/backend/cogenesis-backend/tests/test_gravrag.py b/backend/cogenesis-backend/tests/test_gravrag.py index 0e8948e..e338cd0 100644 --- a/backend/cogenesis-backend/tests/test_gravrag.py +++ b/backend/cogenesis-backend/tests/test_gravrag.py @@ -1,5 +1,6 @@ import pytest from fastapi.testclient import TestClient +from unittest.mock import AsyncMock, patch from app.main import app import logging @@ -10,13 +11,14 @@ client = TestClient(app) @pytest.fixture -def mock_memory_manager(mocker): - return mocker.patch('app.services.gravrag.MemoryManager') +def mock_memory_manager(): + with patch('app.services.gravrag.memory_manager') as mock: + yield mock # Test for creating a memory def test_create_memory(mock_memory_manager): - mock_memory_manager.return_value.create_memory.return_value = None - + mock_memory_manager.create_memory = AsyncMock() + payload = { "content": "This is a test memory", "metadata": { @@ -30,10 +32,11 @@ def test_create_memory(mock_memory_manager): logger.info(f"Create Memory Response: {response.status_code}") assert response.status_code == 200 assert response.json() == {"message": "Memory created successfully"} + mock_memory_manager.create_memory.assert_awaited_once_with(content=payload["content"], metadata=payload["metadata"]) # Test for creating another memory def test_create_memory_2(mock_memory_manager): - mock_memory_manager.return_value.create_memory.return_value = None + mock_memory_manager.create_memory = AsyncMock() payload = { "content": "This is another test 
memory", @@ -48,6 +51,7 @@ def test_create_memory_2(mock_memory_manager): logger.info(f"Create Memory 2 Response: {response.status_code}") assert response.status_code == 200 assert response.json() == {"message": "Memory created successfully"} + mock_memory_manager.create_memory.assert_awaited_once_with(content=payload["content"], metadata=payload["metadata"]) # Test for invalid memory creation def test_create_invalid_memory(mock_memory_manager): @@ -60,7 +64,7 @@ def test_create_invalid_memory(mock_memory_manager): # Test for recalling memory def test_recall_memory(mock_memory_manager): - mock_memory_manager.return_value.recall_memory.return_value = [ + mock_memories = [ { "content": "This is a test memory", "metadata": { @@ -74,38 +78,44 @@ def test_recall_memory(mock_memory_manager): } } ] + mock_memory_manager.recall_memory = AsyncMock(return_value=mock_memories) payload = {"query": "test memory", "top_k": 3} response = client.post("/gravrag/recall_memory", json=payload) logger.info(f"Recall Memory Response: {response.status_code}") assert response.status_code == 200 - assert "memories" in response.json() + assert response.json() == {"memories": mock_memories} + mock_memory_manager.recall_memory.assert_awaited_once_with(query_content=payload["query"], top_k=payload["top_k"]) # Test for memory pruning def test_prune_memories(mock_memory_manager): - mock_memory_manager.return_value.prune_memories.return_value = None + mock_memory_manager.prune_memories = AsyncMock() response = client.post("/gravrag/prune_memories", json={}) logger.info(f"Prune Memories Response: {response.status_code}") assert response.status_code == 200 assert response.json() == {"message": "Memory pruning completed successfully"} + mock_memory_manager.prune_memories.assert_awaited_once() # Test for memory recall using metadata def test_recall_memory_with_metadata(mock_memory_manager): - mock_memory_manager.return_value.recall_memory.return_value = [ - { - "content": "This is a test memory", - 
"metadata": { - "objective_id": "obj_123", - "task_id": "task_123", - "tags": ["test", "example"], - "gravitational_pull": 1.0, - "memetic_similarity": 1.0, - "semantic_relativity": 1.0, - "timestamp": 1728026867 + mock_memories = { + "memories": [ + { + "content": "This is a test memory", + "metadata": { + "objective_id": "obj_123", + "task_id": "task_123", + "tags": ["test", "example"], + "gravitational_pull": 1.0, + "memetic_similarity": 1.0, + "semantic_relativity": 1.0, + "timestamp": 1728026867 + } } - } - ] + ] + } + mock_memory_manager.recall_memory_with_metadata = AsyncMock(return_value=mock_memories) payload = { "query": "test memory", @@ -115,23 +125,30 @@ def test_recall_memory_with_metadata(mock_memory_manager): response = client.post("/gravrag/recall_with_metadata", json=payload) logger.info(f"Recall with Metadata Response: {response.status_code}") assert response.status_code == 200 - assert "memories" in response.json() + assert response.json() == mock_memories + mock_memory_manager.recall_memory_with_metadata.assert_awaited_once_with( + query_content=payload["query"], + search_metadata=payload["metadata"], + top_k=payload["top_k"] + ) # Test for deleting memory by metadata def test_delete_by_metadata(mock_memory_manager): - mock_memory_manager.return_value.delete_memories_by_metadata.return_value = None + mock_memory_manager.delete_memories_by_metadata = AsyncMock() payload = {"metadata": {"objective_id": "obj_123", "task_id": "task_123"}} response = client.post("/gravrag/delete_by_metadata", json=payload) logger.info(f"Delete by Metadata Response: {response.status_code}") assert response.status_code == 200 assert response.json() == {"message": "Memory deletion by metadata completed successfully"} + mock_memory_manager.delete_memories_by_metadata.assert_awaited_once_with(metadata=payload["metadata"]) # Test for purging all memories def test_purge_memories(mock_memory_manager): - mock_memory_manager.return_value.purge_all_memories.return_value = None 
+ mock_memory_manager.purge_all_memories = AsyncMock() response = client.post("/gravrag/purge_memories") logger.info(f"Purge Memories Response: {response.status_code}") assert response.status_code == 200 assert response.json() == {"message": "All memories have been purged successfully"} + mock_memory_manager.purge_all_memories.assert_awaited_once() diff --git a/frontend/src/app/api/agentmanagement/PROJECT_DOCUMENTATION.md b/frontend/src/app/api/agentmanagement/PROJECT_DOCUMENTATION.md new file mode 100644 index 0000000..0918f1e --- /dev/null +++ b/frontend/src/app/api/agentmanagement/PROJECT_DOCUMENTATION.md @@ -0,0 +1,1697 @@ +# Next.js 14 API and Agent Management System + +## Project Structure + +``` +agentmanagement/ +├── agentfile.py +├── makeReadMe.py +├── admin/ +│ ├── analytics/ +│ │ ├── task-completion/ +│ │ │ ├── route.js +│ │ ├── user-activity/ +│ │ │ ├── route.js +│ ├── overview/ +│ │ ├── route.js +│ ├── promote/ +│ │ ├── route.js +│ ├── task-status/ +│ │ ├── route.js +│ ├── tasks/ +│ │ ├── assign/ +│ │ │ ├── route.js +│ │ ├── [id]/ +│ │ │ ├── route.js +│ ├── user-activity/ +│ │ ├── route.js +│ ├── users/ +│ │ ├── route.js +├── app/ +│ ├── api/ +├── comments/ +│ ├── route.js +│ ├── [taskId]/ +│ │ ├── route.js +├── processor/ +│ ├── route.js +├── projects/ +│ ├── route.js +│ ├── [id]/ +│ │ ├── route.js +├── submissions/ +│ ├── route.js +├── tasks/ +│ ├── route.js +│ ├── assign/ +│ │ ├── route.js +│ ├── [id]/ +│ │ ├── route.js +├── upload/ +│ ├── route.js +├── users/ +│ ├── route.js +│ ├── login/ +│ │ ├── route.js +│ ├── me/ +│ │ ├── route.js +│ ├── register/ +│ │ ├── route.js +│ ├── [id]/ +│ │ ├── route.js +``` + +## File Contents and Implementation Guidelines + +### `admin\analytics\task-completion\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/analytics/task-completion - GET handler + * @description Handles GET requests for /api/admin/analytics/task-completion + */ + +import { NextResponse } from 'next/server'; + +// You can 
import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/admin/analytics/task-completion + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/admin/analytics/task-completion successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/analytics/task-completion GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\analytics\task-completion\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `admin\analytics\user-activity\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/analytics/user-activity - GET handler + * @description Handles GET requests for /api/admin/analytics/user-activity + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/admin/analytics/user-activity + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/admin/analytics/user-activity successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/analytics/user-activity GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\analytics\user-activity\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `admin\overview\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/overview - GET handler + * @description Handles GET requests for /api/admin/overview + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/admin/overview + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/admin/overview successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/overview GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... 
} +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\overview\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `admin\promote\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/promote - POST handler + * @description Handles POST requests for /api/admin/promote + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/admin/promote + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/admin/promote successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/promote POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export 
other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\promote\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `admin\task-status\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/task-status - GET handler + * @description Handles GET requests for /api/admin/task-status + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/admin/task-status + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/admin/task-status successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/task-status GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\task-status\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `admin\tasks\assign\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/tasks/assign - POST handler + * @description Handles POST requests for /api/admin/tasks/assign + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/admin/tasks/assign + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/admin/tasks/assign successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/tasks/assign POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function 
post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\tasks\assign\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `admin\tasks\[id]\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/tasks/[id] - PUT handler + * @description Handles PUT requests for /api/admin/tasks/[id] + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles PUT requests for /api/admin/tasks/[id] + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function put(request) { + try { + // Your put logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'PUT request to /api/admin/tasks/[id] successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/tasks/[id] PUT handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { 
status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `put`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\tasks\[id]\route.js': + +- put +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `admin\user-activity\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/user-activity - GET handler + * @description Handles GET requests for /api/admin/user-activity + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/admin/user-activity + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/admin/user-activity successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/user-activity GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\user-activity\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `admin\users\route.js` + +#### File Content: +```javascript +/** + * @file /api/admin/users - GET handler + * @description Handles GET requests for /api/admin/users + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/admin/users + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/admin/users successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/admin/users GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... 
} +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\admin\users\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `comments\route.js` + +#### File Content: +```javascript +/** + * @file /api/comments - POST handler + * @description Handles POST requests for /api/comments + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/comments + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/comments successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/comments POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// 
export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\comments\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `comments\[taskId]\route.js` + +#### File Content: +```javascript +/** + * @file /api/comments/[taskId] - GET handler + * @description Handles GET requests for /api/comments/[taskId] + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/comments/[taskId] + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/comments/[taskId] successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/comments/[taskId] GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\comments\[taskId]\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `processor\route.js` + +#### File Content: +```javascript +/** + * @file /api/processor - POST handler + * @description Handles POST requests for /api/processor + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/processor + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/processor successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/processor POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... 
} +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\processor\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `projects\route.js` + +#### File Content: +```javascript +/** + * @file /api/projects - GET handler + * @description Handles GET requests for /api/projects + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/projects + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/projects successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/projects GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export 
async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\projects\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `projects\[id]\route.js` + +#### File Content: +```javascript +/** + * @file /api/projects/[id] - DELETE handler + * @description Handles DELETE requests for /api/projects/[id] + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles DELETE requests for /api/projects/[id] + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function delete(request) { + try { + // Your delete logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'DELETE request to /api/projects/[id] successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/projects/[id] DELETE handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' },
+ { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `delete`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\projects\[id]\route.js': + +- delete +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `submissions\route.js` + +#### File Content: +```javascript +/** + * @file /api/submissions - GET handler + * @description Handles GET requests for /api/submissions + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/submissions + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/submissions successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/submissions GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\submissions\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `tasks\route.js` + +#### File Content: +```javascript +/** + * @file /api/tasks - GET handler + * @description Handles GET requests for /api/tasks + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/tasks + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/tasks successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/tasks GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... 
} +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\tasks\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `tasks\assign\route.js` + +#### File Content: +```javascript +/** + * @file /api/tasks/assign - POST handler + * @description Handles POST requests for /api/tasks/assign + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/tasks/assign + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/tasks/assign successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/tasks/assign POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function 
post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\tasks\assign\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `tasks\[id]\route.js` + +#### File Content: +```javascript +/** + * @file /api/tasks/[id] - DELETE handler + * @description Handles DELETE requests for /api/tasks/[id] + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles DELETE requests for /api/tasks/[id] + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function delete(request) { + try { + // Your delete logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'DELETE request to /api/tasks/[id] successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/tasks/[id] DELETE handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } 
+} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `delete`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\tasks\[id]\route.js': + +- delete +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `upload\route.js` + +#### File Content: +```javascript +/** + * @file /api/upload - POST handler + * @description Handles POST requests for /api/upload + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/upload + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/upload successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/upload POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\upload\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `users\route.js` + +#### File Content: +```javascript +/** + * @file /api/users - GET handler + * @description Handles GET requests for /api/users + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/users + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/users successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/users GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... 
} +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\users\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `users\login\route.js` + +#### File Content: +```javascript +/** + * @file /api/users/login - POST handler + * @description Handles POST requests for /api/users/login + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/users/login + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/users/login successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/users/login POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function 
post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\users\login\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `users\me\route.js` + +#### File Content: +```javascript +/** + * @file /api/users/me - GET handler + * @description Handles GET requests for /api/users/me + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles GET requests for /api/users/me + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function get(request) { + try { + // Your get logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'GET request to /api/users/me successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/users/me GET handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP 
methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `get`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\users\me\route.js': + +- get +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. 
+``` + +### `users\register\route.js` + +#### File Content: +```javascript +/** + * @file /api/users/register - POST handler + * @description Handles POST requests for /api/users/register + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles POST requests for /api/users/register + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function post(request) { + try { + // Your post logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'POST request to /api/users/register successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/users/register POST handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... } +// export async function put(request) { ... } +// export async function delete(request) { ... 
} + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `post`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\users\register\route.js': + +- post +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + +### `users\[id]\route.js` + +#### File Content: +```javascript +/** + * @file /api/users/[id] - DELETE handler + * @description Handles DELETE requests for /api/users/[id] + */ + +import { NextResponse } from 'next/server'; + +// You can import necessary utilities or services here +// import { someUtilityFunction } from '@/utils/someUtility'; +// import SomeService from '@/services/SomeService'; + +/** + * Handles DELETE requests for /api/users/[id] + * @param {NextRequest} request - The incoming request object + * @returns {Promise} The response object + */ +export async function delete(request) { + try { + // Your delete logic here + // const someData = await SomeService.getData(); + + return NextResponse.json( + { message: 'DELETE request to /api/users/[id] successful' }, + { status: 200 } + ); + } catch (error) { + console.error('Error in /api/users/[id] DELETE handler:', error); + return NextResponse.json( + { error: 'An internal server error occurred' }, + { status: 500 } + ); + } +} + +// Export other HTTP methods as needed +// export async function post(request) { ... 
} +// export async function put(request) { ... } +// export async function delete(request) { ... } + +``` + +#### Implementation Guidelines: +- Purpose: [Briefly describe the purpose of this file] +- Key Components/Functions: + - `delete`: [Describe the purpose and expected behavior] + - `post`: [Describe the purpose and expected behavior] + - `put`: [Describe the purpose and expected behavior] + - `delete`: [Describe the purpose and expected behavior] +- Integration Points: [Describe how this file integrates with other parts of the system] +- Data Flow: [Explain the data flow in and out of this file] +- Error Handling: [Describe any specific error handling requirements] + +#### Implementation Prompt: +``` +Implement the following functions for the file 'src\app\api\agentmanagement\users\[id]\route.js': + +- delete +- post +- put +- delete + +Ensure that the implementation follows Next.js 14 best practices and integrates with the existing project structure. +``` + diff --git a/frontend/src/app/api/agentmanagement/makeReadMe.py b/frontend/src/app/api/agentmanagement/makeReadMe.py index e2f0198..c997587 100644 --- a/frontend/src/app/api/agentmanagement/makeReadMe.py +++ b/frontend/src/app/api/agentmanagement/makeReadMe.py @@ -1,5 +1,6 @@ import os import sys +import re def generate_tree(startpath): tree = [] @@ -19,18 +20,52 @@ def read_file_content(file_path): except Exception as e: return f"Error reading file: {str(e)}" +def extract_function_names(content): + pattern = r'(async\s+)?function\s+(\w+)|const\s+(\w+)\s*=\s*(async\s*)?\(' + matches = re.findall(pattern, content) + return [match[1] or match[2] for match in matches if match[1] or match[2]] + +def generate_implementation_prompt(file_path, content): + functions = extract_function_names(content) + relative_path = os.path.relpath(file_path, start=os.getcwd()) + prompt = f"Implement the following functions for the file '{relative_path}':\n\n" + for func in functions: + prompt += f"- {func}\n" + prompt += 
"\nEnsure that the implementation follows Next.js 14 best practices and integrates with the existing project structure." + return prompt + def generate_readme(folder_path): - readme_content = f"# Project Structure\n\n```\n{generate_tree(folder_path)}\n```\n\n" - readme_content += "# File Contents\n\n" + readme_content = "# Next.js 14 API and Agent Management System\n\n" + readme_content += "## Project Structure\n\n```\n" + readme_content += generate_tree(folder_path) + readme_content += "\n```\n\n" + readme_content += "## File Contents and Implementation Guidelines\n\n" for root, dirs, files in os.walk(folder_path): for file in files: - if file.endswith(('.js', '.ts', '.jsx', '.tsx')): + if file.endswith(('.js', '.ts', '.jsx', '.tsx', ".env",".md",".yml")): file_path = os.path.join(root, file) relative_path = os.path.relpath(file_path, folder_path) - readme_content += f"## `{relative_path}`\n\n" + content = read_file_content(file_path) + + readme_content += f"### `{relative_path}`\n\n" + readme_content += "#### File Content:\n" readme_content += "```javascript\n" - readme_content += read_file_content(file_path) + readme_content += content + readme_content += "\n```\n\n" + + readme_content += "#### Implementation Guidelines:\n" + readme_content += "- Purpose: [Briefly describe the purpose of this file]\n" + readme_content += "- Key Components/Functions:\n" + for func in extract_function_names(content): + readme_content += f" - `{func}`: [Describe the purpose and expected behavior]\n" + readme_content += "- Integration Points: [Describe how this file integrates with other parts of the system]\n" + readme_content += "- Data Flow: [Explain the data flow in and out of this file]\n" + readme_content += "- Error Handling: [Describe any specific error handling requirements]\n\n" + + readme_content += "#### Implementation Prompt:\n" + readme_content += "```\n" + readme_content += generate_implementation_prompt(file_path, content) readme_content += "\n```\n\n" return 
readme_content @@ -47,11 +82,13 @@ def main(): readme_content = generate_readme(folder_path) - readme_path = os.path.join(folder_path, "cursorrules.md") + readme_path = os.path.join(folder_path, "PROJECT_DOCUMENTATION.md") with open(readme_path, 'w', encoding='utf-8') as readme_file: readme_file.write(readme_content) - print(f"cursorrules.md has been generated at {readme_path}") + print(f"PROJECT_DOCUMENTATION.md has been generated at {readme_path}") if __name__ == "__main__": - main() \ No newline at end of file + main() + + diff --git a/frontend/src/app/dashboardV11/.cursorrules b/frontend/src/app/dashboardV11/.cursorrules new file mode 100644 index 0000000..7e0b4f9 --- /dev/null +++ b/frontend/src/app/dashboardV11/.cursorrules @@ -0,0 +1,4132 @@ +Certainly! Below is a **fully comprehensive prompt** designed to guide the creation of a robust, visually appealing, and fully functional Dashboard in a Next.js and React environment. This prompt integrates both **GROQ** and **Ollama** APIs for chat and model selection, ensuring all functionalities are implemented without deferring any code. It's structured to cover component integration, API interactions, state management, responsive design, error handling, and more, making it ideal for a hackathon setting where time and efficiency are critical. + +--- + +### **Comprehensive Prompt for Building a Fully Functional and Beautiful Dashboard Page** + +--- + +#### **Objective:** + +Create a cohesive and comprehensive **Dashboard** page using **Next.js** and **React** that integrates both **GROQ** and **Ollama** APIs for chat functionalities and model selection. The Dashboard must dynamically fetch available models, handle API interactions for chat, manage settings, and provide seamless user interactions. The final product should be robust, fully functional, responsive, and visually appealing, ready for immediate deployment in a hackathon environment without any omitted or deferred code. 
+ +--- + +#### **Project Structure Overview:** + +- **Frontend:** + - **Framework:** Next.js with React + - **Components:** + - `Dashboard.jsx` (Main Dashboard Component) + - `ChatView.jsx` + - `FileUploader.jsx` + - `WorkflowBuilder.jsx` + - `ToolingConfiguration.jsx` + - `Settings.jsx` + - `MermaidDiagramComponent.jsx` + - Navigation Components (`TopNavigation.jsx`, `SideNavigation.jsx`) + - `NotificationProvider.jsx` + - **Utilities:** + - `chatAPI.jsx` (Handles API interactions) + - `utils.js` (Utility functions, e.g., `cn` for className concatenation) + +- **Backend (API Routes):** + - `/api/chat-groq/route.js` + - `/api/chat-ollama/route.js` + - `/api/ollama-models/route.js` + - Additional API routes as needed (e.g., `/api/upload`, `/api/tools`, `/api/workflows`) + +--- + +#### **Instructions:** + +##### **1. Dashboard Component Refactoring:** + +- **Integrate Navigation Bars:** + - **Create `TopNavigation.jsx`:** + - Handles global navigation options (e.g., logo, user profile, global settings). + - Ensure it's responsive and collapsible on smaller screens. + - **Create `SideNavigation.jsx`:** + - Controls section navigation within the dashboard (e.g., Chat, Workflow, File Upload, Tooling). + - Implement dynamic rendering of navigation items with icons and labels. + - Add collapsible functionality for better responsiveness on mobile devices. + +- **Implement Navigation Bar Functionality:** + - **Dynamic Navigation Items:** + - Define an array of navigation items with properties like `id`, `label`, `icon`, and `route`. + - Map over this array to render navigation buttons dynamically. + - **Click Handlers:** + - Use `useRouter` from Next.js to handle navigation between sections. + - Update `currentView` state based on the selected navigation item. 
+ +- **Merge Components into Dashboard:** + - **Integrate `ChatView`, `FileUploader`, `WorkflowBuilder`, and `ToolingConfiguration`:** + - Ensure each component is imported and conditionally rendered based on `currentView`. + - Implement smooth transitions between views for enhanced user experience. + +--- + +##### **2. Settings Component Separation:** + +- **Create a New `Settings.jsx` Component:** + - **Extraction:** + - Move all settings-related UI elements from `Dashboard.jsx` into `Settings.jsx`. + - **Functionality:** + - Manage API keys, model selection, temperature, max tokens, top P, top K, streaming options, and system prompts for both GROQ and Ollama APIs. + - Allow dynamic loading of available models from both APIs. + - Implement a sliding drawer for quick access to settings. + - **Prop Handling:** + - Define and validate props using `PropTypes` (e.g., `state`, `setState`, `saveApiKey`, `setSystemPrompt`, `resetSettings`, `toggleDarkMode`, `availableModels`). + +--- + +##### **3. API Integration and Model Selection:** + +- **GROQ API Integration (`/api/chat-groq/route.js`):** + - **Functionality:** + - Handle POST requests to interact with GROQ's chat functionality. + - Pass required data (`messages`, `model`, `temperature`, `max_tokens`, `top_p`, `stream`) to GROQ API. + - **Error Handling:** + - Return meaningful error messages and statuses for missing API keys or API failures. + - **Streaming Support:** + - Currently not implemented; return a `501 Not Implemented` status with an appropriate message. + +- **Ollama API Integration (`/api/chat-ollama/route.js`):** + - **Functionality:** + - Handle POST requests to interact with Ollama's chat functionality. + - Pass required data (`messages`, `model`, `system_prompt`, `temperature`, `max_tokens`, `top_p`, `top_k`, `stream`) to Ollama API. + - **Error Handling:** + - Return meaningful error messages and statuses for API failures. 
+ - **Streaming Support:** + - Currently not implemented; return a `501 Not Implemented` status with an appropriate message. + +- **Model Fetching (`/api/ollama-models/route.js`):** + - **Functionality:** + - Handle GET requests to fetch available Ollama models. + - Return a list of model names. + - **Error Handling:** + - Return appropriate error messages and statuses if fetching fails. + +- **Dynamic API Switching:** + - **Implementation:** + - Allow users to toggle between GROQ and Ollama APIs in the `Settings` component. + - Automatically fetch and load available models based on the selected API. + +- **Error Handling and Retry Logic:** + - **Comprehensive Error Handling:** + - Log errors to the console and display user-friendly notifications. + - **Retry Mechanism:** + - Implement retry logic for failed API calls with up to 3 attempts and delays between retries. + +--- + +##### **4. Chat Functionality:** + +- **Dynamic Chat Component (`ChatView.jsx`):** + - **Features:** + - Send and receive messages via selected API (GROQ or Ollama). + - Support system prompts and manage chat history. + - Handle streaming responses gracefully, notifying users if unsupported. + - Optimize performance for long chat histories with lazy loading and scroll-to-bottom functionality. + - **Markdown Support:** + - Render messages with `ReactMarkdown`, supporting syntax highlighting for code blocks. + +--- + +##### **5. Settings Component:** + +- **Modular Settings Management (`Settings.jsx`):** + - **UI Elements:** + - Dropdowns for API selection and model selection. + - Input fields for GROQ API keys. + - Sliders for temperature, max tokens, top P, and top K. + - Switches for streaming responses and dark mode. + - Textarea for system prompts. + - **State Management:** + - Manage internal state and propagate changes to the parent `Dashboard` component via props. + - **Validation:** + - Implement form validation for inputs to prevent invalid configurations. 
+ - **Error Handling:** + - Display error messages for invalid API keys or unsupported models. + +--- + +##### **6. File Uploader Component:** + +- **Enhance File Uploading (`FileUploader.jsx`):** + - **Features:** + - Support multiple file uploads with progress indicators. + - Validate file types before initiating uploads. + - Implement success and error notifications upon upload completion. + - Display a list of uploaded files with options to remove them. + - **Error Handling:** + - Handle network or server errors gracefully, informing the user appropriately. + +--- + +##### **7. Mermaid Diagrams Component:** + +- **Improve Mermaid Diagram Handling (`MermaidDiagramComponent.jsx`):** + - **Features:** + - Render diagrams based on Mermaid syntax input. + - Implement error handling for invalid Mermaid code, providing user feedback. + - Add zoom and pan functionalities to handle large or complex diagrams. + - **Optimization:** + - Ensure diagrams are rendered efficiently without performance bottlenecks. + +--- + +##### **8. Workflow Builder:** + +- **Enhance Workflow Creation (`WorkflowBuilder.jsx`):** + - **Features:** + - Implement drag-and-drop functionality for adding and reordering tools within a workflow. + - Validate workflows to ensure all required fields are populated before saving. + - Allow users to save workflows to the backend and load them for reuse. + - **Error Handling:** + - Provide user feedback for validation errors or failed save/load operations. + +--- + +##### **9. API Error Handling and Retry Logic:** + +- **Comprehensive Error Handling:** + - **Implementation:** + - Catch and handle errors from both GROQ and Ollama APIs. + - Log errors to the console and display user-friendly notifications. + +- **Retry Logic:** + - **Implementation:** + - Implement retry mechanisms for failed API calls, with up to 3 attempts and delays between retries (e.g., 1 second). + - Use exponential backoff strategies to handle persistent failures gracefully. 
+ +--- + +##### **10. Hydration and State Management:** + +- **Resolve Hydration Issues:** + - **Identify and Fix:** + - Use console warnings and error tracking to locate hydration mismatches. + - Move any browser-specific code (e.g., `window`, `document`) into `useEffect` hooks to ensure they run only on the client side. + - **Data Fetching:** + - Use Next.js data fetching methods (`getServerSideProps`, `getStaticProps`) where appropriate to pre-fetch server-side data. + +- **Global State Management:** + - **Implementation:** + - Utilize React Context API to manage global states such as selected API, model, and user settings across the Dashboard. + - Optimize state updates using `useMemo` and `useCallback` to prevent unnecessary re-renders. + - **Prop Passing:** + - Ensure smooth and consistent prop passing between components, especially for settings and chat functionalities. + +--- + +##### **11. State Management Optimization:** + +- **Global State with Context:** + - **Implementation:** + - Create a `GlobalContext` using React Context API to store global states like `apiKey`, `selectedModel`, `settings`, and `chats`. + - Provide context values to all components that require access to these states. + +- **Memoization:** + - **Implementation:** + - Use `useMemo` to memoize expensive computations or derived data. + - Use `useCallback` to memoize callback functions passed to child components to prevent unnecessary re-renders. + +- **Efficient Prop Passing:** + - **Review:** + - Ensure that all necessary props are correctly passed to child components. + - Avoid prop drilling by leveraging Context where appropriate. + +--- + +##### **12. Responsive Design and Performance Optimization:** + +- **Mobile Optimization:** + - **Implementation:** + - Use CSS media queries or a responsive CSS framework (e.g., Tailwind CSS) to ensure the Dashboard is mobile-friendly. + - Implement collapsible sidebars and navigation menus for smaller screens. 
+ +- **Performance Enhancements:** + - **Optimization:** + - Optimize rendering of the `ChatView` and `WorkflowBuilder` components, especially when dealing with long lists. + - Implement lazy loading or virtualization for chat messages and workflow tools to enhance performance. + - Ensure efficient state updates to prevent performance bottlenecks. + +--- + +##### **13. Testing and Quality Assurance:** + +- **Unit Testing:** + - **Implementation:** + - Write unit tests for each key component (e.g., `ChatView`, `Settings`, `FileUploader`, `WorkflowBuilder`, `ToolingConfiguration`). + - Use testing libraries like Jest and React Testing Library. + +- **Integration Testing:** + - **Implementation:** + - Test interactions between components, especially between `Dashboard`, `Settings`, and `ChatView`. + - Ensure that state changes in `Settings` correctly propagate to other components. + +- **End-to-End Testing:** + - **Implementation:** + - Use tools like Cypress or Playwright to write end-to-end tests for critical user flows (e.g., sending a chat message, switching APIs, uploading files). + +- **Cross-Browser Testing:** + - **Implementation:** + - Test the Dashboard across different browsers (Chrome, Firefox, Safari, Edge) to ensure consistent behavior and appearance. + +- **Performance Testing:** + - **Implementation:** + - Use tools like Lighthouse to audit performance, accessibility, and best practices. + - Optimize based on the audit results to ensure fast load times and smooth interactions. + +--- + +##### **14. Documentation and Code Cleanup:** + +- **Code Documentation:** + - **Implementation:** + - Add comprehensive JSDoc comments for all functions, components, and significant code blocks. + - Explain parameters, return types, and functionality within the comments. + +- **README Update:** + - **Implementation:** + - Create or update the `README.md` with clear setup and usage instructions. + - Detail API configurations, model selections, and chat functionalities. 
+ - Include instructions for running tests and building the project. + +- **Code Cleanup:** + - **Implementation:** + - Remove any unused imports, variables, or functions. + - Ensure consistent code styling and adherence to best practices (e.g., using Prettier and ESLint). + - Refactor repetitive code into reusable components or utility functions. + +--- + +##### **15. Accessibility Improvements:** + +- **ARIA Labels and Roles:** + - **Implementation:** + - Add appropriate ARIA labels and roles to interactive elements (buttons, forms, navigation items). + +- **Keyboard Navigation:** + - **Implementation:** + - Ensure all interactive elements are reachable and operable via keyboard (e.g., using `tabIndex`). + +- **Color Contrast:** + - **Implementation:** + - Ensure sufficient color contrast for all text and UI elements, adhering to WCAG guidelines. + +- **Screen Reader Compatibility:** + - **Implementation:** + - Test the application with screen readers to ensure content is accessible and navigable. + +--- + +##### **16. Security Considerations:** + +- **API Key Management:** + - **Implementation:** + - Ensure that API keys (e.g., GROQ API key) are securely stored and not exposed in the frontend. + - Use environment variables and server-side handling to protect sensitive information. + +- **Input Validation:** + - **Implementation:** + - Validate all user inputs on both the client and server sides to prevent injection attacks. + +- **Error Messaging:** + - **Implementation:** + - Avoid exposing sensitive information in error messages. Provide user-friendly messages without revealing internal details. + +--- + +#### **Code Implementation Guidelines:** + +To ensure consistency and completeness, follow these guidelines during implementation: + +- **Component Structure:** + - Organize components in the `src/components/` directory, each in its own folder with related files (e.g., `ChatView.jsx`, `ChatView.test.jsx`, `ChatView.module.css`). 
+ +- **Styling:** + - Use **Tailwind CSS** for styling to ensure rapid development and responsiveness. + - Implement dark mode support using Tailwind's dark variant. + +- **State Management:** + - Use **React Context API** for global state management. + - Avoid prop drilling by leveraging Context where applicable. + +- **API Interaction:** + - Centralize API interactions within the `chatAPI.jsx` utility class. + - Ensure all API routes are correctly implemented and secured. + +- **Error Handling:** + - Implement try-catch blocks around all asynchronous operations. + - Use the `NotificationProvider` to display user-friendly error and success messages. + +- **Performance Optimization:** + - Implement lazy loading for heavy components. + - Use React's `Suspense` and `lazy` for code-splitting. + - Memoize components and functions using `React.memo`, `useMemo`, and `useCallback` to prevent unnecessary re-renders. + +--- + +#### **Sample Code Snippets:** + +Below are essential code snippets to guide your implementation. Ensure to adapt and expand these based on your specific requirements. + +--- + +##### **1. 
`chatAPI.jsx`** + +```jsx +// src/components/chatAPI.jsx + +const OLLAMA_BASE_URL = 'http://localhost:11434/api'; +const GROQ_BASE_URL = 'https://api.groq.com/openai/v1'; + +class ChatAPI { + constructor() { + this.apiKey = ''; + this.model = 'deepseek-coder-v2'; + this.systemPrompt = ''; + this.temperature = 0.7; + this.maxTokens = 1024; + this.topP = 1; + this.topK = 40; + this.stream = false; + this.useGroq = false; + this.retryAttempts = 3; + this.retryDelay = 1000; + } + + setApiKey(key) { + if (typeof key !== 'string' || key.trim() === '') { + throw new Error('Invalid API key'); + } + this.apiKey = key.trim(); + } + + setModel(model) { + if (typeof model !== 'string' || model.trim() === '') { + throw new Error('Invalid model name'); + } + this.model = model.trim(); + } + + setSystemPrompt(prompt) { + if (typeof prompt !== 'string') { + throw new Error('System prompt must be a string'); + } + this.systemPrompt = prompt; + } + + setTemperature(temp) { + if (typeof temp !== 'number' || temp < 0 || temp > 1) { + throw new Error('Temperature must be a number between 0 and 1'); + } + this.temperature = temp; + } + + setMaxTokens(tokens) { + if (!Number.isInteger(tokens) || tokens <= 0) { + throw new Error('Max tokens must be a positive integer'); + } + this.maxTokens = tokens; + } + + setTopP(value) { + if (typeof value !== 'number' || value < 0 || value > 1) { + throw new Error('Top P must be a number between 0 and 1'); + } + this.topP = value; + } + + setTopK(value) { + if (!Number.isInteger(value) || value < 0) { + throw new Error('Top K must be a non-negative integer'); + } + this.topK = value; + } + + setStream(value) { + this.stream = Boolean(value); + } + + setUseGroq(value) { + this.useGroq = Boolean(value); + } + + async sendMessage(messages) { + if (!Array.isArray(messages) || messages.length === 0) { + throw new Error('Messages must be a non-empty array'); + } + + if (this.systemPrompt && !messages.some(m => m.role === 'system')) { + messages.unshift({ 
role: 'system', content: this.systemPrompt }); + } + + const url = this.useGroq ? `${GROQ_BASE_URL}/chat/completions` : `${OLLAMA_BASE_URL}/chat`; + + const headers = { + 'Content-Type': 'application/json', + }; + + if (this.useGroq) { + if (!this.apiKey) { + throw new Error('GROQ API key is required'); + } + headers['Authorization'] = `Bearer ${this.apiKey}`; + } + + const body = this.useGroq + ? { + model: this.model, + messages: messages, + temperature: this.temperature, + max_tokens: this.maxTokens, + top_p: this.topP, + stream: this.stream, + } + : { + model: this.model, + messages: messages, + stream: this.stream, + options: { + temperature: this.temperature, + num_predict: this.maxTokens, + top_k: this.topK, + top_p: this.topP, + }, + }; + + for (let attempt = 0; attempt < this.retryAttempts; attempt++) { + try { + const response = await fetch(url, { + method: 'POST', + headers: headers, + body: JSON.stringify(body), + }); + + if (!response.ok) { + const errorData = await response.json().catch(() => ({})); + throw new Error(`API error (${response.status}): ${errorData.error || response.statusText}`); + } + + if (this.stream) { + return this.handleStreamResponse(response); + } else { + const data = await response.json(); + return this.useGroq ? 
data.choices[0].message : data.message; + } + } catch (error) { + if (attempt === this.retryAttempts - 1) { + throw error; + } + await new Promise(resolve => setTimeout(resolve, this.retryDelay)); + } + } + } + + async *handleStreamResponse(response) { + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split('\n'); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith('data: ')) { + const data = JSON.parse(line.slice(6)); + if (this.useGroq) { + yield data.choices[0].delta.content; + } else { + yield data.message.content; + } + } + } + } + } + + async createNewChat(name) { + return { + id: crypto.randomUUID(), + name: name || `Chat ${new Date().toLocaleString()}`, + messages: [], + createdAt: Date.now(), + updatedAt: Date.now(), + }; + } + + async deleteChat(chatId) { + // Implement actual deletion logic if needed + console.log(`Deleting chat with ID: ${chatId}`); + return { success: true, message: 'Chat deleted successfully' }; + } + + async clearChat(chatId) { + // Implement actual clear logic if needed + console.log(`Clearing chat with ID: ${chatId}`); + return { success: true, message: 'Chat cleared successfully' }; + } + + async exportChats(chats) { + const dataStr = JSON.stringify(chats, null, 2); + const blob = new Blob([dataStr], { type: 'application/json' }); + return URL.createObjectURL(blob); + } + + async importChats(file) { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.onload = (event) => { + try { + const chats = JSON.parse(event.target.result); + resolve(chats); + } catch (error) { + reject(new Error('Invalid JSON file')); + } + }; + reader.onerror = () => reject(new Error('Error reading file')); + reader.readAsText(file); + }); + } +} + +export const chatApi = new ChatAPI(); 
+``` + +--- + +##### **2. API Routes:** + +**a. GROQ Chat API Route (`/api/chat-groq/route.js`):** + +```javascript +// src/app/api/chat-groq/route.js + +import { NextResponse } from 'next/server'; + +export async function POST(req) { + try { + const { messages, model, temperature, max_tokens, top_p, stream } = await req.json(); + const authHeader = req.headers.get('Authorization') || ''; + const apiKeyMatch = authHeader.match(/^Bearer (.+)$/); + const apiKey = apiKeyMatch ? apiKeyMatch[1] : ''; + + if (!apiKey) { + return NextResponse.json({ error: 'GROQ API key is missing' }, { status: 401 }); + } + + const response = await fetch('https://api.groq.com/openai/v1/chat/completions', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${apiKey}`, + }, + body: JSON.stringify({ + model, + messages, + temperature, + max_tokens, + top_p, + stream, + }), + }); + + if (!response.ok) { + const errorData = await response.json(); + console.error('GROQ API error:', errorData); + return NextResponse.json({ error: 'Error from GROQ API', details: errorData }, { status: response.status }); + } + + if (stream) { + // Handle streaming response if necessary + // For simplicity, we'll not handle streaming here + return NextResponse.json({ error: 'Streaming not implemented in GROQ API route' }, { status: 501 }); + } else { + const data = await response.json(); + return NextResponse.json(data); + } + } catch (error) { + console.error('Error in chat-groq API route:', error); + return NextResponse.json({ error: 'Internal server error', details: error.message }, { status: 500 }); + } +} +``` + +**b. 
Ollama Chat API Route (`/api/chat-ollama/route.js`):** + +```javascript +// src/app/api/chat-ollama/route.js + +import { NextResponse } from 'next/server'; + +export async function POST(req) { + try { + const { messages, model, system_prompt, temperature, max_tokens, top_p, top_k, stream } = await req.json(); + + const response = await fetch('http://localhost:11434/api/chat', { // Adjust the URL if different + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + model, + messages, + system_prompt, + temperature, + max_tokens, + top_p, + top_k, + stream, + }), + }); + + if (!response.ok) { + const errorData = await response.json(); + console.error('Ollama API error:', errorData); + return NextResponse.json({ error: 'Error from Ollama API', details: errorData }, { status: response.status }); + } + + if (stream) { + // Handle streaming response if necessary + // For simplicity, we'll not handle streaming here + return NextResponse.json({ error: 'Streaming not implemented in Ollama API route' }, { status: 501 }); + } else { + const data = await response.json(); + return NextResponse.json(data); + } + } catch (error) { + console.error('Error in chat-ollama API route:', error); + return NextResponse.json({ error: 'Internal server error', details: error.message }, { status: 500 }); + } +} +``` + +**c. 
Ollama Models Fetching API Route (`/api/ollama-models/route.js`):** + +```javascript {.copy-button} +// src/app/api/ollama-models/route.js + +import { NextResponse } from 'next/server'; + +export async function GET() { + try { + const response = await fetch('http://localhost:11434/api/tags'); + + if (!response.ok) { + throw new Error(`Failed to fetch models: ${response.statusText}`); + } + + const data = await response.json(); + const models = data.models.map(model => model.name); + return NextResponse.json({ models }); + } catch (error) { + console.error('Error fetching models:', error); + return NextResponse.json({ error: error.message }, { status: 500 }); + } +} +``` + +--- + +##### **3. `Dashboard.jsx`** + +```jsx +// src/app/dashboardV7/page.jsx + +'use client'; + +import React, { useState, useEffect, useRef } from 'react'; +import { + Triangle, + Download, + Upload, + Settings2, + Trash2, + Sun, + Moon, + Send, + Mic, + Bot, + CornerDownLeft, + Paperclip, + Plus, + Loader2, + ChevronRight, + ChevronLeft, + MessageSquare, + Workflow, + FileUp, + Wrench, +} from 'lucide-react'; +import { ScrollArea } from '@/components/ui/scroll-area'; +import { Badge } from '@/components/ui/badge'; +import { Button } from '@/components/ui/button'; +import { + Drawer, + DrawerContent, + DrawerDescription, + DrawerHeader, + DrawerTitle, + DrawerTrigger, +} from '@/components/ui/drawer'; +import { Input } from '@/components/ui/input'; +import { Label } from '@/components/ui/label'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select'; +import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar"; +import ReactMarkdown from 'react-markdown'; +import { Textarea } from '@/components/ui/textarea'; +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'; +import { Switch } from '@/components/ui/switch'; +import { Slider } from '@/components/ui/slider'; +import { 
cn } from "@/lib/utils"; +import { chatApi } from '@/components/chatApi'; +// Import new components +import ChatView from '@/components/ChatView'; +import WorkflowBuilder from '@/components/WorkflowBuilder'; +import FileUploader from '@/components/FileUploader'; +import ToolingConfiguration from '@/components/ToolingConfiguration'; +import NotificationProvider, { useNotification } from '@/components/ui/NotificationProvider'; +import Settings from '@/components/Settings'; +import TopNavigation from '@/components/TopNavigation'; +import SideNavigation from '@/components/SideNavigation'; + +const initialState = { + chats: [], + currentChatId: null, + apiKey: '', + settings: { + api: 'ollama', + model: 'deepseek-coder-v2', + temperature: 0.7, + maxTokens: 1024, + topP: 1, + topK: 0, + stream: false, + darkMode: false, + useGroq: false, + }, + systemPrompt: '', +}; + +const STORAGE_KEY = 'quantumNexusState'; + +const loadState = () => { + if (typeof window === 'undefined') return initialState; + const saved = localStorage.getItem(STORAGE_KEY); + return saved ? JSON.parse(saved) : initialState; +}; + +const saveState = (state) => { + localStorage.setItem(STORAGE_KEY, JSON.stringify(state)); +}; + +export default function Dashboard() { + const addNotification = useNotification(); + const [state, setState] = useState(loadState); + const [input, setInput] = useState(''); + const [isLoading, setIsLoading] = useState(false); + const [isSpeaking, setIsSpeaking] = useState(false); + const [currentView, setCurrentView] = useState('chat'); + const [isChatCollapsed, setIsChatCollapsed] = useState(false); + const [isSidebarExpanded, setIsSidebarExpanded] = useState(true); + + const chatContainerRef = useRef(null); + const speechSynthesisInstance = typeof window !== 'undefined' ? window.speechSynthesis : null; + const SpeechRecognition = + typeof window !== 'undefined' + ? window.SpeechRecognition || window.webkitSpeechRecognition + : null; + const recognition = SpeechRecognition ? 
new SpeechRecognition() : null; + + const [availableModels, setAvailableModels] = useState({ + ollama: ['deepseek-coder-v2'], + groq: [ + { + label: 'llama-3.1-70b-versatile', + value: 'llama-3.1-70b-versatile', + }, + ], + }); + + useEffect(() => { + fetchOllamaModels(); + }, []); + + useEffect(() => { + saveState(state); + if (state.settings.darkMode) { + document.documentElement.classList.add('dark'); + } else { + document.documentElement.classList.remove('dark'); + } + }, [state]); + + useEffect(() => { + if (chatContainerRef.current) { + chatContainerRef.current.scrollTop = chatContainerRef.current.scrollHeight; + } + }, [state.currentChatId, state.chats]); + + const fetchOllamaModels = async () => { + try { + const response = await fetch('/api/ollama-models'); + const data = await response.json(); + if (!response.ok) { + throw new Error(data.error || 'Failed to fetch models'); + } + const formattedModels = data.models.map((model) => ({ + label: model, + value: model, + })); + setAvailableModels((prev) => ({ ...prev, ollama: formattedModels })); + } catch (error) { + console.error('Error fetching Ollama models:', error); + addNotification('Failed to fetch Ollama models', 'error'); + } + }; + + const currentChat = state.chats.find((chat) => chat.id === state.currentChatId) || null; + + const handleSubmit = async (e) => { + e.preventDefault(); + if (!input.trim() || !currentChat) return; + + const newMessage = { + id: Date.now().toString(), + role: 'user', + content: input, + timestamp: Date.now(), + }; + const updatedMessages = [...currentChat.messages, newMessage]; + + setState((prev) => ({ + ...prev, + chats: prev.chats.map((chat) => + chat.id === currentChat.id + ? 
{ ...chat, messages: updatedMessages, updatedAt: Date.now() } + : chat + ), + })); + setInput(''); + setIsLoading(true); + + try { + let response; + if (state.settings.api === 'groq') { + if (!state.apiKey) { + throw new Error('GROQ API key is not set'); + } + response = await fetch('/api/chat-groq', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${state.apiKey}`, + }, + body: JSON.stringify({ + messages: updatedMessages, + model: state.settings.model, + temperature: state.settings.temperature, + max_tokens: state.settings.maxTokens, + top_p: state.settings.topP, + stream: state.settings.stream, + }), + }); + + if (!response.ok) { + const errorData = await response.json(); + console.error('Error from GROQ API:', errorData); + throw new Error(`GROQ API error: ${errorData.error || 'Unknown error'}`); + } + } else if (state.settings.api === 'ollama') { + response = await fetch('/api/chat-ollama', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + messages: updatedMessages, + model: state.settings.model, + system_prompt: state.systemPrompt, + temperature: state.settings.temperature, + max_tokens: state.settings.maxTokens, + top_p: state.settings.topP, + top_k: state.settings.topK, + stream: state.settings.stream + }) + }); + } else { + throw new Error('Invalid API selected'); + } + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.error || `HTTP error! status: ${response.status}`); + } + + if (state.settings.stream) { + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let botMessage = ''; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + botMessage += decoder.decode(value, { stream: true }); + setState((prev) => ({ + ...prev, + chats: prev.chats.map((chat) => + chat.id === currentChat.id + ? 
{ + ...chat, + messages: [ + ...updatedMessages, + { + id: 'assistant-' + Date.now().toString(), + role: 'assistant', + content: botMessage, + timestamp: Date.now(), + }, + ], + updatedAt: Date.now(), + } + : chat + ), + })); + } + } else { + const data = await response.json(); + const assistantMessage = state.settings.api === 'groq' ? data.choices[0].message.content : data.message.content; + setState((prev) => ({ + ...prev, + chats: prev.chats.map((chat) => + chat.id === currentChat.id + ? { + ...chat, + messages: [ + ...updatedMessages, + { + id: 'assistant-' + Date.now().toString(), + role: 'assistant', + content: assistantMessage, + timestamp: Date.now(), + }, + ], + updatedAt: Date.now(), + } + : chat + ), + })); + } + } catch (error) { + console.error('Error calling API:', error); + addNotification(`Error communicating with the chatbot: ${error.message}`, 'error'); + } finally { + setIsLoading(false); + } + }; + + const createNewChat = async () => { + try { + const newChat = await chatApi.createNewChat(`New Chat ${state.chats.length + 1}`); + setState((prev) => ({ + ...prev, + chats: [...prev.chats, newChat], + currentChatId: newChat.id, + })); + addNotification(`New chat "${newChat.name}" created!`, 'success'); + } catch (error) { + console.error('Error creating new chat:', error); + addNotification('Failed to create a new chat.', 'error'); + } + }; + + const loadSelectedChat = (chatId) => { + setState((prev) => ({ + ...prev, + currentChatId: chatId, + })); + }; + + const deleteChat = async (chatId) => { + try { + const result = await chatApi.deleteChat(chatId); + if (result.success) { + setState((prev) => ({ + ...prev, + chats: prev.chats.filter((chat) => chat.id !== chatId), + currentChatId: prev.currentChatId === chatId ? 
null : prev.currentChatId, + })); + addNotification(result.message, 'success'); + } else { + throw new Error(result.message); + } + } catch (error) { + console.error('Error deleting chat:', error); + addNotification('Failed to delete chat.', 'error'); + } + }; + + const clearChat = async () => { + if (currentChat) { + try { + const result = await chatApi.clearChat(currentChat.id); + if (result.success) { + setState((prev) => ({ + ...prev, + chats: prev.chats.map((chat) => + chat.id === currentChat.id ? { ...chat, messages: [], updatedAt: Date.now() } : chat + ), + })); + addNotification(result.message, 'success'); + } else { + throw new Error(result.message); + } + } catch (error) { + console.error('Error clearing chat:', error); + addNotification('Failed to clear chat.', 'error'); + } + } + }; + + const exportChats = async () => { + try { + const dataUri = await chatApi.exportChats(state.chats); + const exportFileDefaultName = 'quantum_nexus_chats.json'; + const linkElement = document.createElement('a'); + linkElement.setAttribute('href', dataUri); + linkElement.setAttribute('download', exportFileDefaultName); + document.body.appendChild(linkElement); + linkElement.click(); + document.body.removeChild(linkElement); + URL.revokeObjectURL(dataUri); + addNotification('Chats exported successfully!', 'success'); + } catch (error) { + console.error('Error exporting chats:', error); + addNotification('Failed to export chats.', 'error'); + } + }; + + const importChats = async (event) => { + const file = event.target.files && event.target.files[0]; + if (file) { + try { + const importedChats = await chatApi.importChats(file); + setState((prev) => ({ + ...prev, + chats: [...prev.chats, ...importedChats], + })); + addNotification('Chats imported successfully!', 'success'); + } catch (error) { + console.error('Error importing chats:', error); + addNotification('Failed to import chats. 
Please check the file format.', 'error'); + } + } + }; + + const saveApiKey = (key) => { + setState((prev) => ({ ...prev, apiKey: key })); + addNotification('API Key saved successfully!', 'success'); + }; + + const setSystemPrompt = (prompt) => { + setState((prev) => ({ ...prev, systemPrompt: prompt })); + addNotification('System prompt set successfully!', 'success'); + }; + + const resetSettings = () => { + setState((prev) => ({ + ...prev, + settings: initialState.settings, + })); + addNotification('Settings reset to default values!', 'success'); + }; + + const downloadChatTranscript = () => { + if (!currentChat) return; + try { + const transcript = currentChat.messages.map((m) => `${m.role}: ${m.content}`).join('\n\n'); + const blob = new Blob([transcript], { type: 'text/plain' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `${currentChat.name}_transcript.txt`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + addNotification('Chat transcript downloaded successfully!', 'success'); + } catch (error) { + console.error('Error downloading transcript:', error); + addNotification('Failed to download transcript.', 'error'); + } + }; + + const toggleDarkMode = () => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, darkMode: !prev.settings.darkMode }, + })); + }; + + const startVoiceInput = () => { + if (recognition) { + recognition.onresult = (event) => { + const transcript = event.results[0][0].transcript; + setInput(transcript); + }; + recognition.start(); + addNotification('Listening...', 'info'); + } else { + addNotification('Speech recognition not supported in this browser.', 'error'); + } + }; + + const speakMessage = (message) => { + if (speechSynthesisInstance) { + if (isSpeaking) { + speechSynthesisInstance.cancel(); + setIsSpeaking(false); + } else { + const utterance = new SpeechSynthesisUtterance(message); + 
utterance.onend = () => setIsSpeaking(false); + speechSynthesisInstance.speak(utterance); + setIsSpeaking(true); + } + } else { + addNotification('Text-to-speech not supported in this browser.', 'error'); + } + }; + + const toggleChatCollapse = () => { + setIsChatCollapsed(!isChatCollapsed); + }; + + const toggleSidebar = () => { + setIsSidebarExpanded(!isSidebarExpanded); + }; + + const renderCurrentView = () => { + switch (currentView) { + case 'chat': + return ( +
+
+

Chat

+ +
+ {!isChatCollapsed && ( + + )} +
+ ); + case 'workflow': + return ; + case 'fileUpload': + return ; + case 'tooling': + return ; + default: + return null; + } + }; + + return ( + +
+ {/* Top Navigation */} + + + {/* Collapsible Sidebar */} + + + {/* Main Content */} +
+ {/* Header */} +
+

+ {currentView === 'chat' ? (currentChat?.name || 'Select a chat') : getHeaderTitle(currentView)} +

+
+ {currentView === 'chat' && ( + <> + + + + {currentChat && ( + <> + + + + )} + + )} +
+
+ + {/* Main View */} + + {renderCurrentView()} + + + {/* Input Form (Only in Chat View) */} + {currentView === 'chat' && !isChatCollapsed && ( +
+
+ setInput(e.target.value)} + placeholder="Type your message..." + className="flex-1 bg-gray-100 dark:bg-gray-700 border-gray-300 dark:border-gray-600 focus:ring-purple-500 focus:border-purple-500 transition-all duration-200" + /> + + + + + + Send message + + + + + + Start voice input + + +
+
+ )} +
+
+
+ ); // Add this closing parenthesis +} + + // Helper function to get header title based on view + function getHeaderTitle(view) { + switch (view) { + case 'workflow': + return 'Agent Workflow Builder'; + case 'fileUpload': + return 'File Uploader'; + case 'tooling': + return 'Tooling & Configuration'; + default: + return 'Quantum Nexus'; + } + } +``` + +--- + +##### **4. `Settings.jsx`** + +```jsx +// src/components/Settings.jsx + +'use client'; + +import React, { useState, useEffect } from 'react'; +import { + Drawer, + DrawerContent, + DrawerDescription, + DrawerHeader, + DrawerTitle, + DrawerTrigger, +} from '@/components/ui/drawer'; +import { ScrollArea } from '@/components/ui/scroll-area'; +import { Label } from '@/components/ui/label'; +import { Input } from '@/components/ui/input'; +import { Textarea } from '@/components/ui/textarea'; +import { Button } from '@/components/ui/button'; +import { Settings2, Sun, Moon } from 'lucide-react'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select'; +import { Slider } from '@/components/ui/slider'; +import { Switch } from '@/components/ui/switch'; +import PropTypes from 'prop-types'; + +export default function Settings({ + state, + setState, + saveApiKey, + setSystemPrompt, + resetSettings, + toggleDarkMode, + availableModels, +}) { + const handleApiChange = (value) => { + const useGroq = value === 'groq'; + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, api: value, useGroq }, + })); + }; + + const handleModelChange = (value) => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, model: value }, + })); + }; + + const handleTemperatureChange = (value) => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, temperature: value[0] }, + })); + }; + + const handleMaxTokensChange = (value) => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, maxTokens: value[0] }, + })); + }; + + const 
handleTopPChange = (value) => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, topP: value[0] }, + })); + }; + + const handleTopKChange = (value) => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, topK: value[0] }, + })); + }; + + const handleStreamChange = (checked) => { + setState((prev) => ({ + ...prev, + settings: { ...prev.settings, stream: checked }, + })); + }; + + return ( +
+ {/* API Selection */} +
+ + +
+ + {/* GROQ API Key Input */} + {state.settings.useGroq && ( +
+ + saveApiKey(e.target.value)} + placeholder="Enter your GROQ API key" + className="w-full mt-1" + /> +
+ )} + + {/* Model Selection */} +
+ + +
+ + {/* Temperature Slider */} +
+ + +
+ + {/* Max Tokens Slider */} +
+ + +
+ + {/* Top P Slider */} +
+ + +
+ + {/* Top K Slider */} +
+ + +
+ + {/* Stream Responses Switch */} +
+ + +
+ + {/* System Prompt Textarea */} +
+ +