Binary file removed Breify Note.pdf
Binary file not shown.
90 changes: 90 additions & 0 deletions src/memora/ai/conversational_ai.py
@@ -0,0 +1,90 @@
import ollama
from pathlib import Path
from memora.core.ingestion import extract_facts
from memora.core.store import ObjectStore


class MemoraChat:

    def __init__(self):
        self.client = ollama.Client()
        self.store = ObjectStore(Path(".memora"))

    def extract_code_blocks(self, message: str):
        code_blocks = []

        # Fenced blocks: odd-indexed segments between ``` markers are code.
        if "```" in message:
            parts = message.split("```")
            for i in range(1, len(parts), 2):
                block = parts[i].strip()
                if block:
                    code_blocks.append(block)

        # Fallback: no fences, but the whole message looks like Python code.
        if not code_blocks:
            if any(k in message for k in ["def ", "return ", "class ", "import "]):
                code_blocks.append(message.strip())

        return code_blocks

    def needs_memory(self, message: str) -> bool:
        message = message.lower()
        words = message.split()

        # Single-word triggers are matched as whole tokens so that "i" or
        # "me" does not fire on every message that merely contains those
        # letters; phrase triggers are matched as substrings.
        word_triggers = ["my", "i", "me", "remember", "code", "earlier", "before"]
        phrase_triggers = ["what did i", "what do i", "who am i"]

        return (any(w in words for w in word_triggers)
                or any(p in message for p in phrase_triggers))

    def chat(self, message: str):

        message_lower = message.lower()

        # ✅ STEP 1: store normal facts
        if len(message.strip()) > 3:
            facts = extract_facts(message, "user")
            for f in facts:
                self.store.write(f)

        # ✅ STEP 2: store code (SAFE METHOD)
        code_blocks = self.extract_code_blocks(message)

        for code in code_blocks:
            fact_text = f"user wrote code {code}"
            facts = extract_facts(fact_text, "user")

            for f in facts:
                self.store.write(f)

        # ✅ STEP 3: retrieve memory
        if self.needs_memory(message):

            all_hashes = self.store.list_all_hashes()

            # 🔥 CODE RETRIEVAL
            if "code" in message_lower:
                # Newest-first scan: return the most recently stored code fact.
                for h in reversed(all_hashes):
                    try:
                        fact = self.store.read_fact(h)

                        content = (getattr(fact, "content", "") or "").lower()

                        if "code" in content or "def " in content:
                            return f"Here is your code:\n{fact.content}"

                    except Exception:
                        # Skip unreadable facts rather than aborting the chat.
                        continue

        # ✅ STEP 4: fallback AI
        try:
            response = self.client.chat(
                model="llama3.2",
                messages=[{"role": "user", "content": message}]
            )

            return response["message"]["content"].strip()

        except Exception:
            return "Something went wrong while contacting the model."
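For reviewers who want to try the new chat flow locally, a minimal smoke test might look like the sketch below. It assumes a running Ollama daemon with the llama3.2 model pulled, a writable working directory for the .memora store, and that extract_facts/ObjectStore behave as in memora.core; the sample messages and snippet are made up for illustration.

from memora.ai.conversational_ai import MemoraChat

chat = MemoraChat()

# Store a plain fact, then a fenced code snippet.
chat.chat("My name is Alice.")
chat.chat("```\ndef greet(name):\n    return 'Hello, ' + name\n```")

# "code" triggers memory retrieval, so this should echo the stored snippet.
print(chat.chat("show me my code"))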
32 changes: 32 additions & 0 deletions src/memora/ai/file_processor.py
@@ -0,0 +1,32 @@
from pathlib import Path
from memora.core.ingestion import extract_facts
from memora.core.store import ObjectStore


class FileProcessor:

    def __init__(self):
        self.store = ObjectStore(Path(".memora"))

    def process_file(self, file_path: str):

        path = Path(file_path)

        if not path.exists():
            raise FileNotFoundError(f"File not found: {path}")

        # 1. Read file
        if path.suffix in (".txt", ".md"):
            with open(path, "r", encoding="utf-8") as f:
                content = f.read()
        else:
            raise ValueError("Only .txt and .md files are supported")

        # 2. Extract facts
        facts = extract_facts(content, f"file:{path.name}")

        # 3. Store facts
        for fact in facts:
            self.store.write(fact)

        return len(facts)
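And a quick way to exercise the file processor (the notes.md path is hypothetical; any local .txt or .md file works):

from memora.ai.file_processor import FileProcessor

processor = FileProcessor()

# Ingest a local markdown file and report how many facts were stored.
count = processor.process_file("notes.md")
print(f"Stored {count} facts from notes.md")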