"""
Example usage of the TinyHumans SDK.
Install with the "examples" extra to get python-dotenv: pip install -e ".[examples]"
Copy .env.example to .env and set TINYHUMANS_TOKEN, TINYHUMANS_MODEL_ID, OPENAI_API_KEY.
Optional: set TINYHUMANSAI_LOG_LEVEL=DEBUG to print outbound API requests.
"""
import logging
import os
import time
from dotenv import load_dotenv
load_dotenv()
if os.environ.get("TINYHUMANSAI_LOG_LEVEL") and not logging.getLogger().handlers:
logging.basicConfig(level=logging.INFO)
import tinyhumansai as api
client = api.TinyHumanMemoryClient(os.environ["TINYHUMANS_TOKEN"])
# Ingest (upsert) a single memory
result = client.ingest_memory(
item={
"key": "user-preference-theme",
"content": "User prefers dark mode",
"namespace": "preferences",
"metadata": {"source": "onboarding"},
"created_at": time.time(), # Optional: Unix timestamp (seconds)
"updated_at": time.time(), # Optional: Unix timestamp (seconds)
}
)
print(result) # IngestMemoryResponse(ingested=1, updated=0, errors=0)
# Or ingest multiple at once: client.ingest_memories(items=[...])
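# A minimal batch sketch, assuming ingest_memories accepts a list of the same
# item dicts that ingest_memory takes above (uncomment to run):
# client.ingest_memories(
#     items=[
#         {"key": "user-preference-language", "content": "User prefers English", "namespace": "preferences"},
#         {"key": "user-preference-timezone", "content": "User is in UTC+2", "namespace": "preferences"},
#     ]
# )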
# Get LLM context (the prompt retrieves relevant chunks; num_chunks limits how many).
ctx = client.recall_memory(
    namespace="preferences",
    prompt="What is the user's preference for theme?",
    num_chunks=10,
)
print(ctx.context)
# (Optional) Query an LLM with the recalled context (bring your own provider
# API key). Built-in providers: "openai", "anthropic", "google".
response = client.recall_with_llm(
    prompt="What is the user's preference for theme?",
    provider="openai",
    model="gpt-4o-mini",
    api_key=os.environ["OPENAI_API_KEY"],
    context=ctx.context,
)
print(response.text)
# Custom provider (any OpenAI-compatible chat completions API):
# response = client.recall_with_llm(
#     prompt="What is the user's preference for theme?",
#     provider="custom",
#     model="your-model-name",
#     api_key="your-api-key",
#     url="https://api.example.com/v1/chat/completions",
#     context=ctx.context,
# )
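# A hedged sketch for another built-in provider ("anthropic"), assuming the
# same call shape as above; the model name and env var are placeholders:
# response = client.recall_with_llm(
#     prompt="What is the user's preference for theme?",
#     provider="anthropic",
#     model="claude-3-5-haiku-latest",  # placeholder model name
#     api_key=os.environ["ANTHROPIC_API_KEY"],  # placeholder env var
#     context=ctx.context,
# )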
# Delete all memories in a namespace. The current API exposes namespace-wide
# deletion, not key-scoped deletion.
client.delete_memory(namespace="preferences", delete_all=True)