# docker-compose.yml
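# Three services: the MemoryOS backend API (port 8765), a Node dashboard
# (port 3000; NEXT_PUBLIC_API_URL suggests Next.js), and Ollama (port 11434)
# as the default summarizer provider.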
version: "3.9"  # ignored by Compose v2 but harmless; kept for older Compose releases

services:
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: memoryos-api
    ports:
      - "8765:8765"
    volumes:
      - memoryos_data:/root/.memoryos  # persisted memory store
      - ./backend:/app                 # source mount for live development
    environment:
      - HOST=0.0.0.0
      - PORT=8765
      - DEBUG=false
      - DATA_DIR=/root/.memoryos
      # ${VAR:-default} reads from the shell or an .env file, falling back to the default
      - SUMMARIZER_PROVIDER=${SUMMARIZER_PROVIDER:-ollama}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
      - GROQ_API_KEY=${GROQ_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
    depends_on:
      - ollama
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8765/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  frontend:
    image: node:20-alpine
    container_name: memoryos-dashboard
    working_dir: /app
    ports:
      - "3000:3000"
    volumes:
      - ./frontend:/app
    command: sh -c "npm install && npm run dev"
    environment:
      - NEXT_PUBLIC_API_URL=http://localhost:8765
    depends_on:
      - backend
    restart: unless-stopped

  ollama:
    image: ollama/ollama:latest
    container_name: memoryos-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_models:/root/.ollama  # cache pulled models across restarts
    restart: unless-stopped
    # GPU support (uncomment if you have an NVIDIA GPU):
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]

volumes:
  memoryos_data:
  ollama_models:
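
# --- Usage sketch (assumes the Docker Compose v2 CLI; "llama3" is an
# illustrative model name, not one confirmed by this repo) ---
#
#   docker compose up -d --build                   # build the backend image and start all three services
#   docker compose exec ollama ollama pull llama3  # fetch a model into the ollama_models volume
#   docker compose logs -f backend                 # watch startup; the healthcheck polls /health
#
# Dashboard: http://localhost:3000    API: http://localhost:8765
#
# To use a hosted provider instead of Ollama, set SUMMARIZER_PROVIDER
# (presumably groq or openai, given the key names above) along with the
# matching API key in an .env file next to this compose file.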