# docker-compose.yml
# ProcessPulse - Complete Docker Deployment
#
# DISK SPACE REQUIREMENTS:
# - Docker images: ~2.5 GB
# - AI Models (downloaded on first run):
#   - Chat model (llama3.1:8b): ~4.7 GB
#   - Embedding model (nomic-embed-text): ~275 MB
# - Total: ~7.5 GB minimum
#
# USAGE:
#   docker-compose up -d
#
# First run will take 5-15 minutes to download AI models.
#
# NOTE: the top-level `version` attribute is obsolete in the Compose
# Specification (Compose v2 ignores it and prints a warning), so it is
# intentionally omitted.

services:
# ===========================================
# Frontend (React + nginx)
# ===========================================
frontend:
build:
context: ./frontend
dockerfile: Dockerfile
container_name: processpulse-frontend
ports:
- "${FRONTEND_PORT:-80}:80"
depends_on:
backend:
condition: service_healthy
restart: unless-stopped
networks:
- processpulse-network
# ===========================================
# Backend (FastAPI)
# ===========================================
backend:
build:
context: .
dockerfile: Dockerfile
container_name: processpulse-backend
environment:
- DATABASE_URL=sqlite+aiosqlite:///./data/process_analyzer.db
- OLLAMA_BASE_URL=http://ollama:11434
- PERPLEXICA_BASE_URL=http://perplexica:3000
- DEBUG=${DEBUG:-false}
volumes:
- processpulse-data:/app/data
depends_on:
ollama-init:
condition: service_completed_successfully
restart: unless-stopped
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
networks:
- processpulse-network
# ===========================================
# Ollama (Local AI)
# ===========================================
ollama:
image: ollama/ollama:latest
container_name: processpulse-ollama
volumes:
- ollama-models:/root/.ollama
ports:
- "${OLLAMA_PORT:-11434}:11434"
restart: unless-stopped
# Uncomment below for GPU support (requires nvidia-docker)
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
networks:
- processpulse-network
# ===========================================
# Ollama Model Initializer
# Downloads required models on first run
# ===========================================
ollama-init:
image: ollama/ollama:latest
container_name: processpulse-ollama-init
depends_on:
- ollama
volumes:
- ollama-models:/root/.ollama
environment:
- OLLAMA_HOST=http://ollama:11434
entrypoint: ["/bin/sh", "-c"]
command:
- |
echo "=========================================="
echo "ProcessPulse Model Initializer"
echo "=========================================="
echo ""
echo "Waiting for Ollama to be ready..."
sleep 10
# Check if Ollama is responding
until curl -s http://ollama:11434/api/tags > /dev/null 2>&1; do
echo "Waiting for Ollama..."
sleep 5
done
echo "Ollama is ready!"
echo ""
# Pull chat model (~4.7 GB)
echo "=========================================="
echo "Downloading chat model: ${CHAT_MODEL:-llama3.1:8b}"
echo "Size: ~4.7 GB - This may take 5-10 minutes..."
echo "=========================================="
curl -X POST http://ollama:11434/api/pull -d '{"name": "${CHAT_MODEL:-llama3.1:8b}"}'
echo ""
# Pull embedding model (~275 MB)
echo "=========================================="
echo "Downloading embedding model: ${EMBEDDING_MODEL:-nomic-embed-text}"
echo "Size: ~275 MB"
echo "=========================================="
curl -X POST http://ollama:11434/api/pull -d '{"name": "${EMBEDDING_MODEL:-nomic-embed-text}"}'
echo ""
echo "=========================================="
echo "All models downloaded successfully!"
echo "=========================================="
# List available models
echo "Available models:"
curl -s http://ollama:11434/api/tags | grep -o '"name":"[^"]*"' | cut -d'"' -f4
networks:
- processpulse-network
# ===========================================
# Perplexica (AI-Powered Web Search)
# ===========================================
perplexica-backend:
image: ghcr.io/itzcrazy/perplexica-backend:main
container_name: processpulse-perplexica-backend
environment:
- SEARXNG_API_URL=http://searxng:8080
- OLLAMA_API_URL=http://ollama:11434
depends_on:
- ollama
- searxng
restart: unless-stopped
networks:
- processpulse-network
perplexica:
image: ghcr.io/itzcrazy/perplexica-frontend:main
container_name: processpulse-perplexica
environment:
- NEXT_PUBLIC_API_URL=http://perplexica-backend:3001
- NEXT_PUBLIC_WS_URL=ws://perplexica-backend:3001
ports:
- "${PERPLEXICA_PORT:-3000}:3000"
depends_on:
- perplexica-backend
restart: unless-stopped
networks:
- processpulse-network
# ===========================================
# SearXNG (Search Engine for Perplexica)
# ===========================================
searxng:
image: searxng/searxng:latest
container_name: processpulse-searxng
environment:
- SEARXNG_BASE_URL=http://localhost:8080
volumes:
- searxng-data:/etc/searxng
restart: unless-stopped
networks:
- processpulse-network
# ===========================================
# Persistent Volumes
# ===========================================
volumes:
  processpulse-data:
    name: processpulse-data
  ollama-models:
    name: processpulse-ollama-models
  searxng-data:
    name: processpulse-searxng-data

# ===========================================
# Network
# ===========================================
networks:
  processpulse-network:
    name: processpulse-network
    driver: bridge