-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathDockerfile.gpu
More file actions
154 lines (124 loc) · 5.17 KB
/
Dockerfile.gpu
File metadata and controls
154 lines (124 loc) · 5.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
# syntax=docker/dockerfile:1
# ── Stage 1: Build the Vue frontend ──────────────────────────────────────────
FROM node:22-slim AS frontend-builder
WORKDIR /build/frontend
# Copy only the lockfiles first so the `npm ci` layer stays cached until the
# dependency manifest actually changes — source edits won't reinstall deps.
COPY frontend/package*.json ./
RUN npm ci
COPY frontend/ ./
# Copy pyproject.toml so Vite can read the version at build time
COPY pyproject.toml /build/pyproject.toml
RUN npm run build
# ── Stage 2: Build Python venv with all dependencies ─────────────────────────
# Use the CUDA image so torch can link against the right CUDA version at build
# time, but we only copy the finished venv into the final stage.
FROM nvidia/cuda:12.8.1-cudnn-runtime-ubuntu24.04 AS python-builder
# Build-stage only: this ENV never reaches the runtime image, since only
# /opt/venv is copied out of this stage.
ENV DEBIAN_FRONTEND=noninteractive
# Compilers and -dev headers are needed here so pip can build any packages
# that ship without prebuilt wheels; none of this reaches the final image.
RUN apt-get update && apt-get install -y --no-install-recommends \
python3.12 \
python3.12-venv \
python3-pip \
python3.12-dev \
build-essential \
libgl1 \
libglib2.0-0 \
libgomp1 \
libheif-dev \
libde265-dev \
libx265-dev \
&& rm -rf /var/lib/apt/lists/*
# Make `python` / `python3` resolve to python3.12 for the venv creation below.
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 \
&& update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1
# Self-contained venv at a fixed path so the runtime stage can copy it as-is.
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
RUN pip install --no-cache-dir --upgrade pip wheel setuptools
# PyTorch with CUDA 12.8 — required for Blackwell (RTX 5xxx / sm_120) support.
# Must be installed before open_clip_torch pulls in a CPU-only torch.
# ORDERING MATTERS: do not merge or reorder these three pip RUNs.
RUN pip install --no-cache-dir \
torch \
torchvision \
--index-url https://download.pytorch.org/whl/cu128
# onnxruntime-gpu replaces plain onnxruntime for CUDA inference
RUN pip install --no-cache-dir onnxruntime-gpu
# All other dependencies (onnxruntime already satisfied by onnxruntime-gpu above)
# NOTE(review): versions are unpinned (hadolint DL3013) — rebuilds are not
# reproducible; consider a constraints/lock file if that becomes a problem.
RUN pip install --no-cache-dir \
open_clip_torch \
fastapi \
"uvicorn[standard]" \
numpy \
pillow \
opencv-python-headless \
scipy \
platformdirs \
tomli \
colorlog \
httpx \
python-multipart \
requests \
transformers \
insightface \
rapidfuzz \
tqdm \
einops \
sentence_transformers \
spacy \
pillow-heif \
sqlmodel \
alembic \
"python-jose[cryptography]" \
passlib \
"bcrypt<4.0.0" \
nvidia-ml-py \
piexif \
psutil \
python-dotenv \
accelerate
# Download spaCy English model
# Baked into the venv here so the runtime container needs no network access
# for it — the model installs as a pip package inside /opt/venv.
RUN python -m spacy download en_core_web_sm
# ── Stage 3: Lean runtime image ───────────────────────────────────────────────
# Start from the same CUDA base but only copy the finished venv — no build
# tools, no dev headers, no pip cache, no compiler output.
FROM nvidia/cuda:12.8.1-cudnn-runtime-ubuntu24.04
# ARG, not ENV: noninteractive is a build-time concern and must not leak into
# the running container's environment (Docker build-check anti-pattern).
ARG DEBIAN_FRONTEND=noninteractive
# Only runtime system libraries — no build-essential, no python3.12-dev.
# curl is kept deliberately: the HEALTHCHECK below depends on it.
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.12 \
    python3.12-venv \
    libgl1 \
    libglib2.0-0 \
    libgomp1 \
    libheif1 \
    libde265-0 \
    libx265-199 \
    curl \
    && rm -rf /var/lib/apt/lists/*
# Make `python` / `python3` resolve to python3.12 for the venv's shebangs.
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1
# Create non-root user BEFORE copying the venv so we can use --chown at copy
# time — avoids a separate chown layer that would double the venv's disk cost.
# Fixed numeric UID/GID (10001) lets orchestrators verify runAsNonRoot.
RUN groupadd -f -g 10001 pixlstash \
    && useradd -r -u 10001 -g 10001 -m -d /home/pixlstash pixlstash
# Copy the fully-built venv with correct ownership in a single layer
COPY --from=python-builder --chown=pixlstash:pixlstash /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
WORKDIR /app
# pixlstash must own /app: the editable install below writes metadata here.
RUN chown pixlstash:pixlstash /app
USER pixlstash
# ── Copy application source ───────────────────────────────────────────────────
COPY --chown=pixlstash:pixlstash pyproject.toml setup.py MANIFEST.in alembic.ini ./
COPY --chown=pixlstash:pixlstash pixlstash/ pixlstash/
# Install the pixlstash package itself (no deps — already installed above)
RUN pip install --no-cache-dir --no-deps -e .
# Copy the pre-built frontend into the package's expected location
# NOTE(review): the source path assumes Vite's outDir is ../pixlstash/frontend/dist
# relative to /build/frontend — confirm against frontend/vite.config.*.
COPY --chown=pixlstash:pixlstash --from=frontend-builder /build/pixlstash/frontend/dist pixlstash/frontend/dist/
# ── Entrypoint ────────────────────────────────────────────────────────────────
USER root
COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
USER pixlstash
# Ensure $HOME always points at the mounted volume regardless of which UID
# --user maps to at runtime (the UID may match a different user in /etc/passwd).
ENV HOME=/home/pixlstash
# Volume for persistent data — mount /home/pixlstash to persist config, images,
# downloaded models, and the database across container restarts.
VOLUME ["/home/pixlstash"]
EXPOSE 9537
# Cheap liveness probe so orchestrators can detect a wedged container. Long
# start-period because model loading can dominate startup.
# TODO(review): point at a dedicated health endpoint if the app exposes one.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
    CMD curl -fsS http://localhost:9537/ || exit 1
# Absolute path — exec-form ENTRYPOINT should not depend on PATH lookup.
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]