-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathsetup_linux.sh
More file actions
executable file
·272 lines (225 loc) · 9.45 KB
/
setup_linux.sh
File metadata and controls
executable file
·272 lines (225 loc) · 9.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
#!/bin/bash
# ============================================================
# Setup script for Linux cloud GPU (Lambda Labs, RunPod, Vast.ai)
# Downloads all models and dependencies in parallel
# Optimized for Lambda Labs (PyTorch pre-installed)
# ============================================================
# Abort on the first unhandled command failure.  (Note: `set -e` is
# ignored inside `if`/`&&`/`||` conditions, so critical steps below
# still check their own exit codes explicitly where it matters.)
set -e

echo "============================================================"
echo " JoyBoy Cloud GPU Setup"
echo " Linux + CUDA"
echo "============================================================"
echo ""

# ANSI color constants for log lines (interpreted later by `echo -e`).
# Declared readonly: they are constants and must never be reassigned.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color
# Ensure python3 can create virtual environments (venv + ensurepip).
# Minimal Ubuntu images ship these in a separate apt package; install it
# (directly as root, or via sudo) when missing.  Exits the script with a
# manual-install hint when neither apt-get nor sufficient privileges exist.
ensure_ubuntu_python_bootstrap() {
  # Fast path: venv support is already present.
  if python3 -c "import ensurepip, venv" >/dev/null 2>&1; then
    return 0
  fi

  echo -e "${YELLOW}[SETUP]${NC} Python venv/ensurepip support is missing."

  # Without apt-get we cannot self-heal on this distro.
  if ! command -v apt-get >/dev/null 2>&1; then
    echo -e "${RED}[ERROR]${NC} Install Python venv support for your distro, then rerun this script."
    exit 1
  fi

  # Prefer the interpreter-versioned package (e.g. python3.10-venv); keep
  # the generic packages in the list and as the fallback install set.
  local py_ver
  py_ver=$(python3 -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')")
  local apt_pkgs=("python${py_ver}-venv" "python3-venv" "python3-pip")

  echo -e "${YELLOW}[SETUP]${NC} Installing ${apt_pkgs[*]}..."
  if [ "$(id -u)" -eq 0 ]; then
    apt-get update
    apt-get install -y "${apt_pkgs[@]}" || apt-get install -y python3-venv python3-pip
  elif command -v sudo >/dev/null 2>&1; then
    sudo apt-get update
    sudo apt-get install -y "${apt_pkgs[@]}" || sudo apt-get install -y python3-venv python3-pip
  else
    echo -e "${RED}[ERROR]${NC} sudo is not available. Run as root:"
    echo " apt-get update && apt-get install -y ${apt_pkgs[*]}"
    exit 1
  fi

  # Confirm the install actually fixed things before continuing.
  if ! python3 -c "import ensurepip, venv" >/dev/null 2>&1; then
    echo -e "${RED}[ERROR]${NC} Python venv support is still unavailable after apt install."
    exit 1
  fi
}
# ------------------------------------------------------------
# Preflight checks: GPU driver, Python, user-level pip binaries
# ------------------------------------------------------------
# Check CUDA
# nvidia-smi on PATH implies a working NVIDIA driver; without it no GPU
# work is possible, so fail fast with a clear message.
if ! command -v nvidia-smi &> /dev/null; then
echo -e "${RED}[ERROR] nvidia-smi not found. Is CUDA installed?${NC}"
exit 1
fi
echo -e "${GREEN}[OK]${NC} CUDA detected:"
# Show each GPU's model name and total VRAM for the operator's benefit.
nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
echo ""
# Check Python
if ! command -v python3 &> /dev/null; then
echo -e "${RED}[ERROR] Python 3 not found${NC}"
exit 1
fi
PYTHON_VERSION=$(python3 --version)
echo -e "${GREEN}[OK]${NC} $PYTHON_VERSION"
# Add ~/.local/bin to PATH (pip installs binaries there)
export PATH="$HOME/.local/bin:$PATH"
# Install python3-venv/ensurepip via apt if they are missing (function
# defined earlier in this script); exits on unrecoverable failure.
ensure_ubuntu_python_bootstrap
# Check if PyTorch already installed (Lambda Labs has it)
# If torch imports AND reports a usable CUDA device, reuse the system
# interpreter and skip the venv + torch install below.  The pipeline is
# inside an `if` condition, so `set -e` does not fire when torch is absent.
if python3 -c "import torch; print(torch.cuda.is_available())" 2>/dev/null | grep -q "True"; then
echo -e "${GREEN}[OK]${NC} PyTorch + CUDA already installed (Lambda Labs)"
USE_SYSTEM_PYTHON=true
else
USE_SYSTEM_PYTHON=false
fi
# ------------------------------------------------------------
# Python environment + dependency installation.
# NOTE: the install ORDER below is deliberate — numpy<2 must go last.
# ------------------------------------------------------------
# Create venv if not using system Python
if [ "$USE_SYSTEM_PYTHON" = false ]; then
# A venv dir without bin/activate is a half-finished create; rebuild it.
if [ -d "venv" ] && [ ! -f "venv/bin/activate" ]; then
echo -e "${YELLOW}[SETUP]${NC} Removing incomplete virtual environment..."
rm -rf venv
fi
if [ ! -d "venv" ]; then
echo -e "${YELLOW}[SETUP]${NC} Creating virtual environment..."
python3 -m venv venv
fi
source venv/bin/activate
echo -e "${GREEN}[OK]${NC} Virtual environment activated"
# Install PyTorch with CUDA
echo -e "${YELLOW}[SETUP]${NC} Installing PyTorch + CUDA..."
pip install --upgrade pip
# cu124 index serves CUDA 12.4 builds of torch/torchvision/torchaudio.
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
else
echo -e "${GREEN}[OK]${NC} Using system Python (no venv needed)"
# NOTE(review): assumes `pip` on PATH targets python3 here (true on
# Lambda images); `python3 -m pip` would be the safer spelling — confirm.
pip install --upgrade pip
fi
# ============================================================
# FIX VERSION CONFLICTS (Lambda Labs has system packages that conflict)
# ============================================================
echo -e "${YELLOW}[SETUP]${NC} Fixing version conflicts..."
# huggingface-hub 1.x breaks transformers
pip install "huggingface-hub>=0.25.0,<1.0"
# Install requirements
echo -e "${YELLOW}[SETUP]${NC} Installing requirements..."
pip install -r scripts/requirements.txt
# Force reinstall the full ML stack to avoid system package conflicts
echo -e "${YELLOW}[SETUP]${NC} Reinstalling ML stack (fixing Lambda system conflicts)..."
pip install --force-reinstall torchvision transformers diffusers accelerate
# IMPORTANT: numpy<2 MUST be installed LAST (torchvision pulls numpy 2.x)
# mediapipe/tensorflow need numpy<2
echo -e "${YELLOW}[SETUP]${NC} Forcing numpy<2 (mediapipe/tensorflow compatibility)..."
pip install "numpy<2" --force-reinstall
# Install additional dependencies for Linux
echo -e "${YELLOW}[SETUP]${NC} Installing Linux-specific dependencies..."
pip install triton
# Best-effort source builds: 2>/dev/null hides compiler noise and the
# `|| echo` keeps `set -e` from aborting the script when a build fails.
pip install flash_attn --no-build-isolation 2>/dev/null || echo -e "${YELLOW}[WARN]${NC} FlashAttention build skipped (Wan native will retry at runtime)"
pip install sageattention --no-build-isolation 2>/dev/null || echo -e "${YELLOW}[WARN]${NC} SageAttention build skipped (will retry at runtime)"
echo ""
echo "============================================================"
echo " Downloading Models (parallel)"
echo "============================================================"
echo ""
# Create models directory
# -p: no-op when it already exists, so re-running the script is safe.
mkdir -p models/checkpoints
# ============================================================
# download_hf_model REPO NAME [required|optional|gated]
#
# Pre-downloads a Hugging Face snapshot in a BACKGROUND job so that
# several models fetch in parallel; pair with a later `wait`.
#   required (default) - failure prints an [ERROR] line
#   optional           - failure only warns
#   gated              - skipped entirely unless HF_TOKEN or
#                        HUGGINGFACE_TOKEN is set; failure only warns
# NOTE(review): the job runs in the background, so `exit 1` ends only
# the subshell — the later bare `wait` will NOT propagate failures.
# The [ERROR] line below is therefore the operator's only signal that a
# required model is missing.
# ============================================================
download_hf_model() {
  local repo=$1
  local name=$2
  local required=${3:-required}
  # Gated repos need an auth token; skip (do not fail) when none is set.
  if [ "$required" = "gated" ] && [ -z "${HF_TOKEN:-}" ] && [ -z "${HUGGINGFACE_TOKEN:-}" ]; then
    echo -e "${YELLOW}[SKIP]${NC} $name requires Hugging Face access. Set HF_TOKEN and rerun to pre-download it."
    return 0
  fi
  echo -e "${YELLOW}[DL]${NC} $name..."
  (
    # Use huggingface_hub's snapshot_download directly so the cache and
    # auth behave exactly as they will at runtime.
    python3 - "$repo" "$name" <<'PY'
import os
import sys
from huggingface_hub import snapshot_download
repo = sys.argv[1]
name = sys.argv[2]
token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN") or None
snapshot_download(repo, token=token)
print(f"[OK] Downloaded {name}")
PY
  ) || {
    if [ "$required" = "optional" ] || [ "$required" = "gated" ]; then
      echo -e "${YELLOW}[WARN]${NC} Optional model download failed: $name"
      exit 0
    fi
    # FIX: required failures used to exit the subshell silently; emit an
    # explicit error so the operator sees exactly which model is missing.
    echo -e "${RED}[ERROR]${NC} Required model download failed: $name" >&2
    exit 1
  } &
}
# ============================================================
# IMAGE MODELS
# Each download_hf_model call forks a background job; all jobs are
# collected by the single `wait` further below.
# ============================================================
echo -e "${GREEN}[IMAGE MODELS]${NC}"
# Flux Kontext (editing intelligent, 12B)
# Gated repo: only pre-downloaded when HF_TOKEN/HUGGINGFACE_TOKEN is set.
download_hf_model "black-forest-labs/FLUX.1-Kontext-dev" "Flux Kontext 12B" "gated"
# SDXL stack (generic inpainting stack)
echo -e "${YELLOW}[DL]${NC} epicRealismXL (CivitAI → downloaded at runtime)..."
# Note: CivitAI models are downloaded at runtime, but we can pre-download SDXL base
download_hf_model "stabilityai/stable-diffusion-xl-base-1.0" "SDXL Base"
download_hf_model "diffusers/stable-diffusion-xl-1.0-inpainting-0.1" "SDXL Inpaint"
download_hf_model "lllyasviel/sd_control_collection" "ControlNet Depth"
# ============================================================
# VIDEO MODELS
# ============================================================
echo -e "${GREEN}[VIDEO MODELS]${NC}"
# Wan 2.2 5B (main video model)
download_hf_model "Wan-AI/Wan2.2-TI2V-5B-Diffusers" "Wan 2.2 5B"
# FastWan (distilled, faster)
download_hf_model "FastVideo/FastWan2.2-TI2V-5B-FullAttn-Diffusers" "FastWan 2.2 5B"
# LTX-Video 2B (includes distilled 0.9.8)
download_hf_model "Lightricks/LTX-Video" "LTX-Video 2B"
# ============================================================
# TEXT ENCODERS (shared)
# ============================================================
echo -e "${GREEN}[TEXT ENCODERS]${NC}"
# T5-XXL (used by LTX, Wan, etc.)
download_hf_model "google/umt5-xxl" "UMT5-XXL"
# ============================================================
# SUPPORT MODELS
# ============================================================
echo -e "${GREEN}[SUPPORT MODELS]${NC}"
# Segmentation
download_hf_model "mattmdjaga/segformer_b2_clothes" "SegFormer B2 Clothes"
# Depth estimation
download_hf_model "LiheYoung/depth-anything-large-hf" "Depth Anything Large"
# Face detection/restoration
download_hf_model "IDEA-Research/grounding-dino-base" "GroundingDINO"
# ============================================================
# OLLAMA (for chat/prompts)
# ============================================================
echo ""
echo -e "${GREEN}[OLLAMA]${NC}"
if ! command -v ollama &> /dev/null; then
echo -e "${YELLOW}[DL]${NC} Installing Ollama..."
# Vendor-recommended installer; note this pipes a remote script to sh.
curl -fsSL https://ollama.com/install.sh | sh
fi
# Start Ollama in background
echo -e "${YELLOW}[OLLAMA]${NC} Starting Ollama service..."
ollama serve &>/dev/null &
# Fixed delay to let the server start accepting requests before pulling.
# NOTE(review): presumably 3s is enough on these hosts — polling the API
# until ready would be more robust; confirm.
sleep 3
# Download models
# Pulled in parallel alongside the HF jobs; collected by `wait` below.
echo -e "${YELLOW}[DL]${NC} Downloading Ollama models..."
ollama pull dolphin-phi:2.7b &
ollama pull qwen2.5vl:3b &
# ============================================================
# WAIT FOR ALL DOWNLOADS
# ============================================================
echo ""
echo -e "${YELLOW}[WAIT]${NC} Waiting for all downloads to complete..."
# NOTE(review): a bare `wait` blocks until every background job exits
# but returns 0 regardless of their statuses, so failed downloads only
# surface as [ERROR]/[WARN] lines above — track PIDs and check
# `wait "$pid"` per job if a hard failure is ever required here.
wait
echo ""
echo -e "${GREEN}============================================================${NC}"
echo -e "${GREEN} Setup Complete!${NC}"
echo -e "${GREEN}============================================================${NC}"
echo ""
echo "To start the app:"
echo ""
echo " cd ~/JoyBoy && ./start_linux.sh"
echo ""
echo "Then use SSH tunnel from your PC:"
echo " ssh -L 7860:localhost:7860 ubuntu@YOUR_IP"
echo " Open http://localhost:7860 in browser"
echo ""