diff --git a/Dockerfile.byow b/Dockerfile.byow
new file mode 100644
index 00000000..c0aadf1a
--- /dev/null
+++ b/Dockerfile.byow
@@ -0,0 +1,85 @@
+FROM nvcr.io/nvidia/pytorch:25.03-py3
+
+# Environment setup
+ENV TZ="Etc/UTC"
+ENV PYTORCH_CUDA_ALLOC_CONF="backend:cudaMallocAsync,expandable_segments:True"
+ENV UV_COMPILE_BYTECODE=1
+ENV UV_NO_CACHE=1
+ENV UV_SYSTEM_PYTHON=1
+# ENV UV_OVERRIDE=/workspace/constraints.txt
+# COPY src/comfystream/scripts/constraints.txt /workspace/constraints.txt
+ENV UV_BREAK_SYSTEM_PACKAGES=1
+ENV PIP_DISABLE_PIP_VERSION_CHECK=1
+ENV PIP_NO_CACHE_DIR=1
+ENV DEBIAN_FRONTEND=noninteractive
+ENV LANG=C.UTF-8
+ENV LC_ALL=C.UTF-8
+
+RUN git clone https://github.com/comfyanonymous/ComfyUI.git /workspace/ComfyUI
+RUN git clone https://github.com/Comfy-Org/ComfyUI-Manager.git /workspace/ComfyUI/custom_nodes/ComfyUI-Manager
+
+RUN pip install uv && uv --version && \
+    apt-get update && apt-get install --no-install-recommends ffmpeg libsm6 libxext6 -y && \
+    uv pip uninstall --system $(pip list --format=freeze | grep opencv) && \
+    rm -rf /usr/local/lib/python3.12/dist-packages/cv2/ && \
+    uv pip install wheel && \
+    uv pip install --no-build-isolation "opencv-contrib-python-headless!=4.11.0.86" && \
+    rm -rf /var/lib/apt/lists/*
+
+# Install torchaudio with the correct version
+COPY docker/install-torchaudio.sh /usr/local/bin/install-torchaudio.sh
+RUN chmod +x /usr/local/bin/install-torchaudio.sh && \
+    /usr/local/bin/install-torchaudio.sh
+
+# Create mount point for user's ComfyUI workspace
+# NOTE(fix): ComfyUI and ComfyUI-Manager are already cloned above; the second,
+# duplicated pair of clones was removed because `git clone` into a non-empty directory fails.
+
+COPY src/comfystream/scripts /workspace/tmp/scripts
+COPY configs /workspace/tmp/configs
+
+COPY docker/entrypoint-byow.sh /usr/local/bin/comfyui-entrypoint.sh
+RUN chmod +x /usr/local/bin/comfyui-entrypoint.sh
+
+# Create venv in ComfyUI workspace and set uv enc 
default to it +WORKDIR /workspace/ComfyUI +RUN uv venv .venv +ENV VIRTUAL_ENV=/workspace/ComfyUI/.venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +# Install comfy-cli +RUN uv pip install comfy-cli + +#RUN comfy --here --skip-prompt install --skip-torch-or-directml +#RUN uv pip install --torch-backend=auto "comfyui@git+https://github.com/hiddenswitch/ComfyUI.git@e62df3a8811d8c652a195d4669f4fb27f6c9a9ba" +RUN uv pip install -r /workspace/ComfyUI/custom_nodes/ComfyUI-Manager/requirements.txt +RUN comfy --skip-prompt set-default /workspace/ComfyUI +# RUN comfy node install comfystream --fast-deps +RUN comfy node install comfystream --mode local comfystream --fast-deps + +# WORKDIR /workspace/tmp +COPY scripts /workspace/ComfyUI/custom_nodes/src/comfystream/scripts +COPY configs /workspace/ComfyUI/custom_nodes/comfystream/configs + +RUN mkdir -p /tmp + +# Set working directory to the mount point +WORKDIR /workspace/ComfyUI +COPY docker/entrypoint-byow.sh /tmp/entrypoint.sh +RUN chmod +x /tmp/entrypoint.sh + +# Set the entrypoint +# ENTRYPOINT ["/etc/local/bin/comfyui-entrypoint.sh"] + +#Testing only +#ENTRYPOINT ["/tmp/entrypoint.sh"] + +# Default command (can be overridden) +CMD [] + +# Expose ComfyUI port +EXPOSE 8188 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ + CMD curl -f http://localhost:8188/system_stats || exit 1 diff --git a/Dockerfile.uv b/Dockerfile.uv new file mode 100644 index 00000000..39f6c737 --- /dev/null +++ b/Dockerfile.uv @@ -0,0 +1,158 @@ +ARG BASE_IMAGE=nvidia/cuda:12.8.0-cudnn-devel-ubuntu24.04 \ + PYTHON_VERSION=3.12 \ + INSTALL_EXTRA_PACKAGES=false \ + INSTALL_NODES=false + +FROM "${BASE_IMAGE}" + +ARG PYTHON_VERSION \ + INSTALL_EXTRA_PACKAGES \ + INSTALL_NODES + +ENV DEBIAN_FRONTEND=noninteractive \ + TensorRT_ROOT=/opt/TensorRT-10.12.0.36 \ + PYTHON_VERSION="${PYTHON_VERSION}" \ + UV_COMPILE_BYTECODE=1 \ + UV_NO_CACHE=0 \ + UV_SYSTEM_PYTHON=0 \ + UV_BREAK_SYSTEM_PACKAGES=0 \ + 
PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PIP_NO_CACHE_DIR=1 \ + LANG=C.UTF-8 \ + LC_ALL=C.UTF-8 + +# System dependencies +RUN apt update && apt install -yqq --no-install-recommends \ + git wget curl nano socat \ + libsndfile1 build-essential llvm tk-dev \ + libglvnd-dev cmake swig libprotobuf-dev \ + protobuf-compiler libcairo2-dev libpango1.0-dev libgdk-pixbuf2.0-dev \ + libffi-dev libgirepository1.0-dev pkg-config libgflags-dev \ + libgoogle-glog-dev libjpeg-dev libavcodec-dev libavformat-dev \ + libavutil-dev libswscale-dev \ + python3.12 python3.12-dev python3.12-venv python3-pip \ + && rm -rf /var/lib/apt/lists/* + +# Enable opengl support with nvidia gpu +RUN printf '%s\n' \ + '{' \ + ' "file_format_version" : "1.0.0",' \ + ' "ICD" : {' \ + ' "library_path" : "libEGL_nvidia.so.0"' \ + ' }' \ + '}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json + +# Install uv +RUN curl -LsSf https://astral.sh/uv/install.sh | sh && \ + . $HOME/.local/bin/env && \ + uv --version +ENV PATH="/root/.local/bin:$PATH" + +# Clone ComfyUI and Manager +RUN git clone --branch v0.3.60 --depth 1 https://github.com/comfyanonymous/ComfyUI.git /workspace/ComfyUI +RUN git clone https://github.com/Comfy-Org/ComfyUI-Manager.git /workspace/ComfyUI/custom_nodes/ComfyUI-Manager + +# Create venv in ComfyUI workspace +WORKDIR /workspace/ComfyUI +RUN uv venv .venv --python /usr/bin/python${PYTHON_VERSION} +ENV VIRTUAL_ENV=/workspace/ComfyUI/.venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +# Conditional TensorRT SDK installation +RUN if [ "$INSTALL_EXTRA_PACKAGES" = "true" ]; then \ + cd /opt && \ + wget --progress=dot:giga \ + https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.12.0/tars/TensorRT-10.12.0.36.Linux.x86_64-gnu.cuda-12.9.tar.gz && \ + tar -xzf TensorRT-10.12.0.36.Linux.x86_64-gnu.cuda-12.9.tar.gz && \ + rm TensorRT-10.12.0.36.Linux.x86_64-gnu.cuda-12.9.tar.gz && \ + echo "${TensorRT_ROOT}/lib" > /etc/ld.so.conf.d/tensorrt.conf && \ + ldconfig && \ + uv pip install 
--no-cache-dir \ + ${TensorRT_ROOT}/python/tensorrt-10.12.0.36-cp312-none-linux_x86_64.whl; \ + fi + +# Create comfystream directory for constraints and lock file +RUN mkdir -p /tmp/comfystream + +# Copy constraints file and comfy lock file for dependency installation +COPY ./src/comfystream/scripts/constraints.txt /tmp/comfystream/constraints.txt +COPY ./configs/comfy-lock.yaml /tmp/comfystream/comfy-lock.yaml + +# Install constraints (cached unless constraints.txt changes) +RUN uv pip install -r /tmp/comfystream/constraints.txt + +# Copy workflows and test files early (less frequently changed) +COPY ./workflows/comfyui/* /workspace/ComfyUI/user/default/workflows/ +COPY ./test/example-512x512.png /workspace/ComfyUI/input + +# Conditional OpenCV CUDA installation +RUN if [ "$INSTALL_EXTRA_PACKAGES" = "true" ]; then \ + cd /tmp && \ + DOWNLOAD_NAME="opencv-cuda-release.tar.gz" && \ + wget -q -O "$DOWNLOAD_NAME" https://github.com/JJassonn69/ComfyUI-Stream-Pack/releases/download/v2.1/opencv-cuda-release.tar.gz && \ + tar -xzf "$DOWNLOAD_NAME" -C /tmp/ && \ + rm "$DOWNLOAD_NAME" && \ + SITE_PACKAGES_DIR="${VIRTUAL_ENV}/lib/python${PYTHON_VERSION}/site-packages" && \ + rm -rf "${SITE_PACKAGES_DIR}/cv2"* && \ + cp -r /tmp/cv2 "${SITE_PACKAGES_DIR}/" && \ + VENV_LIB="${VIRTUAL_ENV}/lib" && \ + rm -f "${VENV_LIB}/libstdc++.so"* && \ + cp /usr/lib/x86_64-linux-gnu/libstdc++.so* "${VENV_LIB}/" && \ + cp /tmp/opencv/build/lib/libopencv_* /usr/lib/x86_64-linux-gnu/ && \ + rm -rf /tmp/opencv_contrib /tmp/opencv /tmp/cv2 && \ + echo "OpenCV CUDA installation completed"; \ + fi + +# Install ComfyUI requirements (cached unless ComfyUI version changes) +# RUN uv pip install -r /workspace/ComfyUI/requirements.txt +RUN uv pip install -r /workspace/ComfyUI/custom_nodes/ComfyUI-Manager/requirements.txt + +# Copy full comfystream source directly into custom_nodes (done late to maximize cache hits) +COPY . 
/workspace/ComfyUI/custom_nodes/comfystream + +# Install ComfyStream +RUN cd /workspace/ComfyUI/custom_nodes/comfystream && uv pip install -e . + +# Run install.py (will use uv pip automatically when available) +RUN cd /workspace/ComfyUI/custom_nodes/comfystream && python install.py --workspace /workspace/ComfyUI + +# Accept a build-arg that lets CI force-invalidate cache +ARG CACHEBUST=static +ENV CACHEBUST=${CACHEBUST} + +# Note: setup_nodes.py is intentionally skipped + +# Install comfy-cli for workspace management +RUN uv pip install comfy-cli +RUN comfy --skip-prompt set-default "/workspace/ComfyUI" +RUN comfy tracking disable + +# Conditionally install custom nodes from snapshot +RUN if [ "$INSTALL_NODES" = "true" ]; then \ + cd /workspace/ComfyUI && \ + comfy node restore-snapshot /workspace/ComfyUI/custom_nodes/comfystream/configs/comfy-lock.yaml; \ + fi + +# Install numpy and xformers +#RUN uv pip install "numpy<2.0.0" +RUN uv pip install --no-cache-dir xformers==0.0.32.post2 --no-deps + +# Create marker file to identify this as a built-in workspace +RUN touch /workspace/ComfyUI/.comfystream_builtin_workspace + +WORKDIR /workspace/ComfyUI/custom_nodes/comfystream + +# Expose ComfyUI port +EXPOSE 8188 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ + CMD curl -f http://localhost:8188/system_stats || exit 1 + +# Copy and set up entrypoint script (last for optimal caching) +COPY ./docker/entrypoint-byow.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD [] + diff --git a/Dockerfile.uv.dev b/Dockerfile.uv.dev new file mode 100644 index 00000000..89f0021e --- /dev/null +++ b/Dockerfile.uv.dev @@ -0,0 +1,16 @@ +# Development Dockerfile for comfystream:uv +# This extends the production image and only overrides development-specific changes +# Build: docker build -f Dockerfile.uv.dev -t comfystream:uv-dev . 
+
+FROM comfystream:uv
+
+# Override entrypoint script for development
+# This is the only layer that rebuilds when you modify the entrypoint
+COPY ./docker/entrypoint-byow.sh /usr/local/bin/entrypoint.sh
+RUN chmod +x /usr/local/bin/entrypoint.sh
+
+# Development-friendly settings
+ENV COMFYUI_DEV_MODE=1
+
+WORKDIR /workspace/ComfyUI/custom_nodes/comfystream
+
diff --git a/Makefile.docker b/Makefile.docker
new file mode 100644
index 00000000..3696bdd6
--- /dev/null
+++ b/Makefile.docker
@@ -0,0 +1,49 @@
+.PHONY: help build build-dev build-dev-nodes build-install-nodes build-no-extras run run-dev run-dev-live run-dev-shell clean
+
+COMFYUI_PATH ?= ~/ComfyUI
+PORT ?= 8188
+
+help: ## Show this help message
+	@echo "ComfyStream Docker Development Commands"
+	@echo ""
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
+
+build: ## Build production image with TensorRT and OpenCV CUDA
+	docker build -f Dockerfile.uv --build-arg INSTALL_EXTRA_PACKAGES=true -t comfystream:uv .
+
+build-install-nodes: ## Build production image with nodes installed
+	docker build -f Dockerfile.uv --build-arg INSTALL_NODES=true -t comfystream:uv-nodes .
+
+build-no-extras: ## Build production image without TensorRT and OpenCV CUDA (faster)
+	docker build -f Dockerfile.uv --build-arg INSTALL_EXTRA_PACKAGES=false -t comfystream:uv-lite .
+
+build-dev: ## Build development image (extends production, only rebuilds entrypoint layer)
+	docker build -f Dockerfile.uv.dev -t comfystream:uv-dev .
+
+build-dev-nodes: ## Build development image tagged for the nodes variant (extends production)
+	docker build -f Dockerfile.uv.dev -t comfystream:uv-dev-nodes .
+
+run: ## Run built-in workspace (no mount)
+	docker run --gpus all -p $(PORT):8188 comfystream:uv
+
+run-dev: ## Run with mounted workspace using docker-compose
+	docker compose -f docker-compose.dev.yml up
+
+run-dev-live: ## Run with live entrypoint mounting (instant updates, no rebuild!) 
+ docker run --gpus all -p $(PORT):8188 \ + -v $(COMFYUI_PATH):/workspace/ComfyUI \ + -v $$(pwd)/docker/entrypoint-byow.sh:/usr/local/bin/entrypoint.sh:ro \ + comfystream:uv + +run-dev-shell: ## Run with mounted workspace and drop into bash shell + docker run -it --gpus all \ + -v $(COMFYUI_PATH):/workspace/ComfyUI \ + -v $$(pwd)/docker/entrypoint-byow.sh:/usr/local/bin/entrypoint.sh:ro \ + comfystream:uv bash + +clean: ## Remove all comfystream images + docker rmi comfystream:uv comfystream:uv-dev comfystream:uv-lite || true + +# Override ComfyUI path: make run-dev-live COMFYUI_PATH=/path/to/ComfyUI +# Override port: make run PORT=8189 + diff --git a/README-BYOW.md b/README-BYOW.md new file mode 100644 index 00000000..f6a57bd2 --- /dev/null +++ b/README-BYOW.md @@ -0,0 +1,171 @@ +# Bring Your Own ComfyUI Workspace + +This Dockerfile (`Dockerfile.byow`) allows you to run ComfyStream with your own ComfyUI workspace, preserving all your custom nodes, models, and configurations. + +## Features + +- **Workspace Preservation**: Mount your existing ComfyUI workspace with all customizations +- **Automatic venv Management**: Creates and manages a `uv` virtual environment in your workspace +- **Flexible Setup**: Works with existing ComfyUI installations or initializes new ones +- **ComfyStream Integration**: Automatically installs the comfystream node +- **No File Dependencies**: Container doesn't require any files from the build context + +## Usage + +### Building the Image + +```bash +# Build the bring-your-own-workspace image +docker build -f Dockerfile.byow -t comfystream:byow . 
+``` + +### Running with Your Workspace + +#### Option 1: Mount Existing ComfyUI Workspace + +```bash +# Run with your existing ComfyUI workspace +docker run -it --rm \ + --gpus all \ + -p 8188:8188 \ + -v /path/to/your/comfyui:/workspace/ComfyUI \ + comfystream:byow +``` + +#### Option 2: Initialize New Workspace + +```bash +# Create a new directory for ComfyUI workspace +mkdir -p ~/comfyui-workspace + +# Run and let the container initialize ComfyUI +docker run -it --rm \ + --gpus all \ + -p 8188:8188 \ + -v ~/comfyui-workspace:/workspace/ComfyUI \ + comfystream:byow +``` + +#### Option 3: Development Mode with Shell Access + +```bash +# Run with shell access for development +docker run -it --rm \ + --gpus all \ + -p 8188:8188 \ + -v /path/to/your/comfyui:/workspace/ComfyUI \ + comfystream:byow \ + bash +``` + +### Advanced Usage + +#### Custom ComfyUI Arguments + +```bash +# Run ComfyUI with custom arguments +docker run -it --rm \ + --gpus all \ + -p 8188:8188 \ + -v /path/to/your/comfyui:/workspace/ComfyUI \ + comfystream:byow \ + python main.py --listen 0.0.0.0 --port 8188 --output-directory /workspace/ComfyUI/output +``` + +#### Running comfy-cli Commands + +```bash +# Install additional nodes +docker run -it --rm \ + --gpus all \ + -v /path/to/your/comfyui:/workspace/ComfyUI \ + comfystream:byow \ + comfy node install + +# Update ComfyUI +docker run -it --rm \ + --gpus all \ + -v /path/to/your/comfyui:/workspace/ComfyUI \ + comfystream:byow \ + comfy update +``` + +## How It Works + +1. **Container Initialization**: The container starts with a clean ComfyUI environment +2. **Workspace Detection**: Checks if the mounted volume contains a valid ComfyUI installation +3. **Environment Setup**: Creates or uses existing `.venv` in your workspace using `uv` +4. **Dependency Management**: All packages are installed in the workspace's virtual environment +5. **ComfyStream Integration**: Automatically installs the comfystream custom node +6. 
**Service Start**: Starts ComfyUI server or runs your specified command + +## Workspace Structure + +Your ComfyUI workspace should follow this structure: + +``` +/path/to/your/comfyui/ +├── .venv/ # Virtual environment (auto-created if missing) +├── custom_nodes/ # Your custom nodes +├── models/ # Your models +├── input/ # Input files +├── output/ # Generated outputs +├── main.py # ComfyUI main script +└── requirements.txt # Python dependencies (optional) +``` + +## Environment Variables + +The container sets these environment variables for optimal performance: + +- `PYTORCH_CUDA_ALLOC_CONF="backend:cudaMallocAsync,expandable_segments:True"` +- `UV_COMPILE_BYTECODE=1` - Compile Python bytecode for faster imports +- `UV_NO_CACHE=1` - Disable uv cache in container +- `UV_SYSTEM_PYTHON=1` - Use system Python with uv + +## Ports + +- `8188`: ComfyUI web interface and API + +## Health Check + +The container includes a health check that verifies the ComfyUI server is responding: + +```bash +curl -f http://localhost:8188/system_stats +``` + +## Troubleshooting + +### Permission Issues + +If you encounter permission issues, ensure your local ComfyUI directory has appropriate permissions: + +```bash +sudo chown -R $(whoami):$(whoami) /path/to/your/comfyui +chmod -R 755 /path/to/your/comfyui +``` + +### Virtual Environment Issues + +If the virtual environment setup fails, you can manually recreate it: + +```bash +# Remove existing venv and let container recreate it +rm -rf /path/to/your/comfyui/.venv + +# Run container again +docker run -it --rm --gpus all -p 8188:8188 -v /path/to/your/comfyui:/workspace/ComfyUI comfystream:byow +``` + +### Missing ComfyStream Node + +If the comfystream node isn't available, manually install it: + +```bash +docker run -it --rm \ + --gpus all \ + -v /path/to/your/comfyui:/workspace/ComfyUI \ + comfystream:byow \ + comfy node registry-install comfystream +``` diff --git a/README.md b/README.md index a9edf4be..7f38c42a 100644 --- a/README.md 
+++ b/README.md @@ -1,5 +1,7 @@ # comfystream +> ⚠️ **NOTICE:** Active development of this project has moved to [the Livepeer fork](https://github.com/livepeer/comfystream). + comfystream is a package for running img2img [Comfy](https://www.comfy.org/) workflows on video streams. This repo also includes a WebRTC server and UI that uses comfystream to support streaming from a webcam and processing the stream with a workflow JSON file (API format) created in ComfyUI. If you have an existing ComfyUI installation, the same custom nodes used to create the workflow in ComfyUI will be re-used when processing the video stream. @@ -27,7 +29,7 @@ This repo also includes a WebRTC server and UI that uses comfystream to support Refer to [.devcontainer/README.md](.devcontainer/README.md) to setup ComfyStream in a devcontainer using a pre-configured ComfyUI docker environment. -For other installation options, refer to [Install ComfyUI and ComfyStream](https://docs.comfystream.org/technical/get-started/install) in the ComfyStream documentation. +For other installation options, refer to [Install ComfyUI and ComfyStream](https://pipelines.livepeer.org/docs/technical/install/local-testing) in the Livepeer pipelines documentation. For additional information, refer to the remaining sections below. @@ -35,7 +37,7 @@ For additional information, refer to the remaining sections below. You can quickly deploy ComfyStream using the docker image `livepeer/comfystream` -Refer to the documentation at [https://docs.comfystream.org/technical/get-started/install](https://docs.comfystream.org/technical/get-started/install) for instructions to run locally or on a remote server. +Refer to the documentation at [https://pipelines.livepeer.org/docs/technical/getting-started/install-comfystream](https://pipelines.livepeer.org/docs/technical/getting-started/install-comfystream) for instructions to run locally or on a remote server. 
#### RunPod diff --git a/configs/comfy-lock.yaml b/configs/comfy-lock.yaml new file mode 100755 index 00000000..04b8682d --- /dev/null +++ b/configs/comfy-lock.yaml @@ -0,0 +1,212 @@ +basic: + +models: + # Base models + - model: "Dreamshaper v8" + url: "https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16" + paths: + - path: "checkpoints/SD1.5/dreamshaper-8.safetensors" + type: "checkpoint" + + - model: "SD-Turbo" + url: "https://huggingface.co/stabilityai/sd-turbo/resolve/main/sd_turbo.safetensors" + paths: + - path: "checkpoints/SD1.5/sd_turbo.safetensors" + type: "checkpoint" + + # DMD models + - model: "Dreamshaper DMD" + url: "https://huggingface.co/aaronb/dreamshaper-8-dmd-1kstep/resolve/main/diffusion_pytorch_model.safetensors" + paths: + - path: "unet/dreamshaper-8-dmd-1kstep.safetensors" + - path: "unet/dreamshaper-8-dmd-1kstep.json" + type: "unet" + + # Depth Anything V2 ONNX models + - model: "DepthAnything ONNX" + url: "https://huggingface.co/yuvraj108c/Depth-Anything-2-Onnx/resolve/main/depth_anything_v2_vitb.onnx?download=true" + paths: + - path: "tensorrt/depth-anything/depth_anything_vitl14.onnx" + type: "onnx" + + - model: "DepthAnything V2 Large ONNX" + url: "https://huggingface.co/yuvraj108c/Depth-Anything-2-Onnx/resolve/main/depth_anything_v2_vitl.onnx?download=true" + paths: + - path: "tensorrt/depth-anything/depth_anything_v2_vitl.onnx" + type: "onnx" + + # TAESD models + - model: "TAESD" + url: "https://huggingface.co/madebyollin/taesd/resolve/main/taesd_decoder.safetensors" + paths: + - path: "vae_approx/taesd_decoder.safetensors" + - path: "vae_approx/taesd_encoder.safetensors" + type: "vae_approx" + + # ControlNet models + - model: "ControlNet Depth" + url: "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors" + paths: + - path: "controlnet/control_v11f1p_sd15_depth_fp16.safetensors" + type: "controlnet" + + - model: 
"ControlNet MediaPipe Face" + url: "https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/control_v2p_sd15_mediapipe_face.safetensors" + paths: + - path: "controlnet/control_v2p_sd15_mediapipe_face.safetensors" + type: "controlnet" + + - model: "outfitToOutfit_v20" + url: "https://huggingface.co/EmmaJohnson311/outfitToOutfit/resolve/main/outfitToOutfit_v20_sd15.safetensors" + paths: + - path: "controlnet/outfitToOutfit_v20.safetensors" + type: "controlnet" + + # Lora models + - model: "ral-polygon-sd15" + url: "https://huggingface.co/Livepeer-Studio/comfystream_loras/resolve/main/ral-polygon-sd15.safetensors" + paths: + - path: "loras/SD1.5/ral-polygon-sd15.safetensors" + type: "lora" + + - model: "ral-chrome-sd15" + url: "https://civitai.com/api/download/models/276570?type=Model&format=SafeTensor" + paths: + - path: "loras/SD1.5/ral-chrome-sd15.safetensors" + type: "lora" + + # Text encoder + - model: "ClipTextModel" + url: "https://huggingface.co/Lykon/dreamshaper-8/resolve/main/text_encoder/model.fp16.safetensors" + paths: + - path: "text_encoders/CLIPText/model.fp16.safetensors" + type: "text_encoder" + + # JoyVASA models for ComfyUI-FasterLivePortrait + - model: "JoyVASA Motion Generator" + url: "https://huggingface.co/jdh-algo/JoyVASA/resolve/main/motion_generator/motion_generator_hubert_chinese.pt?download=true" + paths: + - path: "liveportrait_onnx/joyvasa_models/motion_generator_hubert_chinese.pt" + type: "torch" + + - model: "JoyVASA Hubert Chinese" + url: "https://huggingface.co/TencentGameMate/chinese-hubert-base/resolve/main/chinese-hubert-base-fairseq-ckpt.pt?download=true" + paths: + - path: "liveportrait_onnx/joyvasa_models/chinese-hubert-base-fairseq-ckpt.pt" + type: "torch" + + - model: "JoyVASA Motion Template" + url: "https://huggingface.co/jdh-algo/JoyVASA/resolve/main/motion_template/motion_template.pkl?download=true" + paths: + - path: "liveportrait_onnx/joyvasa_models/motion_template.pkl" + type: "pickle" + + # LivePortrait 
ONNX models + - model: "WarpingSpadeModel" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/warping_spade-fix.onnx?download=true" + paths: + - path: "liveportrait_onnx/warping_spade-fix.onnx" + type: "onnx" + + - model: "MotionExtractorModel" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/motion_extractor.onnx?download=true" + paths: + - path: "liveportrait_onnx/motion_extractor.onnx" + type: "onnx" + + - model: "LandmarkModel" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/landmark.onnx?download=true" + paths: + - path: "liveportrait_onnx/landmark.onnx" + type: "onnx" + + - model: "FaceAnalysisModel - RetinaFace" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/retinaface_det_static.onnx?download=true" + paths: + - path: "liveportrait_onnx/retinaface_det_static.onnx" + type: "onnx" + + - model: "FaceAnalysisModel - 2DPose" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/face_2dpose_106_static.onnx?download=true" + paths: + - path: "liveportrait_onnx/face_2dpose_106_static.onnx" + type: "onnx" + + - model: "AppearanceFeatureExtractorModel" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/appearance_feature_extractor.onnx?download=true" + paths: + - path: "liveportrait_onnx/appearance_feature_extractor.onnx" + type: "onnx" + + - model: "StitchingModel" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/stitching.onnx?download=true" + paths: + - path: "liveportrait_onnx/stitching.onnx" + type: "onnx" + + - model: "StitchingModel (Eye Retargeting)" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/stitching_eye.onnx?download=true" + paths: + - path: "liveportrait_onnx/stitching_eye.onnx" + type: "onnx" + + - model: "StitchingModel (Lip 
Retargeting)" + url: "https://huggingface.co/warmshao/FasterLivePortrait/resolve/main/liveportrait_onnx/stitching_lip.onnx?download=true" + paths: + - path: "liveportrait_onnx/stitching_lip.onnx" + type: "onnx" + + # LLM/Vision models + - model: "Florence-2-base-ft" + url: "https://huggingface.co/microsoft/Florence-2-base-ft" + paths: + - path: "LLM/Florence-2-base-ft" + type: "llm" + + # SAM2 models + - model: "SAM2 Hiera Tiny" + url: "https://huggingface.co/facebook/sam2-hiera-tiny/resolve/main/sam2_hiera_tiny.pt" + paths: + - path: "sam2/sam2_hiera_tiny.pt" + type: "sam2" + +# compatible with ComfyUI-Manager's .yaml snapshot +custom_nodes: + git_custom_nodes: + "https://github.com/livepeer/comfystream.git": + disabled: false + "https://github.com/eliteprox/ComfyUI_TensorRT.git": + disabled: false + hash: "patch-1" + "https://github.com/yuvraj108c/ComfyUI-Depth-Anything-Tensorrt": + disabled: false + "https://github.com/pschroedl/ComfyUI-StreamDiffusion": + disabled: false + hash: "main" + "https://github.com/pschroedl/ComfyUI-FasterLivePortrait.git": + disabled: false + hash: "main" + "https://github.com/pschroedl/ComfyUI_RyanOnTheInside.git": + disabled: false + "https://github.com/ryanontheinside/ComfyUI-Misc-Effects.git": + disabled: false + "https://github.com/ryanontheinside/ComfyUI_RealTimeNodes.git": + disabled: false + "https://github.com/ad-astra-video/ComfyUI-Florence2-Vision.git": + disabled: false + "https://github.com/pschroedl/ComfyUI-SAM2-Realtime.git": + disabled: false + hash: "main" + "https://github.com/tsogzark/ComfyUI-load-image-from-url.git": + disabled: false + "https://github.com/yondonfu/ComfyUI-Torch-Compile": + disabled: false + "https://github.com/rgthree/rgthree-comfy.git": + disabled: false + "https://github.com/yondonfu/ComfyUI-Background-Edit": + disabled: false + "https://github.com/Fannovel16/comfyui_controlnet_aux": + disabled: false + "https://github.com/livepeer/ComfyUI-Stream-Pack": + disabled: false + hash: "main" diff 
--git a/configs/nodes.yaml b/configs/nodes.yaml index cf497e07..5db31b14 100644 --- a/configs/nodes.yaml +++ b/configs/nodes.yaml @@ -2,8 +2,8 @@ nodes: # Core TensorRT nodes comfyui-tensorrt: name: "ComfyUI TensorRT" - url: "https://github.com/yondonfu/ComfyUI_TensorRT.git" - branch: "quantization_with_controlnet_fixes" + url: "https://github.com/eliteprox/ComfyUI_TensorRT.git" + branch: "patch-1" type: "tensorrt" dependencies: - "tensorrt==10.12.0.36" @@ -37,7 +37,7 @@ nodes: name: "ComfyUI Misc Effects" url: "https://github.com/ryanontheinside/ComfyUI-Misc-Effects.git" type: "effects" - + comfyui-realtimenode: name: "ComfyUI RealTimeNodes" url: "https://github.com/ryanontheinside/ComfyUI_RealTimeNodes.git" @@ -49,7 +49,7 @@ nodes: name: "ComfyUI Florence2 Vision" url: "https://github.com/ad-astra-video/ComfyUI-Florence2-Vision.git" type: "vision" - + comfyui-sam2-realtime: name: "ComfyUI SAM2 Realtime" branch: "main" diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 00000000..3d23b817 --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,22 @@ +version: '3.8' + +services: + comfyui-dev: + image: comfystream:uv + runtime: nvidia + environment: + - NVIDIA_VISIBLE_DEVICES=all + - COMFYUI_DEV_MODE=1 + volumes: + # Mount your local ComfyUI workspace + - ~/ComfyUI:/workspace/ComfyUI + # Mount entrypoint for live editing (no rebuild needed!) 
+ - ./docker/entrypoint-byow.sh:/usr/local/bin/entrypoint.sh:ro + # Optional: mount comfystream source for live development + # - ./src:/workspace/ComfyUI/custom_nodes/comfystream/src:ro + ports: + - "8188:8188" + stdin_open: true + tty: true + restart: unless-stopped + diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base index 878f4c9b..3aefc56a 100644 --- a/docker/Dockerfile.base +++ b/docker/Dockerfile.base @@ -12,7 +12,7 @@ ENV DEBIAN_FRONTEND=noninteractive \ CONDA_VERSION="${CONDA_VERSION}" \ PATH="/workspace/miniconda3/bin:${PATH}" \ PYTHON_VERSION="${PYTHON_VERSION}" - + # System dependencies RUN apt update && apt install -yqq --no-install-recommends \ git wget nano socat \ diff --git a/docker/README.md b/docker/README.md index ad691ace..6bc093a9 100644 --- a/docker/README.md +++ b/docker/README.md @@ -49,3 +49,40 @@ There are multiple options that can be passed to the Comfystream server. To see ```bash docker run --gpus all livepeer/comfystream --help ``` + +## Bring Your Own Workspace (BYOW) + +The `Dockerfile.uv` and `entrypoint-byow.sh` support mounting your own ComfyUI workspace, allowing you to persist your custom nodes, models, and configurations across container restarts. + +### Usage + +To mount your own workspace: + +```bash +docker run -it --gpus all -p 8188:8188 \ + -v ~/my-comfyui-workspace:/workspace/ComfyUI \ + comfystream:uv-nodes +``` + +### How It Works + +The entrypoint script automatically detects whether you're using a mounted workspace or the built-in workspace: + +1. **Built-in Workspace**: If the container finds a `.comfystream_builtin_workspace` marker file, it uses the pre-built workspace from the Docker image. + +2. 
**Mounted Workspace**: If no marker file is present, the script assumes you've mounted your own workspace and will: + - Create a Python virtual environment (`.venv`) in your workspace if it doesn't exist + - Install comfy-cli for workspace management + - Clone ComfyUI directly into your workspace (if not already present) to avoid nested directory issues + - Clone ComfyUI-Manager for custom node management + - Copy the `comfy-lock.yaml` configuration file + - Restore custom nodes from the lock file using comfy-cli + - Install comfystream as a custom node + +### Key Design Decisions + +- **Direct Git Clone**: The script uses `git clone` directly instead of `comfy install` to avoid creating nested `ComfyUI/ComfyUI` directories when the mount point is already `/workspace/ComfyUI`. + +- **Skip Requirements**: ComfyUI's `requirements.txt` is intentionally NOT installed. Dependencies are managed via `constraints.txt` and comfystream's requirements to ensure compatibility. + +- **Custom Node Management**: Custom nodes are managed through comfy-cli's snapshot restore feature using the `comfy-lock.yaml` file. diff --git a/docker/entrypoint-byow.sh b/docker/entrypoint-byow.sh new file mode 100644 index 00000000..2cef13cb --- /dev/null +++ b/docker/entrypoint-byow.sh @@ -0,0 +1,166 @@ +#!/bin/bash +set -e +export UV_LINK_MODE=copy + +# If a command was passed, run it instead of setting up ComfyUI workspace +if [[ $# -ne 0 ]]; then + exec "$@" +fi + +echo "Starting ComfyUI workspace setup..." 
+
+# Detect if /workspace/ComfyUI is a mounted workspace
+IS_MOUNTED_WORKSPACE=false
+COMFYUI_PATH="/workspace/ComfyUI"
+COMFYSTREAM_BUILTIN_PATH="/workspace/ComfyUI/custom_nodes/comfystream"
+
+# Check if the workspace appears to be mounted
+# A built-in workspace will have a marker file created during Docker build
+MARKER_FILE="$COMFYUI_PATH/.comfystream_builtin_workspace"
+
+if [[ -f "$MARKER_FILE" ]]; then
+    IS_MOUNTED_WORKSPACE=false
+    echo "Detected built-in workspace (marker file present)"
+else
+    IS_MOUNTED_WORKSPACE=true
+    echo "Detected mounted workspace (no marker file)"
+fi
+
+if [[ "$IS_MOUNTED_WORKSPACE" == "true" ]]; then
+    echo "=== Mounted Workspace Mode ==="
+    cd "$COMFYUI_PATH"
+
+    # Activate the venv from mounted workspace
+    export VIRTUAL_ENV="$COMFYUI_PATH/.venv"
+    export PATH="$VIRTUAL_ENV/bin:$PATH"
+    export UV_NO_BUILD_ISOLATION=1
+    export UV_OVERRIDES=/tmp/comfystream/constraints.txt
+
+    FRESH_INSTALL=false
+    # Check if ComfyUI is already installed (fixed: was `$FRESH_INSTALL=true`)
+    if [[ ! -f "main.py" ]]; then
+        FRESH_INSTALL=true
+        echo "ComfyUI not found in mounted directory, installing..."
+        echo "Cloning ComfyUI directly (comfy-cli would create nested directory)..."
+
+        # Clone ComfyUI directly instead of using comfy-cli install
+        # This avoids the nested directory issue
+        git clone --branch v0.3.60 --depth 1 https://github.com/comfyanonymous/ComfyUI.git /tmp/ComfyUI
+
+        # Move contents to current directory
+        shopt -s dotglob
+        mv /tmp/ComfyUI/* "$COMFYUI_PATH/"
+        rmdir /tmp/ComfyUI
+
+        echo "ComfyUI cloned successfully!"
+
+        # Clone ComfyUI-Manager
+        if [[ ! -d "custom_nodes/ComfyUI-Manager" ]]; then
+            echo "Installing ComfyUI-Manager..."
+            mkdir -p custom_nodes
+            git clone https://github.com/Comfy-Org/ComfyUI-Manager.git custom_nodes/ComfyUI-Manager
+        fi
+    fi
+
+    # Check if .venv exists in mounted workspace, create if needed
+    if [[ ! -d ".venv" ]]; then
+        echo "Creating virtual environment in mounted workspace..." 
+
+        uv venv .venv --python 3.12
+        source "$VIRTUAL_ENV/bin/activate"
+    fi
+
+    # Ensure pip is available in an existing venv — `uv venv` creates
+    # environments without pip by default.
+    if ! python -m pip --version &> /dev/null; then
+        echo "Pip not found in venv, installing..."
+        python -m ensurepip
+        python -m pip install --upgrade pip
+    fi
+
+    # Install comfy-cli into the venv if it is not already on PATH, and
+    # disable its usage tracking.
+    if ! command -v comfy &> /dev/null; then
+        echo "Installing comfy-cli..."
+        source "$VIRTUAL_ENV/bin/activate"
+        uv pip install comfy-cli
+        comfy tracking disable
+    fi
+
+    # Run the comfy-cli installer. Fresh mounts skip ComfyUI's requirements
+    # (dependencies come from constraints.txt instead); existing mounts run a
+    # restore pass on every start.
+    # NOTE(review): verify the flag spellings `--skip-requirement` and
+    # `--restore` against the pinned comfy-cli version, and consider quoting
+    # --workspace="$COMFYUI_PATH" for robustness.
+    if [[ $FRESH_INSTALL == true ]]; then
+        comfy --skip-prompt --workspace=$COMFYUI_PATH install --nvidia --skip-requirement
+    else
+        comfy --skip-prompt --workspace=$COMFYUI_PATH install --nvidia --restore
+    fi
+    # Make this workspace the comfy-cli default for subsequent commands.
+    comfy set-default "$COMFYUI_PATH"
+
+    # Seed the workspace with the shipped lock file on first run only; an
+    # existing user-managed lock file is never overwritten.
+    if [[ ! -f ".comfy-lock.yaml" ]] && [[ -f "/tmp/comfystream/comfy-lock.yaml" ]]; then
+        echo "Copying comfy-lock.yaml to workspace..."
+        cp /tmp/comfystream/comfy-lock.yaml .comfy-lock.yaml
+    fi
+
+    # Note: ComfyUI requirements are intentionally NOT installed here.
+    # Dependencies are managed via constraints.txt and comfystream requirements.
+
+    # Restore custom nodes from comfy-lock.yaml if it exists; failures are
+    # deliberately non-fatal (best-effort restore under `set -e`).
+    if [[ -f ".comfy-lock.yaml" ]]; then
+        echo "Restoring custom nodes from comfy-lock.yaml..."
+        comfy node restore-snapshot .comfy-lock.yaml || echo "Warning: Some custom nodes may have failed to install"
+    fi
+
+    echo "ComfyUI installation complete!"
+
+    # Git refuses to operate on repositories owned by another user (common
+    # with host-mounted volumes); whitelist the workspace explicitly.
+    echo "Configuring git safe.directory for mounted workspace..."
+    git config --global --add safe.directory "$COMFYUI_PATH"
+
+    # Also whitelist each custom node that is itself a git checkout.
+    for custom_node_dir in custom_nodes/*; do
+        if [[ -d "$custom_node_dir/.git" ]]; then
+            git config --global --add safe.directory "$COMFYUI_PATH/$custom_node_dir"
+        fi
+    done
+
+    # Ensure comfystream is available in custom_nodes.
+    # NOTE(review): COMFYSTREAM_BUILTIN_PATH points INSIDE the mount point
+    # ($COMFYUI_PATH/custom_nodes/comfystream), so on a mounted workspace
+    # that lacks comfystream the cp source does not exist either, and with
+    # `set -e` the script aborts here. The built-in copy should live outside
+    # /workspace/ComfyUI — confirm against the Dockerfile layout.
+    if [[ ! -d "custom_nodes/comfystream" ]]; then
+        echo "Comfystream not found in custom_nodes, copying from built-in..."
+        mkdir -p custom_nodes
+        cp -r "$COMFYSTREAM_BUILTIN_PATH" custom_nodes/
+    fi
+
+    # Install comfystream (editable) into the workspace venv.
+    if [[ -d "custom_nodes/comfystream" ]]; then
+        echo "Installing comfystream..."
+        cd custom_nodes/comfystream
+        uv pip install -e .
+        cd "$COMFYUI_PATH"
+    fi
+
+    # Install comfystream's runtime requirements if the file is present.
+    if [[ -f "custom_nodes/comfystream/requirements.txt" ]]; then
+        echo "Installing comfystream requirements..."
+        uv pip install -r custom_nodes/comfystream/requirements.txt
+    fi
+
+    echo "Mounted workspace setup complete!"
+
+else
+    echo "=== Built-in Workspace Mode ==="
+    cd "$COMFYUI_PATH"
+
+    # Use the pre-built venv baked into the image; no installation needed.
+    export VIRTUAL_ENV="$COMFYUI_PATH/.venv"
+    export PATH="$VIRTUAL_ENV/bin:$PATH"
+    export UV_NO_BUILD_ISOLATION=1
+    comfy --skip-prompt set-default "$COMFYUI_PATH"
+    source "$VIRTUAL_ENV/bin/activate"
+    echo "Built-in workspace ready!"
+fi
+
+# Set up bash completion for comfy-cli if not already done (best-effort;
+# errors are ignored).
+if [[ ! -f ~/.local/share/bash-completion/completions/comfy ]]; then
+    comfy --install-completion 2>/dev/null || true
+fi
+
+
+echo "Starting ComfyUI server..."
+# Replace the shell with the ComfyUI server so it runs as PID 1 and receives
+# signals from `docker stop` directly.
+exec comfy launch -- --listen 0.0.0.0 --port 8188 --front-end-version Comfy-Org/ComfyUI_frontend@v1.24.2
diff --git a/docker/install-torchaudio.sh b/docker/install-torchaudio.sh
new file mode 100644
index 00000000..90930af8
--- /dev/null
+++ b/docker/install-torchaudio.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -euo pipefail
+
+# Install torchaudio pinned to the exact torch version already present in the
+# image, keeping the torch/torchaudio pair ABI-compatible.
+echo "Installing torchaudio with matching torch version..."
+
+# Get torch version and CUDA version using Python.
+# NOTE(review): NGC PyTorch images often ship source builds with versions
+# like "2.7.0a0+<sha>"; the regex reduces that to "2.7.0", for which a
+# matching torchaudio wheel may not exist on the index — confirm on the
+# target base image.
+TORCH_INFO=$(python -c "
+import torch
+import re
+
+torch_version_full = torch.__version__
+torch_ver_match = re.match(r'(\d+\.\d+\.\d+)', torch_version_full)
+if not torch_ver_match:
+    raise ValueError(f'Could not parse torch version from {torch_version_full}')
+
+torch_ver = torch_ver_match.group(1)
+cuda_ver_tag = f'cu{torch.version.cuda.replace(\".\", \"\")}'
+
+print(f'{torch_ver}:{cuda_ver_tag}')
+")
+
+# Parse the "<version>:<cuXYZ>" output into two variables.
+IFS=':' read -r TORCH_VER CUDA_VER_TAG <<< "$TORCH_INFO"
+
+echo "Detected torch version: $TORCH_VER"
+echo "Detected CUDA version tag: $CUDA_VER_TAG"
+
+# Install torchaudio with the matching version; --no-deps prevents it from
+# pulling in a different torch build.
+echo "Installing torchaudio==${TORCH_VER}+${CUDA_VER_TAG}..."
+uv pip install --no-deps \
+    "torchaudio==${TORCH_VER}+${CUDA_VER_TAG}" \
+    --extra-index-url "https://download.pytorch.org/whl/${CUDA_VER_TAG}"
+
+echo "torchaudio installation completed successfully!"
diff --git a/install.py b/install.py index 385d160f..9913fe52 100644 --- a/install.py +++ b/install.py @@ -9,6 +9,7 @@ import urllib.request import toml import zipfile +import shutil from comfy_compatibility.workspace import auto_patch_workspace_and_restart logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') @@ -32,12 +33,12 @@ def download_and_extract_ui_files(version: str): pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) base_url = urllib.parse.urljoin("https://github.com/livepeer/comfystream/releases/download/", f"v{version}/comfystream-uikit.zip") fallback_url = "https://github.com/livepeer/comfystream/releases/latest/download/comfystream-uikit.zip" - + # Create a temporary directory instead of a temporary file with tempfile.TemporaryDirectory() as temp_dir: # Define the path for the downloaded file download_path = os.path.join(temp_dir, "comfystream-uikit.zip") - + # Download zip file logger.info(f"Downloading {base_url}") try: @@ -53,7 +54,7 @@ def download_and_extract_ui_files(version: str): else: logger.error(f"Error downloading package: {e}") raise - + # Extract contents try: logger.info(f"Extracting files to {output_dir}") @@ -69,7 +70,7 @@ def download_and_extract_ui_files(version: str): "--workspace", default=os.environ.get('COMFY_UI_WORKSPACE', None), required=False, help="Set Comfy workspace" ) args = parser.parse_args() - + workspace = args.workspace if workspace is None: # Look up to 3 directories up for ComfyUI @@ -90,15 +91,21 @@ def download_and_extract_ui_files(version: str): current = os.path.dirname(current) logger.info("Installing comfystream package...") - subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."]) + # Use uv pip if available, otherwise fall back to pip + if shutil.which("uv"): + logger.info("Using uv pip for installation") + subprocess.check_call(["uv", "pip", "install", "-e", "."]) + else: + logger.info("Using pip for installation") + 
subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."]) if workspace is None: logger.warning("No ComfyUI workspace found. Please specify a valid workspace path to fully install") - + if workspace is not None: logger.info("Patching ComfyUI workspace...") auto_patch_workspace_and_restart(workspace) - + logger.info("Downloading and extracting UI files...") version = get_project_version(os.getcwd()) download_and_extract_ui_files(version) diff --git a/src/comfystream/scripts/constraints.txt b/src/comfystream/scripts/constraints.txt index 9d1bdccb..9b23203f 100644 --- a/src/comfystream/scripts/constraints.txt +++ b/src/comfystream/scripts/constraints.txt @@ -1,13 +1,14 @@ --extra-index-url https://download.pytorch.org/whl/cu128 ---extra-index-url https://pypi.nvidia.com +--extra-index-url https://download.pytorch.org/whl/xformers/ numpy<2.0.0 -torch==2.7.1+cu128 +torch==2.8.0+cu128 cuda-python<13.0 -torchvision==0.22.1+cu128 -torchaudio==2.7.1+cu128 +torchvision==0.23.0+cu128 +torchaudio==2.8.0+cu128 +xformers==0.0.32.post2 tensorrt==10.12.0.36 tensorrt-cu12==10.12.0.36 onnx==1.18.0 onnxruntime==1.22.0 onnxruntime-gpu==1.22.0 -onnxmltools==1.14.0 \ No newline at end of file +onnxmltools==1.14.0 diff --git a/src/comfystream/scripts/setup_nodes.py b/src/comfystream/scripts/setup_nodes.py index 418e55f8..8f793148 100755 --- a/src/comfystream/scripts/setup_nodes.py +++ b/src/comfystream/scripts/setup_nodes.py @@ -1,6 +1,7 @@ import os import subprocess import sys +import shutil from pathlib import Path import yaml import argparse @@ -29,6 +30,16 @@ def setup_environment(workspace_dir): os.environ["CUSTOM_NODES_PATH"] = str(workspace_dir / "custom_nodes") +def is_comfy_cli_available(): + """Check if comfy-cli is available on the system""" + return shutil.which("comfy") is not None + + +def get_package_manager(): + """Return the appropriate package manager (comfy-cli or pip)""" + return "comfy-cli" if is_comfy_cli_available() else "pip" + + def 
setup_directories(workspace_dir): """Create required directories in the workspace""" # Create base directories @@ -54,6 +65,10 @@ def install_custom_nodes(workspace_dir, config_path=None, pull_branches=False): custom_nodes_path.mkdir(parents=True, exist_ok=True) os.chdir(custom_nodes_path) + # Get the appropriate package manager + package_manager = get_package_manager() + print(f"Using package manager: {package_manager}") + # Get the absolute path to constraints.txt constraints_path = Path(__file__).parent / "constraints.txt" if not constraints_path.exists(): @@ -87,28 +102,44 @@ def install_custom_nodes(workspace_dir, config_path=None, pull_branches=False): subprocess.run(["git", "-C", dir_name, "fetch", "origin"], check=True) subprocess.run(["git", "-C", dir_name, "checkout", node_info["branch"]], check=True) - # Install requirements if present - requirements_file = node_path / "requirements.txt" - if requirements_file.exists(): - pip_cmd = [ - sys.executable, - "-m", - "pip", - "install", - "-r", - str(requirements_file), - ] - if constraints_path and constraints_path.exists(): - pip_cmd.extend(["-c", str(constraints_path)]) - subprocess.run(pip_cmd, check=True) - - # Install additional dependencies if specified - if "dependencies" in node_info: - for dep in node_info["dependencies"]: - pip_cmd = [sys.executable, "-m", "pip", "install", dep] + # Install the node using comfy-cli or pip + if package_manager == "comfy-cli": + if pull_branches and node_path.exists(): + # Use local mode for existing nodes (pull-branches scenario) + print(f"Installing {node_info['name']} in local mode...") + install_cmd = ["comfy", "node", "install", "--fast-deps", "--mode", "local", str(node_path)] + else: + # Use URL mode for new installations + print(f"Installing {node_info['name']} from URL...") + install_cmd = ["comfy", "node", "install", "--fast-deps", "--url", node_info["url"]] + if "branch" in node_info: + install_cmd.extend(["--branch", node_info["branch"]]) + + 
subprocess.run(install_cmd, check=True) + else: + # Fallback to pip installation for requirements and dependencies + # Install requirements if present + requirements_file = node_path / "requirements.txt" + if requirements_file.exists(): + install_cmd = [ + sys.executable, + "-m", + "pip", + "install", + "-r", + str(requirements_file), + ] if constraints_path and constraints_path.exists(): - pip_cmd.extend(["-c", str(constraints_path)]) - subprocess.run(pip_cmd, check=True) + install_cmd.extend(["-c", str(constraints_path)]) + subprocess.run(install_cmd, check=True) + + # Install additional dependencies if specified + if "dependencies" in node_info: + for dep in node_info["dependencies"]: + install_cmd = [sys.executable, "-m", "pip", "install", dep] + if constraints_path and constraints_path.exists(): + install_cmd.extend(["-c", str(constraints_path)]) + subprocess.run(install_cmd, check=True) print(f"Installed {node_info['name']}") except Exception as e: