diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 1c184f5c..40f13467 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,6 +2,10 @@ // README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu { "name": "ComfyStream", + // Commented out to use the build context instead + // TODO: Update this to the correct image name + // when using images other than livepeer/comfystream:latest + // "image": "livepeer/comfystream:streamdiffusion", "build": { "dockerfile": "../docker/Dockerfile", "context": "../" diff --git a/configs/QUICK_REFERENCE.md b/configs/QUICK_REFERENCE.md new file mode 100644 index 00000000..a4a12481 --- /dev/null +++ b/configs/QUICK_REFERENCE.md @@ -0,0 +1,74 @@ +# Quick Reference: Model Configuration + +## Single File vs Directory Download + +### Single File (Default) +```yaml +my-model: + name: "My Model" + url: "https://huggingface.co/user/repo/resolve/main/file.safetensors" + path: "loras/model.safetensors" +``` + +### Directory (Add `is_directory: true`) +```yaml +my-directory: + name: "My Directory" + url: "https://huggingface.co/user/repo/tree/main/folder" + path: "models/folder" + is_directory: true # ← Add this! 
+``` + +## URL Patterns + +| Download Type | URL Pattern | Example | +|---------------|-------------|---------| +| **Single File** | `/resolve/` | `https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.safetensors` | +| **Directory** | `/tree/` | `https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder` | + +## Common Model Paths + +| Model Type | Path Pattern | +|------------|--------------| +| Checkpoints | `checkpoints/SD1.5/` | +| LoRAs | `loras/SD1.5/` | +| ControlNet | `controlnet/` | +| VAE | `vae/` or `vae_approx/` | +| IP-Adapter | `ipadapter/` | +| Text Encoders | `text_encoders/CLIPText/` | +| TensorRT/ONNX | `tensorrt/` | + +## IP-Adapter Example + +```yaml +models: + # Single file - IP-Adapter model + ip-adapter-sd15: + name: "IP Adapter SD15" + url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.safetensors" + path: "ipadapter/ip-adapter_sd15.safetensors" + + # Directory - CLIP image encoder + clip-image-encoder: + name: "CLIP Image Encoder" + url: "https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder" + path: "ipadapter/models/image_encoder" + is_directory: true +``` + +## Usage + +```bash +# Use a config +python src/comfystream/scripts/setup_models.py --config my-config.yaml + +# Use default config (models.yaml) +python src/comfystream/scripts/setup_models.py +``` + +## See Also + +- [DIRECTORY_DOWNLOADS.md](../DIRECTORY_DOWNLOADS.md) - Detailed directory download guide +- [models-ipadapter-example.yaml](models-ipadapter-example.yaml) - Complete working example +- [README.md](README.md) - Full configuration reference + diff --git a/configs/build_targets.yaml b/configs/build_targets.yaml new file mode 100644 index 00000000..6836ee95 --- /dev/null +++ b/configs/build_targets.yaml @@ -0,0 +1,17 @@ +# build_targets.yaml +# Maps node names to their build scripts and config files. +# Add new nodes as needed. 
+ +streamdiffusion: + script: /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/scripts/build_tensorrt_engines.py + configs: + - /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/configs/sd15_singlecontrol.yaml + - /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/configs/sdturbo_multicontrol.yaml + folder: /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion/scripts + +# Example for another node: +# depthanything: +# script: /workspace/ComfyUI/custom_nodes/ComfyUI-DepthAnything/scripts/build_depthanything_engine.py +# configs: +# - /workspace/ComfyUI/custom_nodes/ComfyUI-DepthAnything/configs/depthanything.yaml +# folder: /workspace/ComfyUI/custom_nodes/ComfyUI-DepthAnything/scripts diff --git a/configs/models-ipadapter.yaml b/configs/models-ipadapter.yaml new file mode 100644 index 00000000..466a149f --- /dev/null +++ b/configs/models-ipadapter.yaml @@ -0,0 +1,44 @@ +models: + # Example: IP-Adapter setup with directory download + + # Single file download (regular) + ip-adapter-plus-sd15: + name: "IP Adapter SD15" + url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.safetensors" + path: "ipadapter/ip-adapter-plus_sd15.safetensors" + type: "ipadapter" + extra_files: + - url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.bin" + path: "ipadapter/ip-adapter-plus_sd15.bin" + + clip-image-encoder: + name: "CLIP Image Encoder" + url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors" + path: "ipadapter/image_encoder/model.safetensors" + type: "image_encoder" + extra_files: + - url: "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/config.json" + path: "ipadapter/image_encoder/config.json" + + # Base model + sd-turbo: + name: "SD-Turbo" + url: "https://huggingface.co/stabilityai/sd-turbo/resolve/main/sd_turbo.safetensors" + path: "checkpoints/SD1.5/sd_turbo.safetensors" + type: "checkpoint" + + 
PixelArtRedmond15V-PixelArt-PIXARFK.safetensors: + name: "PixelArtRedmond15V-PixelArt-PIXARFK" + url: "https://huggingface.co/artificialguybr/pixelartredmond-1-5v-pixel-art-loras-for-sd-1-5/resolve/ab43d9e2cf8c9240189f01e9cdc4ca341362500c/PixelArtRedmond15V-PixelArt-PIXARFK.safetensors" + path: "loras/SD1.5/PixelArt.safetensors" + type: "lora" + + # TAESD for fast VAE + taesd: + name: "TAESD" + url: "https://huggingface.co/madebyollin/taesd/resolve/main/taesd_decoder.safetensors" + path: "vae_approx/taesd_decoder.safetensors" + type: "vae_approx" + extra_files: + - url: "https://huggingface.co/madebyollin/taesd/resolve/main/taesd_encoder.safetensors" + path: "vae_approx/taesd_encoder.safetensors" diff --git a/configs/nodes-streamdiffusion.yaml b/configs/nodes-streamdiffusion.yaml new file mode 100644 index 00000000..94fda0d1 --- /dev/null +++ b/configs/nodes-streamdiffusion.yaml @@ -0,0 +1,36 @@ +nodes: + # Minimal node configuration for faster builds + comfyui-tensorrt: + name: "ComfyUI TensorRT" + url: "https://github.com/yondonfu/ComfyUI_TensorRT.git" + branch: "quantization_with_controlnet_fixes" + type: "tensorrt" + dependencies: + - "tensorrt==10.12.0.36" + + comfyui-streamdiffusion: + name: "ComfyUI StreamDiffusion" + url: "https://github.com/muxionlabs/ComfyUI-StreamDiffusion" + branch: "main" + type: "tensorrt" + + comfyui-torch-compile: + name: "ComfyUI Torch Compile" + url: "https://github.com/yondonfu/ComfyUI-Torch-Compile" + type: "tensorrt" + + comfyui_controlnet_aux: + name: "ComfyUI ControlNet Auxiliary" + url: "https://github.com/Fannovel16/comfyui_controlnet_aux" + type: "controlnet" + + comfyui-stream-pack: + name: "ComfyUI Stream Pack" + url: "https://github.com/livepeer/ComfyUI-Stream-Pack" + branch: "main" + type: "utility" + + rgthree-comfy: + name: "rgthree Comfy" + url: "https://github.com/rgthree/rgthree-comfy.git" + type: "utility" diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base index 5bf6fb05..96f1cc2f 100644 --- 
a/docker/Dockerfile.base +++ b/docker/Dockerfile.base @@ -1,11 +1,13 @@ ARG BASE_IMAGE=nvidia/cuda:12.8.1-devel-ubuntu22.04 \ CONDA_VERSION=latest \ - PYTHON_VERSION=3.12 + PYTHON_VERSION=3.12 \ + NODES_CONFIG=nodes.yaml FROM "${BASE_IMAGE}" ARG CONDA_VERSION \ - PYTHON_VERSION + PYTHON_VERSION \ + NODES_CONFIG ENV DEBIAN_FRONTEND=noninteractive \ TensorRT_ROOT=/opt/TensorRT-10.12.0.36 \ @@ -56,6 +58,9 @@ RUN apt-get remove --purge -y libcudnn9-cuda-12 libcudnn9-dev-cuda-12 || true && # to ensure numpy 2.0 is not installed automatically by another package RUN conda run -n comfystream --no-capture-output pip install "numpy<2.0.0" +# Ensure modelopt pulls a compatible transformers version for HF support +RUN conda run -n comfystream --no-capture-output pip install 'nvidia-modelopt[hf]' + # Install cuDNN 9.8 via conda to match base system version # Caution: Mixed versions installed in environment (system/python) can cause CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH errors RUN conda install -n comfystream -y -c nvidia -c conda-forge cudnn=9.8 cuda-version=12.8 @@ -105,8 +110,8 @@ RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream py ARG CACHEBUST=static ENV CACHEBUST=${CACHEBUST} -# Run setup_nodes -RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream python src/comfystream/scripts/setup_nodes.py --workspace /workspace/ComfyUI +# Run setup_nodes with custom config if specified +RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream python src/comfystream/scripts/setup_nodes.py --workspace /workspace/ComfyUI --config ${NODES_CONFIG} # Setup opencv with CUDA support RUN conda run -n comfystream --no-capture-output --cwd /workspace/comfystream --no-capture-output docker/entrypoint.sh --opencv-cuda diff --git a/docker/README.md b/docker/README.md index ad691ace..cabb2fd4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,4 +1,4 @@ -# ComfyStream Docker +# ComfyStream Docker Build Configuration 
This folder contains the Docker files that can be used to run ComfyStream in a containerized fashion or to work on the codebase within a dev container. This README contains the general usage instructions while the [Devcontainer Readme](../.devcontainer/README.md) contains instructions on how to use Comfystream inside a dev container and get quickly started with your development journey. @@ -7,21 +7,48 @@ This folder contains the Docker files that can be used to run ComfyStream in a c - [Dockerfile](Dockerfile) - The main Dockerfile that can be used to run ComfyStream in a containerized fashion. - [Dockerfile.base](Dockerfile.base) - The base Dockerfile that can be used to build the base image for ComfyStream. -## Pre-requisites +## Building with Custom Nodes Configuration -- [Docker](https://docs.docker.com/get-docker/) -- [Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) +The base Docker image supports specifying a custom nodes configuration file during build time using the `NODES_CONFIG` build argument. -## Usage +### Usage -### Build the Base Image +#### Default build (uses `nodes.yaml`) +```bash +docker build -t livepeer/comfyui-base -f docker/Dockerfile.base . +``` -To build the base image, run the following command: +#### Build with custom config from configs directory +```bash +docker build -f docker/Dockerfile.base \ + --build-arg NODES_CONFIG=nodes-streamdiffusion.yaml \ + -t comfyui-base:streamdiffusion . +``` +#### Build with config from absolute path ```bash -docker build -t livepeer/comfyui-base -f docker/Dockerfile.base . +docker build -f docker/Dockerfile.base \ + --build-arg NODES_CONFIG=/path/to/custom-nodes.yaml \ + -t comfyui-base:custom . 
``` +### Available Build Arguments + +| Argument | Default | Description | +|----------|---------|-------------| +| `BASE_IMAGE` | `nvidia/cuda:12.8.1-cudnn-devel-ubuntu22.04` | Base CUDA image | +| `CONDA_VERSION` | `latest` | Miniconda version | +| `PYTHON_VERSION` | `3.12` | Python version | +| `NODES_CONFIG` | `nodes.yaml` | Nodes configuration file (filename or path) | +| `CACHEBUST` | `static` | Cache invalidation for node setup | + +### Configuration Files in configs/ + +- **`nodes.yaml`** - Full node configuration (default) +- **`nodes-streamdiffusion.yaml`** - Minimal set of nodes for faster builds + +### Examples + ### Build the Main Image To build the main image, run the following command: diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index e6d44463..f3101859 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -86,6 +86,10 @@ DEPTH_ANYTHING_DIR="${TENSORRT_DIR}/depth-anything" DEPTH_ANYTHING_ENGINE="depth_anything_vitl14-fp16.engine" DEPTH_ANYTHING_ENGINE_LARGE="depth_anything_v2_vitl-fp16.engine" FASTERLIVEPORTRAIT_DIR="/workspace/ComfyUI/models/liveportrait_onnx" +DEPTH_ANYTHING_NODE_DIR="/workspace/ComfyUI/custom_nodes/ComfyUI-Depth-Anything-Tensorrt" +DEPTH_ANYTHING_EXPORT_SCRIPT="${DEPTH_ANYTHING_NODE_DIR}/export_trt.py" +FASTERLIVEPORTRAIT_NODE_DIR="/workspace/ComfyUI/custom_nodes/ComfyUI-FasterLivePortrait" +FASTERLIVEPORTRAIT_BUILD_SCRIPT="${FASTERLIVEPORTRAIT_NODE_DIR}/scripts/build_fasterliveportrait_trt.sh" if [ "$1" = "--build-engines" ]; then cd /workspace/comfystream @@ -111,49 +115,75 @@ if [ "$1" = "--build-engines" ]; then --max-width 448 \ --max-height 704 - # Build Engine for Depth Anything V2 - if [ ! -f "$DEPTH_ANYTHING_DIR/$DEPTH_ANYTHING_ENGINE" ]; then - if [ ! -d "$DEPTH_ANYTHING_DIR" ]; then - mkdir -p "$DEPTH_ANYTHING_DIR" + # Build Engine for Depth Anything V2 (guarded by custom node) + if [ -d "$DEPTH_ANYTHING_NODE_DIR" ] && [ -f "$DEPTH_ANYTHING_EXPORT_SCRIPT" ]; then + if [ ! 
-f "$DEPTH_ANYTHING_DIR/$DEPTH_ANYTHING_ENGINE" ]; then + if [ ! -d "$DEPTH_ANYTHING_DIR" ]; then + mkdir -p "$DEPTH_ANYTHING_DIR" + fi + cd "$DEPTH_ANYTHING_DIR" + python "$DEPTH_ANYTHING_EXPORT_SCRIPT" + else + echo "Engine for DepthAnything2 already exists at ${DEPTH_ANYTHING_DIR}/${DEPTH_ANYTHING_ENGINE}, skipping..." fi - cd "$DEPTH_ANYTHING_DIR" - python /workspace/ComfyUI/custom_nodes/ComfyUI-Depth-Anything-Tensorrt/export_trt.py - else - echo "Engine for DepthAnything2 already exists at ${DEPTH_ANYTHING_DIR}/${DEPTH_ANYTHING_ENGINE}, skipping..." - fi - # Build Engine for Depth Anything2 (large) - if [ ! -f "$DEPTH_ANYTHING_DIR/$DEPTH_ANYTHING_ENGINE_LARGE" ]; then - cd "$DEPTH_ANYTHING_DIR" - python /workspace/ComfyUI/custom_nodes/ComfyUI-Depth-Anything-Tensorrt/export_trt.py --trt-path "${DEPTH_ANYTHING_DIR}/${DEPTH_ANYTHING_ENGINE_LARGE}" --onnx-path "${DEPTH_ANYTHING_DIR}/depth_anything_v2_vitl.onnx" + # Build Engine for Depth Anything2 (large) + if [ ! -f "$DEPTH_ANYTHING_DIR/$DEPTH_ANYTHING_ENGINE_LARGE" ]; then + cd "$DEPTH_ANYTHING_DIR" + python "$DEPTH_ANYTHING_EXPORT_SCRIPT" --trt-path "${DEPTH_ANYTHING_DIR}/${DEPTH_ANYTHING_ENGINE_LARGE}" --onnx-path "${DEPTH_ANYTHING_DIR}/depth_anything_v2_vitl.onnx" + else + echo "Engine for DepthAnything2 (large) already exists at ${DEPTH_ANYTHING_DIR}/${DEPTH_ANYTHING_ENGINE_LARGE}, skipping..." + fi else - echo "Engine for DepthAnything2 (large) already exists at ${DEPTH_ANYTHING_DIR}/${DEPTH_ANYTHING_ENGINE_LARGE}, skipping..." + echo "DepthAnything custom node not found at ${DEPTH_ANYTHING_NODE_DIR}, skipping engine build." fi - # Build Engines for FasterLivePortrait - if [ ! 
-f "$FASTERLIVEPORTRAIT_DIR/warping_spade-fix.trt" ]; then - cd "$FASTERLIVEPORTRAIT_DIR" - bash /workspace/ComfyUI/custom_nodes/ComfyUI-FasterLivePortrait/scripts/build_fasterliveportrait_trt.sh "${FASTERLIVEPORTRAIT_DIR}" "${FASTERLIVEPORTRAIT_DIR}" "${FASTERLIVEPORTRAIT_DIR}" + # Build Engines for FasterLivePortrait (guarded by custom node) + if [ -d "$FASTERLIVEPORTRAIT_NODE_DIR" ] && [ -f "$FASTERLIVEPORTRAIT_BUILD_SCRIPT" ]; then + if [ ! -f "$FASTERLIVEPORTRAIT_DIR/warping_spade-fix.trt" ]; then + cd "$FASTERLIVEPORTRAIT_DIR" + bash "$FASTERLIVEPORTRAIT_BUILD_SCRIPT" "${FASTERLIVEPORTRAIT_DIR}" "${FASTERLIVEPORTRAIT_DIR}" "${FASTERLIVEPORTRAIT_DIR}" + else + echo "Engines for FasterLivePortrait already exists, skipping..." + fi else - echo "Engines for FasterLivePortrait already exists, skipping..." + echo "ComfyUI-FasterLivePortrait custom node not found at ${FASTERLIVEPORTRAIT_NODE_DIR}, skipping engine build." fi - # Build Engine for StreamDiffusion - if [ ! -f "$TENSORRT_DIR/StreamDiffusion-engines/stabilityai/sd-turbo--lcm_lora-True--tiny_vae-True--max_batch-3--min_batch-3--mode-img2img/unet.engine.opt.onnx" ]; then - cd /workspace/ComfyUI/custom_nodes/ComfyUI-StreamDiffusion - MODELS="stabilityai/sd-turbo KBlueLeaf/kohaku-v2.1" - TIMESTEPS="3" - for model in $MODELS; do - for timestep in $TIMESTEPS; do - echo "Building model=$model with timestep=$timestep" - python build_tensorrt.py \ - --model-id "$model" \ - --timesteps "$timestep" \ - --engine-dir $TENSORRT_DIR/StreamDiffusion-engines - done - done - else - echo "Engine for StreamDiffusion already exists, skipping..." + + # Build Engine for StreamDiffusion using build_targets.yaml (dynamic, robust) + BUILD_TARGETS_FILE="/workspace/comfystream/configs/build_targets.yaml" + if [ ! -f "$BUILD_TARGETS_FILE" ]; then + echo "build_targets.yaml not found at $BUILD_TARGETS_FILE. Skipping StreamDiffusion engine builds." 
+ else + python3 - <<'EOF' +import os +import yaml +import subprocess + +with open("/workspace/comfystream/configs/build_targets.yaml", "r") as f: + targets = yaml.safe_load(f) + +info = targets.get("streamdiffusion") +if info: + folder = info.get("folder") + script = info.get("script") + configs = info.get("configs", []) + if folder and os.path.isdir(folder) and script and os.path.isfile(script): + for config in configs: + if os.path.isfile(config): + print(f"Building streamdiffusion engine with config: {config}") + try: + subprocess.run(["python", script, "--config", config], check=True) + except subprocess.CalledProcessError as e: + print(f"Error building engine for config {config}: {e}") + else: + print(f"Warning: Config {config} for streamdiffusion not found, skipping...") + else: + print(f"Skipping streamdiffusion: required folder or script not found.") +else: + print("streamdiffusion not found in build_targets.yaml, skipping...") +EOF fi shift fi @@ -161,7 +191,7 @@ fi if [ "$1" = "--opencv-cuda" ]; then cd /workspace/comfystream conda activate comfystream - + # Check if OpenCV CUDA build already exists if [ ! 
-f "/workspace/comfystream/opencv-cuda-release.tar.gz" ]; then # Download and extract OpenCV CUDA build @@ -183,7 +213,7 @@ if [ "$1" = "--opencv-cuda" ]; then # Handle library dependencies CONDA_ENV_LIB="/workspace/miniconda3/envs/comfystream/lib" - + # Remove existing libstdc++ and copy system one rm -f "${CONDA_ENV_LIB}/libstdc++.so"* cp /usr/lib/x86_64-linux-gnu/libstdc++.so* "${CONDA_ENV_LIB}/" @@ -206,22 +236,24 @@ if [ "$START_COMFYUI" = true ] || [ "$START_API" = true ] || [ "$START_UI" = tru # Start supervisord in background /usr/bin/supervisord -c /etc/supervisor/supervisord.conf & sleep 2 # Give supervisord time to start - + # Start requested services if [ "$START_COMFYUI" = true ]; then supervisorctl -c /etc/supervisor/supervisord.conf start comfyui fi - + if [ "$START_API" = true ]; then supervisorctl -c /etc/supervisor/supervisord.conf start comfystream-api fi - + if [ "$START_UI" = true ]; then supervisorctl -c /etc/supervisor/supervisord.conf start comfystream-ui fi - + # Keep the script running tail -f /var/log/supervisord.log fi + + exec "$@" diff --git a/src/comfystream/scripts/README.md b/src/comfystream/scripts/README.md index 1d95d49f..e593a376 100644 --- a/src/comfystream/scripts/README.md +++ b/src/comfystream/scripts/README.md @@ -22,11 +22,23 @@ python src/comfystream/scripts/setup_nodes.py --workspace /path/to/comfyui ``` > The optional flag `--pull-branches` can be used to ensure the latest git changes are pulled for any custom nodes defined with a `branch` in nodes.yaml +#### Using a custom nodes configuration +```bash +python src/comfystream/scripts/setup_nodes.py --workspace /path/to/comfyui --config nodes-streamdiffusion.yaml +``` +> The `--config` flag accepts a filename (searches in `configs/`), relative path, or absolute path to a custom nodes configuration file + ### Download models and compile tensorrt engines ```bash python src/comfystream/scripts/setup_models.py --workspace /path/to/comfyui ``` +#### Using a custom models 
configuration +```bash +python src/comfystream/scripts/setup_models.py --workspace /path/to/comfyui --config models-minimal.yaml +``` +> The `--config` flag accepts a filename (searches in `configs/`), relative path, or absolute path to a custom models configuration file + ## Configuration Examples ### Custom Nodes (nodes.yaml) @@ -55,6 +67,10 @@ models: type: "checkpoint" ``` +> You can create custom model configurations for different use cases. See `configs/models-minimal.yaml` and `configs/models-pixelart.yaml` for examples. + +**Directory Downloads:** The script now supports downloading entire directories from HuggingFace! Add `is_directory: true` to your config. See `configs/models-ipadapter-example.yaml` for examples or read [DIRECTORY_DOWNLOADS.md](../../../DIRECTORY_DOWNLOADS.md) for the full guide. + ## Directory Structure ```sh diff --git a/src/comfystream/scripts/constraints.txt b/src/comfystream/scripts/constraints.txt index 3de1d200..54bd7cb6 100644 --- a/src/comfystream/scripts/constraints.txt +++ b/src/comfystream/scripts/constraints.txt @@ -8,7 +8,9 @@ tensorrt==10.12.0.36 tensorrt-cu12==10.12.0.36 xformers==0.0.32.post2 onnx==1.18.0 -onnxruntime==1.22.0 -onnxruntime-gpu==1.22.0 +onnxruntime>=1.22.0 +onnxruntime-gpu>=1.22.0 onnxmltools==1.14.0 cuda-python<13.0 +huggingface-hub>=0.20.0 +mediapipe==0.10.21 diff --git a/src/comfystream/scripts/setup_models.py b/src/comfystream/scripts/setup_models.py index 50a186f4..b607bdd9 100644 --- a/src/comfystream/scripts/setup_models.py +++ b/src/comfystream/scripts/setup_models.py @@ -1,5 +1,6 @@ import argparse import os +import sys from pathlib import Path import requests @@ -7,6 +8,13 @@ from tqdm import tqdm from utils import get_config_path, load_model_config +try: + from huggingface_hub import snapshot_download, hf_hub_download + HF_HUB_AVAILABLE = True +except ImportError: + HF_HUB_AVAILABLE = False + print("Warning: huggingface_hub not installed. 
Directory downloads from HuggingFace will not be available.") + def parse_args(): parser = argparse.ArgumentParser(description="Setup ComfyUI models") @@ -15,6 +23,9 @@ def parse_args(): default=os.environ.get("COMFY_UI_WORKSPACE", os.path.expanduser("~/comfyui")), help="ComfyUI workspace directory (default: ~/comfyui or $COMFY_UI_WORKSPACE)", ) + parser.add_argument('--config', + default=None, + help='Path to custom models config file (default: configs/models.yaml). Can be a filename (searches in configs/), or an absolute/relative path.') return parser.parse_args() @@ -48,6 +59,29 @@ def download_file(url, destination, description=None): destination.unlink() raise ValueError(f"LFS pointer detected. Failed to download: {url}") +def download_hf_directory(repo_id, subfolder, destination, description=None): + """Download an entire directory from HuggingFace Hub""" + if not HF_HUB_AVAILABLE: + raise RuntimeError("huggingface_hub is required for directory downloads. Install with: pip install huggingface_hub") + + destination = Path(destination) + destination.mkdir(parents=True, exist_ok=True) + + desc = description or f"Downloading {repo_id}/{subfolder}" + print(f"{desc}...") + + try: + # Download the specific subfolder to the destination + snapshot_download( + repo_id=repo_id, + allow_patterns=f"{subfolder}/*", + local_dir=destination.parent, + local_dir_use_symlinks=False + ) + print(f"✓ Downloaded {repo_id}/{subfolder} to {destination}") + except Exception as e: + print(f"❌ Error downloading {repo_id}/{subfolder}: {e}") + raise def setup_model_files(workspace_dir, config_path=None): """Download and setup required model files based on configuration""" @@ -74,8 +108,38 @@ def setup_model_files(workspace_dir, config_path=None): if not full_path.exists(): print(f"Downloading {model_info['name']}...") - download_file(model_info["url"], full_path, f"Downloading {model_info['name']}") - print(f"Downloaded {model_info['name']} to {full_path}") + + # Check if this is a 
 HuggingFace directory download + if model_info.get('is_directory', False): + # Parse HuggingFace URL to extract repo_id and subfolder + # Format: https://huggingface.co/{repo_id}/tree/main/{subfolder} + # Or: https://huggingface.co/{repo_id}/blob/main/{subfolder} + url = model_info['url'] + if 'huggingface.co' in url: + parts = url.split('huggingface.co/')[-1].split('/') + if len(parts) >= 4 and (parts[2] in ['tree', 'blob']): + repo_id = f"{parts[0]}/{parts[1]}" + subfolder = '/'.join(parts[4:]) if len(parts) > 4 else parts[3] + download_hf_directory( + repo_id=repo_id, + subfolder=subfolder, + destination=full_path, + description=f"Downloading {model_info['name']}" + ) + else: + print(f"❌ Invalid HuggingFace URL format: {url}") + continue + else: + print(f"❌ Directory download only supports HuggingFace URLs: {url}") + continue + else: + # Regular file download + download_file( + model_info['url'], + full_path, + f"Downloading {model_info['name']}" + ) + print(f"Downloaded {model_info['name']} to {full_path}") # Handle any extra files (like configs) if "extra_files" in model_info: @@ -112,8 +176,13 @@ def setup_directories(workspace_dir): "checkpoints/SD1.5", "controlnet", "vae", + "vae_approx", "tensorrt", "unet", + "loras/SD1.5", + "ipadapter", + "text_encoders/CLIPText", + "liveportrait_onnx/joyvasa_models", "LLM", ] for dir_name in model_dirs: @@ -124,9 +193,19 @@ def setup_models(): args = parse_args() workspace_dir = Path(args.workspace) + + # Resolve config path if provided + config_path = None + if args.config: + config_path = Path(args.config) + # If it's just a filename, look in configs directory + if not config_path.is_absolute() and "/" not in str(config_path): + config_path = Path("configs") / config_path + if not config_path.exists(): + print(f"Error: Config file not found at {config_path}") + sys.exit(1) setup_directories(workspace_dir) -setup_model_files(workspace_dir) +setup_model_files(workspace_dir, config_path=config_path) -setup_models() diff --git 
a/src/comfystream/scripts/setup_nodes.py b/src/comfystream/scripts/setup_nodes.py index 2aca1077..9bacc61a 100755 --- a/src/comfystream/scripts/setup_nodes.py +++ b/src/comfystream/scripts/setup_nodes.py @@ -21,6 +21,11 @@ def parse_args(): default=False, help="Update existing nodes to their specified branches", ) + parser.add_argument( + "--config", + default=None, + help="Path to custom nodes config file (default: configs/nodes.yaml). Can be a filename (searches in configs/), or an absolute/relative path.", + ) return parser.parse_args() @@ -122,10 +127,21 @@ def install_custom_nodes(workspace_dir, config_path=None, pull_branches=False): def setup_nodes(): args = parse_args() workspace_dir = Path(args.workspace) + + # Resolve config path if provided + config_path = None + if args.config: + config_path = Path(args.config) + # If it's just a filename, look in configs directory + if not config_path.is_absolute() and "/" not in str(config_path): + config_path = Path("configs") / config_path + if not config_path.exists(): + print(f"Error: Config file not found at {config_path}") + sys.exit(1) setup_environment(workspace_dir) setup_directories(workspace_dir) - install_custom_nodes(workspace_dir, pull_branches=args.pull_branches) + install_custom_nodes(workspace_dir, config_path=config_path, pull_branches=args.pull_branches) if __name__ == "__main__":