91 changes: 91 additions & 0 deletions .github/workflows/backend.yml
@@ -198,6 +198,19 @@ jobs:
context: "./backend"
ubuntu-version: '2204'
# CUDA 12 builds
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vibevoice'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -407,6 +420,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-vibevoice'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -459,6 +485,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-vibevoice'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -669,6 +708,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vibevoice'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -787,6 +839,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-vibevoice'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -827,6 +892,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-vibevoice'
runs-on: 'arc-runner-set'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
@@ -1319,6 +1397,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-vibevoice'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
ubuntu-version: '2204'
backend-jobs-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
strategy:
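Each matrix entry added above builds the same backend/Dockerfile.python image with a different base image and acceleration variant. As a rough local equivalent of the CUDA 12 entry, assuming the reusable workflow forwards the matrix values as the same build arguments the Makefile targets below use (how cuda-major-version and cuda-minor-version are passed is not shown in this diff and is left out here):

# Sketch of the build behind the '-gpu-nvidia-cuda-12-vibevoice' tag; BUILD_TYPE,
# BASE_IMAGE and BACKEND mirror the docker-build-vibevoice target in the Makefile below.
docker build \
  --build-arg BUILD_TYPE=cublas \
  --build-arg BASE_IMAGE=ubuntu:22.04 \
  --build-arg BACKEND=vibevoice \
  -f backend/Dockerfile.python \
  -t local-ai-backend:vibevoice \
  ./backend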
13 changes: 12 additions & 1 deletion Makefile
@@ -287,12 +287,14 @@ prepare-test-extra: protogen-python
$(MAKE) -C backend/python/diffusers
$(MAKE) -C backend/python/chatterbox
$(MAKE) -C backend/python/vllm
$(MAKE) -C backend/python/vibevoice

test-extra: prepare-test-extra
$(MAKE) -C backend/python/transformers test
$(MAKE) -C backend/python/diffusers test
$(MAKE) -C backend/python/chatterbox test
$(MAKE) -C backend/python/vllm test
$(MAKE) -C backend/python/vibevoice test

DOCKER_IMAGE?=local-ai
DOCKER_AIO_IMAGE?=local-ai-aio
@@ -389,6 +391,9 @@ backends/neutts: docker-build-neutts docker-save-neutts build
backends/vllm: docker-build-vllm docker-save-vllm build
./local-ai backends install "ocifile://$(abspath ./backend-images/vllm.tar)"

backends/vibevoice: docker-build-vibevoice docker-save-vibevoice build
./local-ai backends install "ocifile://$(abspath ./backend-images/vibevoice.tar)"

build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh

@@ -445,6 +450,9 @@ docker-save-kitten-tts: backend-images
docker-save-chatterbox: backend-images
docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar

docker-save-vibevoice: backend-images
docker save local-ai-backend:vibevoice -o backend-images/vibevoice.tar

docker-build-neutts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend

@@ -523,10 +531,13 @@ docker-build-bark:
docker-build-chatterbox:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend

docker-build-vibevoice:
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vibevoice -f backend/Dockerfile.python --build-arg BACKEND=vibevoice ./backend

docker-build-exllama2:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .

docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2

########################################################
### END Backends
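The new Makefile targets give a local path to the same image without going through CI. A minimal usage sketch based on the targets added above (BUILD_TYPE=cublas is only an example value; it otherwise falls back to whatever the environment sets):

# backends/vibevoice depends on docker-build-vibevoice, docker-save-vibevoice and build,
# so one invocation builds the image, exports backend-images/vibevoice.tar and installs
# it into the freshly built local-ai binary.
make BUILD_TYPE=cublas backends/vibevoice

# Or run the steps individually:
make docker-build-vibevoice
make docker-save-vibevoice
./local-ai backends install "ocifile://$(pwd)/backend-images/vibevoice.tar"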
105 changes: 105 additions & 0 deletions backend/index.yaml
@@ -105,7 +105,7 @@
capabilities:
nvidia: "cuda12-rfdetr"
intel: "intel-rfdetr"
#amd: "rocm-rfdetr"
nvidia-l4t: "nvidia-l4t-arm64-rfdetr"
default: "cpu-rfdetr"
nvidia-cuda-13: "cuda13-rfdetr"
@@ -390,6 +390,28 @@
nvidia-cuda-12: "cuda12-chatterbox"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-chatterbox"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-chatterbox"
- &vibevoice
urls:
- https://github.com/microsoft/VibeVoice
description: |
VibeVoice-Realtime is a real-time text-to-speech model that generates natural-sounding speech.
tags:
- text-to-speech
- TTS
license: mit
name: "vibevoice"
alias: "vibevoice"
capabilities:
nvidia: "cuda12-vibevoice"
intel: "intel-vibevoice"
amd: "rocm-vibevoice"
nvidia-l4t: "nvidia-l4t-vibevoice"
default: "cpu-vibevoice"
nvidia-cuda-13: "cuda13-vibevoice"
nvidia-cuda-12: "cuda12-vibevoice"
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice"
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
- &piper
name: "piper"
uri: "quay.io/go-skynet/local-ai-backends:latest-piper"
@@ -967,7 +989,7 @@
capabilities:
nvidia: "cuda12-rfdetr-development"
intel: "intel-rfdetr-development"
#amd: "rocm-rfdetr-development"
nvidia-l4t: "nvidia-l4t-arm64-rfdetr-development"
default: "cpu-rfdetr-development"
nvidia-cuda-13: "cuda13-rfdetr-development"
@@ -1253,7 +1275,7 @@
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-diffusers
## exllama2
- !!merge <<: *exllama2
name: "exllama2-development"
capabilities:
@@ -1571,3 +1593,86 @@
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-chatterbox"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-chatterbox
## vibevoice
- !!merge <<: *vibevoice
name: "vibevoice-development"
capabilities:
nvidia: "cuda12-vibevoice-development"
intel: "intel-vibevoice-development"
amd: "rocm-vibevoice-development"
nvidia-l4t: "nvidia-l4t-vibevoice-development"
default: "cpu-vibevoice-development"
nvidia-cuda-13: "cuda13-vibevoice-development"
nvidia-cuda-12: "cuda12-vibevoice-development"
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice-development"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice-development"
- !!merge <<: *vibevoice
name: "cpu-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-vibevoice"
mirrors:
- localai/localai-backends:latest-cpu-vibevoice
- !!merge <<: *vibevoice
name: "cpu-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-vibevoice"
mirrors:
- localai/localai-backends:master-cpu-vibevoice
- !!merge <<: *vibevoice
name: "cuda12-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-vibevoice
- !!merge <<: *vibevoice
name: "cuda12-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-vibevoice
- !!merge <<: *vibevoice
name: "intel-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-intel-vibevoice
- !!merge <<: *vibevoice
name: "intel-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-intel-vibevoice
- !!merge <<: *vibevoice
name: "rocm-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-vibevoice
- !!merge <<: *vibevoice
name: "rocm-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-vibevoice
- !!merge <<: *vibevoice
name: "nvidia-l4t-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-vibevoice"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-vibevoice
- !!merge <<: *vibevoice
name: "nvidia-l4t-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-vibevoice"
mirrors:
- localai/localai-backends:master-nvidia-l4t-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-nvidia-l4t-arm64-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-vibevoice"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-nvidia-l4t-arm64-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice
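The &vibevoice anchor defines the shared metadata once; each !!merge <<: *vibevoice entry then only overrides name, uri and mirrors, and the capability map selects the variant for the host (for example, nvidia-cuda-12 resolves to cuda12-vibevoice, i.e. the latest-gpu-nvidia-cuda-12-vibevoice image). A hedged usage sketch follows; installing a gallery backend by name is an assumed invocation, since this diff only demonstrates the ocifile:// form:

# Assumed install-by-name; the capability map above picks the cpu/cuda12/cuda13/intel/rocm/l4t
# image automatically for the detected hardware.
local-ai backends install vibevoice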
23 changes: 23 additions & 0 deletions backend/python/vibevoice/Makefile
@@ -0,0 +1,23 @@
.PHONY: vibevoice
vibevoice:
bash install.sh

.PHONY: run
run: vibevoice
@echo "Running vibevoice..."
bash run.sh
@echo "vibevoice run."

.PHONY: test
test: vibevoice
@echo "Testing vibevoice..."
bash test.sh
@echo "vibevoice tested."

.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__
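The new per-backend Makefile follows the same layout as the other python backends: install.sh prepares the environment, run.sh starts the backend, and test.sh exercises it, which is what the new test-extra entry in the top-level Makefile relies on. A quick usage sketch:

# Install dependencies and run the backend's tests in isolation, mirroring what
# 'make test-extra' does from the repository root.
make -C backend/python/vibevoice        # runs install.sh
make -C backend/python/vibevoice test   # runs test.sh (depends on the install step)
make -C backend/python/vibevoice clean  # removes venv, __pycache__ and the generated protobuf stubs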