From b7bee006d78bac9909b51f8b6ec5c9050ceca9c9 Mon Sep 17 00:00:00 2001 From: ZachCafego Date: Fri, 31 Jan 2025 14:51:27 -0500 Subject: [PATCH 1/6] Added llava detection component and server to docker compose --- docker-compose.components.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docker-compose.components.yml b/docker-compose.components.yml index 5da534ea..64b5578d 100644 --- a/docker-compose.components.yml +++ b/docker-compose.components.yml @@ -143,6 +143,27 @@ services: image: ${REGISTRY}openmpf_keyword_tagging:${TAG} build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/cpp/KeywordTagging + llava-detection: + <<: *detection-component-base + image: ${REGISTRY}openmpf_llava_detection:${TAG} + build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection + + llava-detection-server: + <<: *detection-component-base + image: ${REGISTRY}ollama_server:${TAG} + build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection/ollama_server + deploy: + mode: global + ulimits: + memlock: -1 + stack: 67108864 + ports: + - "11434:11434" + runtime: nvidia + environment: + NVIDIA_VISIBLE_DEVICES: all + command: [serve] + mog-motion-detection: <<: *detection-component-base image: ${REGISTRY}openmpf_mog_motion_detection:${TAG} build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/cpp/MogMotionDetection From 4160ac86ffc357817850d7a2cc7c3f78eacf94d6 Mon Sep 17 00:00:00 2001 From: ZachCafego Date: Thu, 6 Feb 2025 16:03:16 -0500 Subject: [PATCH 2/6] Removed runtime nvidia line. 
--- docker-compose.components.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docker-compose.components.yml b/docker-compose.components.yml index 6ba5ce18..e69f72b5 100644 --- a/docker-compose.components.yml +++ b/docker-compose.components.yml @@ -155,7 +155,6 @@ services: stack: 67108864 ports: - "11434:11434" - runtime: nvidia environment: NVIDIA_VISIBLE_DEVICES: all command: [serve] From 3c9348e6e318b63056622dd3bd1187e467769ca7 Mon Sep 17 00:00:00 2001 From: ZachCafego Date: Thu, 27 Feb 2025 07:49:33 -0500 Subject: [PATCH 3/6] Addressed changes to llava-detection-server --- docker-compose.components.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker-compose.components.yml b/docker-compose.components.yml index 2ae66d71..b8df0ec0 100644 --- a/docker-compose.components.yml +++ b/docker-compose.components.yml @@ -156,7 +156,6 @@ services: build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection llava-detection-server: - <<: *detection-component-base image: ${REGISTRY}ollama_server:${TAG} build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection/ollama_server deploy: @@ -164,8 +163,6 @@ services: ulimits: memlock: -1 stack: 67108864 - ports: - - "11434:11434" environment: NVIDIA_VISIBLE_DEVICES: all command: [serve] From 242d126b24968c607421d9f29530566a83eb9100 Mon Sep 17 00:00:00 2001 From: jrobble Date: Wed, 18 Mar 2026 17:23:45 +0000 Subject: [PATCH 4/6] Cleanup. 
--- docker-compose.components.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.components.yml b/docker-compose.components.yml index f49d27b6..a810bb0f 100644 --- a/docker-compose.components.yml +++ b/docker-compose.components.yml @@ -152,7 +152,7 @@ services: <<: *detection-component-base image: ${REGISTRY}openmpf_llava_detection:${TAG} build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection - + llava-detection-server: image: ${REGISTRY}openmpf_llava_detection_server:${TAG} build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection/ollama_server @@ -175,7 +175,7 @@ services: # https://github.com/ollama/ollama/issues/5756#issuecomment-2239896500 - OLLAMA_NUM_PARALLEL=1 - OLLAMA_MAX_LOADED_MODELS=1 - - OLLAMA_DEBUG=1 + # - OLLAMA_DEBUG=1 command: [serve] mog-motion-detection: From 89f8d2ac9236c55b1d9919474836279beb0be0a2 Mon Sep 17 00:00:00 2001 From: jrobble Date: Wed, 18 Mar 2026 13:53:21 -0400 Subject: [PATCH 5/6] Use component-base. --- docker-compose.components.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.components.yml b/docker-compose.components.yml index a810bb0f..6adbb127 100644 --- a/docker-compose.components.yml +++ b/docker-compose.components.yml @@ -149,7 +149,7 @@ services: build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlamaVideoSummarization llava-detection: - <<: *detection-component-base + <<: *component-base image: ${REGISTRY}openmpf_llava_detection:${TAG} build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection From c451769f6f2b34b449c165425b3b77d5d265e167 Mon Sep 17 00:00:00 2001 From: jrobble Date: Thu, 19 Mar 2026 21:26:55 -0400 Subject: [PATCH 6/6] Fix GPU spec. 
--- docker-compose.components.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/docker-compose.components.yml b/docker-compose.components.yml index 6adbb127..7ff95aad 100644 --- a/docker-compose.components.yml +++ b/docker-compose.components.yml @@ -158,19 +158,14 @@ services: build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection/ollama_server deploy: mode: global - resources: - reservations: - devices: - - driver: nvidia - # Ollama can run with a GPU, but to use CPU mode we need to blank out the - # device ids; otherwise the server will see all of the GPUs on this host - # and not use CPU mode. - device_ids: [''] - capabilities: [gpu] ulimits: memlock: -1 stack: 67108864 environment: + # Use "NVIDIA_VISIBLE_DEVICES=" (no value) for CPU mode. + # CPU mode is very slow and not recommended. + # To use a GPU, specify a GPU id. + - NVIDIA_VISIBLE_DEVICES= - OLLAMA_KEEP_ALIVE=-1m # https://github.com/ollama/ollama/issues/5756#issuecomment-2239896500 - OLLAMA_NUM_PARALLEL=1