diff --git a/docker-compose.components.yml b/docker-compose.components.yml
index d2e530a9..7ff95aad 100644
--- a/docker-compose.components.yml
+++ b/docker-compose.components.yml
@@ -148,6 +148,31 @@ services:
     image: ${REGISTRY}openmpf_llama_video_summarization:${TAG}
     build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlamaVideoSummarization
 
+  llava-detection:
+    <<: *component-base
+    image: ${REGISTRY}openmpf_llava_detection:${TAG}
+    build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection
+
+  llava-detection-server:
+    image: ${REGISTRY}openmpf_llava_detection_server:${TAG}
+    build: ${OPENMPF_PROJECTS_PATH}/openmpf-components/python/LlavaDetection/ollama_server
+    deploy:
+      mode: global
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    environment:
+      # Use "NVIDIA_VISIBLE_DEVICES=" (no value) for CPU mode.
+      # CPU mode is very slow and not recommended.
+      # To use a GPU, specify a GPU id.
+      - NVIDIA_VISIBLE_DEVICES=
+      - OLLAMA_KEEP_ALIVE=-1m
+      # https://github.com/ollama/ollama/issues/5756#issuecomment-2239896500
+      - OLLAMA_NUM_PARALLEL=1
+      - OLLAMA_MAX_LOADED_MODELS=1
+      # - OLLAMA_DEBUG=1
+    command: [serve]
+
   mog-motion-detection:
     <<: *component-base
     image: ${REGISTRY}openmpf_mog_motion_detection:${TAG}