Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
40 commits
Select commit Hold shift + click to select a range
45a0683
fix: replace face_recognition with deepface and remove feer
IliasHad Dec 10, 2025
d59ef02
fix: remove emotion detection because we used emotion from deepface o…
IliasHad Dec 10, 2025
40ce41c
fix: remove face_matcher and face indexer and switch to smart groupin…
IliasHad Dec 10, 2025
2e256a9
fix: minor updates
IliasHad Dec 10, 2025
8a4ea95
fix: remove unused python packages
IliasHad Dec 10, 2025
b97298d
fix: improve dev docker build
IliasHad Dec 10, 2025
27203ab
fix: remove color_plattete from dominant color plugin
IliasHad Dec 10, 2025
77f962e
fix: add simple test case for frame analysis python script
IliasHad Dec 10, 2025
fa6ee8b
fix: update image tag
IliasHad Dec 10, 2025
98bbcd8
fix: update version to 0.1.12
IliasHad Dec 10, 2025
e211682
fix: update docker tags and revert multiple platform support
IliasHad Dec 10, 2025
1e7b6b8
fix: update workflow file
IliasHad Dec 10, 2025
1ebf430
fix: clean up docker matrix platform tags
IliasHad Dec 10, 2025
9641957
fix: clean up docker tags
IliasHad Dec 10, 2025
f656de7
fix: remove tags
IliasHad Dec 10, 2025
43092a9
fix: update docker tags
IliasHad Dec 10, 2025
c65309e
fix: update requirement to use CPU only for ML models to reduce image…
IliasHad Dec 10, 2025
16b2aae
fix: reduce image size
IliasHad Dec 10, 2025
af3214b
feat: add test github workflows
IliasHad Dec 10, 2025
b89123c
fix: update and use docker compose
IliasHad Dec 10, 2025
3b40a0b
fix: update workflow
IliasHad Dec 10, 2025
46e1dfd
fix: update docker setup
IliasHad Dec 10, 2025
492a450
fix: GEMINI_API_KEY is optional
IliasHad Dec 10, 2025
dae3750
fix: update gemini
IliasHad Dec 10, 2025
2a621d7
fix: add free up space to test workflow
IliasHad Dec 10, 2025
38527a0
fix: skip local llm tests because of file size of local llm
IliasHad Dec 10, 2025
9bd698a
fix: revert tf-keras to use with deep face
IliasHad Dec 10, 2025
5bf16c7
Merge branch 'main' into feat/drop-support-of-face-recoginition-library
IliasHad Dec 24, 2025
c99cdf7
fix: remove python test github workflow
IliasHad Dec 24, 2025
5c9989a
fix: drop support of dlip and use deep face for face recognition
IliasHad Dec 24, 2025
72c8db1
fix: update path for redis connection
IliasHad Dec 24, 2025
8795799
fix: update github workflow action to build docker images
IliasHad Dec 24, 2025
91252ff
fix: clean up storage for github actions
IliasHad Dec 24, 2025
8180a18
fix: split github action by platform and one build a time
IliasHad Dec 24, 2025
65f6392
fix: optimize docker background jobs image size
IliasHad Dec 26, 2025
7ec54fa
fix: model caching issue
IliasHad Dec 26, 2025
78500cd
fix: update test transcribe case
IliasHad Dec 26, 2025
6e04acd
fix: include python for node-gyp
IliasHad Dec 26, 2025
71140f4
fix: make test analyze script simpler for now
IliasHad Dec 26, 2025
057ff67
fix: update vector test timeout
IliasHad Dec 26, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
97 changes: 71 additions & 26 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@ env:
IMAGE_PREFIX: ${{ github.repository_owner }}/edit-mind

jobs:

build-and-push:
runs-on: ubuntu-latest
permissions:
Expand All @@ -25,7 +24,7 @@ jobs:
packages: write
artifact-metadata: write
attestations: write

strategy:
matrix:
service:
Expand All @@ -36,23 +35,26 @@ jobs:
dockerfile: docker/Dockerfile.web
context: .
platform:
- linux/amd64
- linux/arm64

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: "node-cleanup"
- name: Maximize build space
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo rm -rf /usr/local/.ghcup
sudo rm -rf /usr/share/swift
sudo rm -rf /usr/local/share/boost
sudo apt-get autoremove -y
sudo apt-get clean
docker system prune -af --volumes
sudo docker system prune -a -f
df -h

- name: Checkout code
uses: actions/checkout@v4

- name: Set up QEMU
uses: docker/setup-qemu-action@v3
Expand All @@ -73,42 +75,56 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Generate platform tag
id: platform-tag
- name: Generate platform suffix
id: platform
run: |
PLATFORM_TAG=$(echo "${{ matrix.platform }}" | sed 's/\//-/g')
echo "tag=${PLATFORM_TAG}" >> $GITHUB_OUTPUT
echo "suffix=$(echo ${{ matrix.platform }} | sed 's/\//-/g')" >> $GITHUB_OUTPUT

- name: Extract metadata (tags, labels)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service.name }}
tags: |
type=raw,value=latest-${{ steps.platform-tag.outputs.tag }},enable={{is_default_branch}}
type=semver,pattern={{version}}-${{ steps.platform-tag.outputs.tag }}
type=semver,pattern={{major}}.{{minor}}-${{ steps.platform-tag.outputs.tag }}
type=sha,prefix={{branch}}-${{ steps.platform-tag.outputs.tag }}-
- name: Build and push Docker image
type=ref,event=branch,suffix=-${{ steps.platform.outputs.suffix }}
type=semver,pattern={{version}},suffix=-${{ steps.platform.outputs.suffix }}
type=semver,pattern={{major}}.{{minor}},suffix=-${{ steps.platform.outputs.suffix }}

- name: Set up env files
run: |
cp .env.example .env
cp .env.system.example .env.system

- name: Start ChromaDB (testing)
run: docker compose up -d chroma

- name: Build and Test
uses: docker/build-push-action@v6
with:
context: ${{ matrix.service.context }}
file: ${{ matrix.service.dockerfile }}
target: testing
platforms: ${{ matrix.platform }}
push: false
cache-from: type=gha,scope=${{ matrix.service.name }}-${{ matrix.platform }}
cache-to: type=gha,mode=max,scope=${{ matrix.service.name }}-${{ matrix.platform }}

- name: Build and push Docker image
id: push
if: github.event_name != 'pull_request'
uses: docker/build-push-action@v6
with:
context: ${{ matrix.service.context }}
file: ${{ matrix.service.dockerfile }}
target: production
platforms: linux/arm64
platforms: ${{ matrix.platform }}
build-args: |
NODE_VERSION=22.20.0
PNPM_VERSION=10.20.0
push: ${{ github.event_name != 'pull_request' }}
load: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: |
type=gha,scope=${{ matrix.service.name }}-${{ matrix.platform }}
type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service.name }}:buildcache-${{ matrix.platform }}
cache-to: type=gha,mode=max,scope=${{ matrix.service.name }}-${{ matrix.platform }}
cache-from: type=gha,scope=${{ matrix.service.name }}-${{ matrix.platform }}
cache-to: type=gha,mode=max,scope=${{ matrix.service.name }}-${{ matrix.platform }}
sbom: false

- name: Generate artifact attestation
Expand All @@ -119,12 +135,42 @@ jobs:
subject-digest: ${{ steps.push.outputs.digest }}
push-to-registry: true

create-release:
create-manifest:
needs: build-and-push
runs-on: ubuntu-latest
if: github.event_name != 'pull_request'
permissions:
contents: write
packages: write
strategy:
matrix:
service:
- name: background-jobs
- name: web
steps:
- name: Log in to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Create and push manifest
run: |
TAG="${{ github.ref_name }}"
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
TAG="main"
else
# metadata-action's semver patterns strip the leading "v" from tag refs,
# so the per-platform images are tagged e.g. "0.1.12-linux-amd64", not "v0.1.12-...".
TAG="${TAG#v}"
fi

docker buildx imagetools create -t ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service.name }}:${TAG} \
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service.name }}:${TAG}-linux-amd64 \
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service.name }}:${TAG}-linux-arm64

create-release:
needs: create-manifest
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
Expand All @@ -135,7 +181,6 @@ jobs:
tag_name: ${{ github.ref_name }}
name: Release ${{ github.ref_name }}
draft: true
prerelease: false
files: |
docker-compose.yml
.env.example
Expand Down
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,5 @@ test-results/*
.env.dev
docker/.env.system
.env.system
!python/.faces
*.pkl
2 changes: 1 addition & 1 deletion apps/background-jobs/package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "background-jobs",
"version": "0.1.1",
"version": "0.1.2",
"private": true,
"type": "module",
"scripts": {
Expand Down
7 changes: 4 additions & 3 deletions apps/background-jobs/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,13 @@ import { BullMQAdapter } from '@bull-board/api/bullMQAdapter'
import { ExpressAdapter } from '@bull-board/express'
import foldersRoute from './routes/folders'
import stitcherRoute from './routes/stitcher'
import faceRoute from './routes/face'
import { config } from './config'
import { faceMatcherQueue, immichImporterQueue, videoQueue, videoStitcherQueue } from './queue'
import { immichImporterQueue, videoQueue, videoStitcherQueue } from './queue'
import './jobs/videoIndexer'
import './jobs/faceMatcher'
import './jobs/ImmichImporter'
import './jobs/videoStitcher'
import './jobs/faceLabelling'

import { pythonService } from '@shared/services/pythonService'
import { initializeWatchers } from './watcher'
Expand All @@ -28,7 +29,6 @@ if (process.env.NODE_ENV === 'development') {
createBullBoard({
queues: [
new BullMQAdapter(videoQueue),
new BullMQAdapter(faceMatcherQueue),
new BullMQAdapter(immichImporterQueue),
new BullMQAdapter(videoStitcherQueue),
],
Expand All @@ -40,6 +40,7 @@ if (process.env.NODE_ENV === 'development') {

app.use('/folders', foldersRoute)
app.use('/stitcher', stitcherRoute)
app.use('/face', faceRoute)

app.get('/health', (_req, res) => res.json({ status: 'ok' }))

Expand Down
8 changes: 2 additions & 6 deletions apps/background-jobs/src/jobs/ImmichImporter.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import { decryptApiKey } from '@shared/services/encryption'
import { Worker, Job } from 'bullmq'
import { connection } from '../queue'
import { connection } from '../services/redis'
import { ImmichImporterJobData } from '@shared/types/immich'
import { getAllImmichFaces } from '@shared/services/immich'
import { reindexFaces } from '@shared/utils/faces'
import { pythonService } from '@shared/services/pythonService'
import { prisma } from 'src/services/db'

async function processImmichImporterJob(job: Job<ImmichImporterJobData>) {
Expand All @@ -14,9 +12,7 @@ async function processImmichImporterJob(job: Job<ImmichImporterJobData>) {
})
if (!integration) throw new Error('Integration not found')
const apiKey = decryptApiKey(integration.immichApiKey)
const facesFiles = await getAllImmichFaces({ baseUrl: integration.immichBaseUrl, apiKey })
if (!pythonService.isServiceRunning) await pythonService.start()
if (job.id) await reindexFaces(facesFiles, job.id)
await getAllImmichFaces({ baseUrl: integration.immichBaseUrl, apiKey })
} catch (error) {
console.error(error)
}
Expand Down
130 changes: 130 additions & 0 deletions apps/background-jobs/src/jobs/faceLabelling.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
import { Worker, Job } from 'bullmq'
import { connection } from '../services/redis'
import { logger } from '@shared/services/logger'
import { FaceLabellingJobData } from '@shared/types/face'
import type { FaceDetectionData } from '@shared/types/unknownFace'
import { promises as fs } from 'fs'
import { existsSync } from 'fs'
import path from 'path'
import type { Scene } from '@shared/types/scene'
import { FACES_DIR, PROCESSED_VIDEOS_DIR, UNKNOWN_FACES_DIR } from '@shared/constants'
import { getByVideoSource, updateMetadata } from '@shared/services/vectorDb'

/**
 * Labels a batch of previously-unknown faces as a named person.
 *
 * For each face in `job.data.faces`:
 *  - reads its detection metadata JSON from UNKNOWN_FACES_DIR,
 *  - rewrites the face's placeholder id to `name` in the matching scenes,
 *    both in the vector DB (via getByVideoSource/updateMetadata) and in the
 *    on-disk `scenes.json` under PROCESSED_VIDEOS_DIR,
 *  - moves the face crop image into the person's folder under FACES_DIR,
 *  - deletes the consumed metadata JSON (or, if the JSON is missing,
 *    best-effort deletes the orphaned .jpg).
 *
 * Per-face failures are logged and do not abort the rest of the batch.
 *
 * @param job BullMQ job carrying `{ faces, name }` — the faces to label and
 *            the person name to assign.
 */
async function processFaceLabellingJob(job: Job<FaceLabellingJobData>) {
  const { faces, name } = job.data
  logger.info({ jobId: job.id }, 'Starting Face labelling job')

  // Ensure the destination folder for this person's face crops exists.
  const personDir = path.join(FACES_DIR, name)
  if (!existsSync(personDir)) {
    await fs.mkdir(personDir, { recursive: true })
  }

  for (const face of faces) {
    try {
      const jsonPath = path.join(UNKNOWN_FACES_DIR, face.jsonFile)

      if (existsSync(jsonPath)) {
        let faceData: FaceDetectionData
        try {
          faceData = JSON.parse(await fs.readFile(jsonPath, 'utf8'))
        } catch {
          // Corrupt/unreadable metadata: skip this face entirely (its image
          // and JSON are left in place).
          continue
        }

        const imageFile = faceData.image_file
        const srcImagePath = path.join(UNKNOWN_FACES_DIR, imageFile)
        const destImagePath = path.join(personDir, imageFile)

        // Scenes indexed in the vector DB for the video this face came from.
        const scenes = await getByVideoSource(faceData.video_path)
        // NOTE(review): .sort() mutates faceData.all_appearances in place —
        // acceptable here since faceData is re-read from disk per face.
        const sortedAppearances = faceData.all_appearances?.sort((a, b) => a.frame_index - b.frame_index)

        if (sortedAppearances && scenes && scenes.length > 0) {
          const firstAppearance = sortedAppearances[0]
          const lastAppearance = sortedAppearances[sortedAppearances.length - 1]

          for (const scene of scenes) {
            // Check if the face appears at any point during the scene
            const overlapsScene =
              firstAppearance.timestamp_seconds <= scene.endTime && lastAppearance.timestamp_seconds >= scene.startTime

            if (!overlapsScene) continue

            // Replace the placeholder face id with the assigned person name.
            if (scene.faces.includes(face.faceId)) {
              scene.faces = scene.faces.map((f) => (f === face.faceId ? name : f))
            }

            // A manually-labelled face is treated as a 100%-confidence match.
            if (scene.facesData) {
              scene.facesData = scene.facesData.map((f) =>
                f.name === face.faceId ? { ...f, name, confidence: 100 } : f
              )
            }

            await updateMetadata(scene)
          }

          // Mirror the same renaming into the on-disk scenes.json, so the
          // file copy stays consistent with the vector DB.
          const videoDir = path.join(PROCESSED_VIDEOS_DIR, path.basename(faceData.video_path))
          const scenesJsonPath = path.join(videoDir, 'scenes.json')

          if (existsSync(scenesJsonPath)) {
            const fileScenes: Scene[] = JSON.parse(await fs.readFile(scenesJsonPath, 'utf8'))
            let modified = false

            for (const scene of fileScenes) {
              // NOTE(review): this branch uses faceData.first/last_appearance
              // rather than the sortedAppearances bounds used above — if either
              // field is absent the optional chaining makes the comparison
              // false and the scene is skipped. Confirm both sources agree.
              const inRange =
                scene.startTime <= faceData.last_appearance?.timestamp_seconds &&
                scene.endTime >= faceData.first_appearance?.timestamp_seconds

              if (!inRange) continue

              let sceneModified = false

              if (scene.faces.includes(face.faceId)) {
                scene.faces = scene.faces.map((f) => (f === face.faceId ? name : f))
                sceneModified = true
              }

              if (scene.facesData) {
                const hadFace = scene.facesData.some((f) => f.name === face.faceId)
                scene.facesData = scene.facesData.map((f) =>
                  f.name === face.faceId ? { ...f, name, confidence: 100 } : f
                )
                if (hadFace) sceneModified = true
              }

              if (sceneModified) modified = true
            }

            // Only rewrite the file when something actually changed.
            if (modified) {
              await fs.writeFile(scenesJsonPath, JSON.stringify(fileScenes, null, 2), 'utf8')
            }
          }
        }
        // Move the face crop into the person's folder (copy + unlink rather
        // than rename, which also works across filesystems).
        if (existsSync(srcImagePath)) {
          await fs.copyFile(srcImagePath, destImagePath)
          await fs.unlink(srcImagePath)
        }
        // The metadata JSON is consumed regardless of whether any scene matched.
        try {
          await fs.unlink(jsonPath)
        } catch (error) {
          logger.warn(error)
        }
      } else {
        // No metadata JSON: best-effort cleanup of the orphaned image, derived
        // by swapping the .json extension for .jpg.
        try {
          const imageFile = face.jsonFile.replace('.json', '.jpg')
          const srcImagePath = path.join(UNKNOWN_FACES_DIR, imageFile)
          await fs.unlink(srcImagePath)
        } catch (error) {
          logger.warn(error)
        }
      }
    } catch (err) {
      // One bad face must not abort the remaining labels in the batch.
      logger.error('Label error: ' + err)
    }
  }
  logger.info({ jobId: job.id }, 'Face labelling job completed')
}

// BullMQ worker consuming the 'face-labelling' queue, handling up to
// three labelling jobs concurrently over the shared Redis connection.
const faceLabellingWorkerOptions = {
  connection,
  concurrency: 3,
}

export const faceLabellingWorker = new Worker('face-labelling', processFaceLabellingJob, faceLabellingWorkerOptions)
Loading
Loading