From f46e88c8ce6a4e74738bea023f43d5e5df714cde Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Thu, 18 Sep 2025 10:20:31 -0400
Subject: [PATCH 01/44] feat: add base and runner classes for generic
post-processing framework
---
ami/ml/post_processing/base.py | 24 ++++++++++++++++++++++++
ami/ml/post_processing/runner.py | 28 ++++++++++++++++++++++++++++
2 files changed, 52 insertions(+)
create mode 100644 ami/ml/post_processing/base.py
create mode 100644 ami/ml/post_processing/runner.py
diff --git a/ami/ml/post_processing/base.py b/ami/ml/post_processing/base.py
new file mode 100644
index 000000000..759438ab0
--- /dev/null
+++ b/ami/ml/post_processing/base.py
@@ -0,0 +1,24 @@
+import logging
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+POST_PROCESSING_REGISTRY = {}
+
+
+def register_step(cls):
+ """Decorator to register a post-processing step."""
+ POST_PROCESSING_REGISTRY[cls.name] = cls()
+ return cls
+
+
+class PostProcessingStep:
+ """Base interface for a post-processing step."""
+
+ name: str = "base_step"
+ description: str = "Base step (does nothing)"
+ default_enabled: bool = False
+
+ def apply(self, pipeline_input: Any, pipeline_output: Any) -> tuple[Any, Any]:
+ """Process and return modified input/output."""
+ raise NotImplementedError
diff --git a/ami/ml/post_processing/runner.py b/ami/ml/post_processing/runner.py
new file mode 100644
index 000000000..3a660f6da
--- /dev/null
+++ b/ami/ml/post_processing/runner.py
@@ -0,0 +1,28 @@
+import logging
+from typing import Any
+
+from .base import POST_PROCESSING_REGISTRY
+
+logger = logging.getLogger(__name__)
+
+
+def run_postprocessing(
+ pipeline_input: Any,
+ pipeline_output: Any,
+ enabled_steps: list[str] | None = None,
+) -> tuple[Any, Any]:
+ """
+ Run all enabled post-processing steps on pipeline results.
+ """
+ steps = enabled_steps or [name for name, step in POST_PROCESSING_REGISTRY.items() if step.default_enabled]
+
+ logger.info(f"Running post-processing steps: {steps}")
+
+ for name in steps:
+ step = POST_PROCESSING_REGISTRY.get(name)
+ if not step:
+ logger.warning(f"Post-processing step '{name}' not found, skipping")
+ continue
+ pipeline_input, pipeline_output = step.apply(pipeline_input, pipeline_output)
+
+ return pipeline_input, pipeline_output
From d86ea4de7b6e7282858231ff99380aa1f5e7b8d5 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:09:08 -0400
Subject: [PATCH 02/44] feat: add post-processing framework base
post-processing task class
---
ami/ml/post_processing/__init__.py | 1 +
ami/ml/post_processing/base.py | 82 +++++++++++++++++++++++++-----
2 files changed, 69 insertions(+), 14 deletions(-)
create mode 100644 ami/ml/post_processing/__init__.py
diff --git a/ami/ml/post_processing/__init__.py b/ami/ml/post_processing/__init__.py
new file mode 100644
index 000000000..3517ed47c
--- /dev/null
+++ b/ami/ml/post_processing/__init__.py
@@ -0,0 +1 @@
+from . import small_size_filter # noqa: F401
diff --git a/ami/ml/post_processing/base.py b/ami/ml/post_processing/base.py
index 759438ab0..055f925ce 100644
--- a/ami/ml/post_processing/base.py
+++ b/ami/ml/post_processing/base.py
@@ -1,24 +1,78 @@
+# ami/ml/post_processing/base.py
+
+import abc
import logging
from typing import Any
-logger = logging.getLogger(__name__)
+from ami.jobs.models import Job
+from ami.ml.models import Algorithm, AlgorithmTaskType
+
+# Registry of available post-processing tasks
+POSTPROCESSING_TASKS: dict[str, type["BasePostProcessingTask"]] = {}
+
+
+def register_postprocessing_task(task_cls: type["BasePostProcessingTask"]):
+ """
+ Decorator to register a post-processing task in the global registry.
+ Each task must define a unique `key`.
+ Ensures an Algorithm entry exists for this task.
+ """
+ if not hasattr(task_cls, "key") or not task_cls.key:
+ raise ValueError(f"Task {task_cls.__name__} missing required 'key' attribute")
+
+ # Register the task
+ POSTPROCESSING_TASKS[task_cls.key] = task_cls
+
+ # Ensure Algorithm object exists for this task
+ algorithm, _ = Algorithm.objects.get_or_create(
+ name=task_cls.__name__,
+ defaults={
+ "description": f"Post-processing task: {task_cls.key}",
+ "task_type": AlgorithmTaskType.POST_PROCESSING.value,
+ },
+ )
+
+ # Attach the Algorithm object to the task class
+ task_cls.algorithm = algorithm
+
+ return task_cls
+
-POST_PROCESSING_REGISTRY = {}
+def get_postprocessing_task(name: str) -> type["BasePostProcessingTask"] | None:
+ """
+ Get a task class by its registry key.
+ Returns None if not found.
+ """
+ return POSTPROCESSING_TASKS.get(name)
-def register_step(cls):
- """Decorator to register a post-processing step."""
- POST_PROCESSING_REGISTRY[cls.name] = cls()
- return cls
+class BasePostProcessingTask(abc.ABC):
+ """
+ Abstract base class for all post-processing tasks.
+ """
+ # Each task must override these
+ key: str = ""
+ name: str = ""
-class PostProcessingStep:
- """Base interface for a post-processing step."""
+ def __init__(self, **config: Any):
+ """
+ Initialize task with configuration parameters.
+ """
+ self.config: dict[str, Any] = config
+ self.logger = logging.getLogger(f"ami.post_processing.{self.key}")
- name: str = "base_step"
- description: str = "Base step (does nothing)"
- default_enabled: bool = False
+ @abc.abstractmethod
+ def run(self, job: Job) -> None:
+ """
+ Run the task logic.
+ Must be implemented by subclasses.
+ The job parameter provides context (project, logs, etc.).
+ """
+ raise NotImplementedError("Subclasses must implement run()")
- def apply(self, pipeline_input: Any, pipeline_output: Any) -> tuple[Any, Any]:
- """Process and return modified input/output."""
- raise NotImplementedError
+ def log_config(self, job: Job):
+ """
+ Helper to log the task configuration at start.
+ """
+ job.logger.info(f"Running task {self.name} ({self.key}) with config: {self.config}")
From 2c0f78fbd87b4984342ce88b45255261b1868fb8 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:10:52 -0400
Subject: [PATCH 03/44] feat: add small size filter post-processing task class
---
ami/ml/post_processing/small_size_filter.py | 92 +++++++++++++++++++++
1 file changed, 92 insertions(+)
create mode 100644 ami/ml/post_processing/small_size_filter.py
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
new file mode 100644
index 000000000..3487bab42
--- /dev/null
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -0,0 +1,92 @@
+from django.db import transaction
+from django.utils import timezone
+
+from ami.jobs.models import Job
+from ami.main.models import Detection, SourceImageCollection, Taxon, TaxonRank
+from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
+
+
+@register_postprocessing_task
+class SmallSizeFilter(BasePostProcessingTask):
+ key = "small_size_filter"
+ name = "Small Size Filter"
+
+ def run(self, job: "Job") -> None:
+ threshold = self.config.get("size_threshold", 0.01)
+ collection_id = self.config.get("source_image_collection_id")
+ # Get or create the "Not identifiable" taxon
+
+ not_identifiable_taxon, _ = Taxon.objects.get_or_create(
+ name="Not identifiable",
+ defaults={
+ "rank": TaxonRank.UNKNOWN,
+ "notes": "Auto-generated taxon for small size filter",
+ },
+ )
+ job.logger.info(f"=== Starting {self.name} ===")
+
+ if not collection_id:
+ msg = "Missing required config param: source_image_collection_id"
+ job.logger.error(msg)
+ raise ValueError(msg)
+
+ try:
+ collection = SourceImageCollection.objects.get(pk=collection_id)
+ job.logger.info(f"Loaded SourceImageCollection {collection_id} " f"(Project={collection.project_id})")
+ except SourceImageCollection.DoesNotExist:
+ msg = f"SourceImageCollection {collection_id} not found"
+ job.logger.error(msg)
+ raise ValueError(msg)
+
+ detections = Detection.objects.filter(source_image__collections=collection)
+ total = detections.count()
+ job.logger.info(f"Found {total} detections in collection {collection_id}")
+
+ modified = 0
+
+ for det in detections.iterator():
+ bbox = det.get_bbox()
+ if not bbox:
+ job.logger.debug(f"Detection {det.pk}: no bbox, skipping")
+ continue
+
+ img_w, img_h = det.source_image.width, det.source_image.height
+ if not img_w or not img_h:
+ job.logger.debug(f"Detection {det.pk}: missing source image dims, skipping")
+ continue
+
+ det_area = det.width() * det.height()
+ img_area = img_w * img_h
+ rel_area = det_area / img_area if img_area else 0
+
+ job.logger.debug(
+ f"Detection {det.pk}: area={det_area}, rel_area={rel_area:.4f}, " f"threshold={threshold:.4f}"
+ )
+
+ if rel_area < threshold:
+ with transaction.atomic():
+ # Mark existing classifications as non-terminal
+ det.classifications.update(terminal=False)
+
+ # Create the new "Not identifiable" classification
+ det.classifications.create(
+ detection=det,
+ taxon=not_identifiable_taxon,
+ score=1.0,
+ terminal=True,
+ timestamp=timezone.now(),
+ algorithm=self.algorithm,
+ )
+
+ modified += 1
+ job.logger.debug(f"Detection {det.pk}: marked as 'Not identifiable'")
+
+ job.logger.info(f"=== Completed {self.name}: {modified}/{total} detections modified ===")
+
+ job.result = {
+ "detections_total": total,
+ "detections_modified": modified,
+ "threshold": threshold,
+ "collection_id": collection_id,
+ }
+ job.save(update_fields=["result"])
From ffba70964ec9e7b6f328c31a2d8f5f173ea210e7 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:12:05 -0400
Subject: [PATCH 04/44] feat: add post processing job type
---
ami/jobs/models.py | 40 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/ami/jobs/models.py b/ami/jobs/models.py
index f7b85283b..39e7a0ff3 100644
--- a/ami/jobs/models.py
+++ b/ami/jobs/models.py
@@ -645,6 +645,37 @@ def run(cls, job: "Job"):
job.update_status(JobState.SUCCESS, save=True)
+class PostProcessingJob(JobType):
+ name = "Post Processing"
+ key = "post_processing"
+
+ @classmethod
+ def run(cls, job: "Job"):
+ import ami.ml.post_processing # noqa: F401
+ from ami.ml.post_processing.base import get_postprocessing_task
+
+ job.progress.add_stage(cls.name, key=cls.key)
+ job.update_status(JobState.STARTED)
+ job.started_at = datetime.datetime.now()
+ job.save()
+
+ params = job.params or {}
+ task_key: str = params.get("task", "")
+ config = params.get("config", {})
+ job.logger.info(f"Post-processing task: {task_key} with params: {job.params}")
+
+ task_cls = get_postprocessing_task(task_key)
+ if not task_cls:
+ raise ValueError(f"Unknown post-processing task '{task_key}'")
+
+ task = task_cls(**config)
+ task.run(job)
+ job.progress.update_stage(cls.key, status=JobState.SUCCESS, progress=1)
+ job.finished_at = datetime.datetime.now()
+ job.update_status(JobState.SUCCESS)
+ job.save()
+
+
class UnknownJobType(JobType):
name = "Unknown"
key = "unknown"
@@ -654,7 +685,14 @@ def run(cls, job: "Job"):
raise ValueError(f"Unknown job type '{job.job_type()}'")
-VALID_JOB_TYPES = [MLJob, SourceImageCollectionPopulateJob, DataStorageSyncJob, UnknownJobType, DataExportJob]
+VALID_JOB_TYPES = [
+ MLJob,
+ SourceImageCollectionPopulateJob,
+ DataStorageSyncJob,
+ UnknownJobType,
+ DataExportJob,
+ PostProcessingJob,
+]
def get_job_type_by_key(key: str) -> type[JobType] | None:
From 63cd84b456aa4e819ceb32da495d9b3c489cc08d Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:12:33 -0400
Subject: [PATCH 05/44] feat: trigger small size filter post processing task
from admin page
---
ami/main/admin.py | 24 +++++++++++++++++++++++-
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/ami/main/admin.py b/ami/main/admin.py
index dd2cea5c5..a3f4d6aae 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -10,6 +10,7 @@
import ami.utils
from ami import tasks
+from ami.jobs.models import Job
from ami.ml.models.project_pipeline_config import ProjectPipelineConfig
from ami.ml.tasks import remove_duplicate_classifications
@@ -619,7 +620,28 @@ def populate_collection_async(self, request: HttpRequest, queryset: QuerySet[Sou
f"Populating {len(queued_tasks)} collection(s) background tasks: {queued_tasks}.",
)
- actions = [populate_collection, populate_collection_async]
+ @admin.action(description="Run Small Size Filter post-processing task (async)")
+ def run_small_size_filter(self, request: HttpRequest, queryset: QuerySet[SourceImageCollection]) -> None:
+ jobs = []
+ for collection in queryset:
+ job = Job.objects.create(
+ name=f"Post-processing: SmallSizeFilter on Collection {collection.pk}",
+ project=collection.project,
+ job_type_key="post_processing",
+ params={
+ "task": "small_size_filter",
+ "config": {
+ "size_threshold": 0.01, # default threshold
+ "source_image_collection_id": collection.pk,
+ },
+ },
+ )
+ job.enqueue()
+ jobs.append(job.pk)
+
+ self.message_user(request, f"Queued Small Size Filter for {queryset.count()} collection(s). Jobs: {jobs}")
+
+ actions = [populate_collection, populate_collection_async, run_small_size_filter]
# Hide images many-to-many field from form. This would list all source images in the database.
exclude = ("images",)
From cab62bf02e88e2abd13edd27aa6af7e428601d2f Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:13:22 -0400
Subject: [PATCH 06/44] feat: add a new algorithm task type for post-processing
---
ami/ml/models/algorithm.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/ami/ml/models/algorithm.py b/ami/ml/models/algorithm.py
index 48b2e2336..0e9df4609 100644
--- a/ami/ml/models/algorithm.py
+++ b/ami/ml/models/algorithm.py
@@ -176,6 +176,7 @@ class AlgorithmTaskType(str, enum.Enum):
DEPTH_ESTIMATION = "depth_estimation"
POSE_ESTIMATION = "pose_estimation"
SIZE_ESTIMATION = "size_estimation"
+ POST_PROCESSING = "post_processing"
OTHER = "other"
UNKNOWN = "unknown"
From 6d0e284d0ceb952945c2c50767a3737bcd06603c Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:15:57 -0400
Subject: [PATCH 07/44] chore: deleted runner.py
---
ami/ml/post_processing/runner.py | 28 ----------------------------
1 file changed, 28 deletions(-)
delete mode 100644 ami/ml/post_processing/runner.py
diff --git a/ami/ml/post_processing/runner.py b/ami/ml/post_processing/runner.py
deleted file mode 100644
index 3a660f6da..000000000
--- a/ami/ml/post_processing/runner.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import logging
-from typing import Any
-
-from .base import POST_PROCESSING_REGISTRY
-
-logger = logging.getLogger(__name__)
-
-
-def run_postprocessing(
- pipeline_input: Any,
- pipeline_output: Any,
- enabled_steps: list[str] | None = None,
-) -> tuple[Any, Any]:
- """
- Run all enabled post-processing steps on pipeline results.
- """
- steps = enabled_steps or [name for name, step in POST_PROCESSING_REGISTRY.items() if step.default_enabled]
-
- logger.info(f"Running post-processing steps: {steps}")
-
- for name in steps:
- step = POST_PROCESSING_REGISTRY.get(name)
- if not step:
- logger.warning(f"Post-processing step '{name}' not found, skipping")
- continue
- pipeline_input, pipeline_output = step.apply(pipeline_input, pipeline_output)
-
- return pipeline_input, pipeline_output
From 4cfe2d8dcc21447470adf38c18c3ebc567f1d9e6 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 13:19:12 -0400
Subject: [PATCH 08/44] feat: add migration for creating a new job type
---
.../migrations/0018_alter_job_job_type_key.py | 29 +++++++++++++++++++
1 file changed, 29 insertions(+)
create mode 100644 ami/jobs/migrations/0018_alter_job_job_type_key.py
diff --git a/ami/jobs/migrations/0018_alter_job_job_type_key.py b/ami/jobs/migrations/0018_alter_job_job_type_key.py
new file mode 100644
index 000000000..e51a84b08
--- /dev/null
+++ b/ami/jobs/migrations/0018_alter_job_job_type_key.py
@@ -0,0 +1,29 @@
+# Generated by Django 4.2.10 on 2025-09-30 12:25
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("jobs", "0017_alter_job_logs_alter_job_progress"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="job",
+ name="job_type_key",
+ field=models.CharField(
+ choices=[
+ ("ml", "ML pipeline"),
+ ("populate_captures_collection", "Populate captures collection"),
+ ("data_storage_sync", "Data storage sync"),
+ ("unknown", "Unknown"),
+ ("data_export", "Data Export"),
+ ("post_processing", "Post Processing"),
+ ],
+ default="unknown",
+ max_length=255,
+ verbose_name="Job Type",
+ ),
+ ),
+ ]
From b42e06929e493e0e9c7e42df74f476fe9216a4f8 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 30 Sep 2025 14:35:31 -0400
Subject: [PATCH 09/44] fix: fix an import error with the AlgorithmTaskType
---
ami/ml/post_processing/base.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ami/ml/post_processing/base.py b/ami/ml/post_processing/base.py
index 055f925ce..baa1c452e 100644
--- a/ami/ml/post_processing/base.py
+++ b/ami/ml/post_processing/base.py
@@ -5,7 +5,8 @@
from typing import Any
from ami.jobs.models import Job
-from ami.ml.models import Algorithm, AlgorithmTaskType
+from ami.ml.models import Algorithm
+from ami.ml.models.algorithm import AlgorithmTaskType
# Registry of available post-processing tasks
POSTPROCESSING_TASKS: dict[str, type["BasePostProcessingTask"]] = {}
From cb7c83a465c28f5fc8eb34dee6c66f8265851780 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Thu, 2 Oct 2025 10:54:09 -0400
Subject: [PATCH 10/44] feat: update identification history of occurrences in
SmallSizeFilter
---
ami/ml/post_processing/small_size_filter.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
index 3487bab42..784647dd8 100644
--- a/ami/ml/post_processing/small_size_filter.py
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -2,7 +2,7 @@
from django.utils import timezone
from ami.jobs.models import Job
-from ami.main.models import Detection, SourceImageCollection, Taxon, TaxonRank
+from ami.main.models import Detection, Identification, SourceImageCollection, Taxon, TaxonRank
from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
@@ -77,7 +77,14 @@ def run(self, job: "Job") -> None:
timestamp=timezone.now(),
algorithm=self.algorithm,
)
-
+ # Also create/update Identification for the linked occurrence
+ if det.occurrence:
+ Identification.objects.create(
+ occurrence=det.occurrence,
+ taxon=not_identifiable_taxon,
+ user=None, # since this comes from a post-processing algorithm not a human
+ comment=f"Auto-set by {self.name} post-processing task",
+ )
modified += 1
job.logger.debug(f"Detection {det.pk}: marked as 'Not identifiable'")
From 10103db3318282ea632e2c16fa34322ee220db5d Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Mon, 6 Oct 2025 10:59:57 -0400
Subject: [PATCH 11/44] feat: add rank rollup
---
ami/ml/post_processing/rank_rollup.py | 127 ++++++++++++++++++++++++++
1 file changed, 127 insertions(+)
create mode 100644 ami/ml/post_processing/rank_rollup.py
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
new file mode 100644
index 000000000..01a315d3e
--- /dev/null
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -0,0 +1,127 @@
+import logging
+from collections import defaultdict
+
+from django.db import transaction
+from django.utils import timezone
+from ami.main.models import Classification, Identification, Taxon
+
+from ami.jobs.models import Job
+from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
+
+logger = logging.getLogger(__name__)
+
+
+def find_ancestor_by_parent_chain(taxon, target_rank: str):
+ """Climb up parent relationships until a taxon with the target rank is found."""
+ if not taxon:
+ return None
+
+ target_rank = target_rank.upper()
+
+ current = taxon
+ while current:
+ if current.rank.upper() == target_rank:
+ return current
+ current = current.parent
+
+ return None
+
+
+@register_postprocessing_task
+class RankRollupTask(BasePostProcessingTask):
+ """Post-processing task that rolls up low-confidence classifications
+ to higher ranks using aggregated scores.
+ """
+
+ key = "rank_rollup"
+ name = "Rank Rollup"
+
+ DEFAULT_THRESHOLDS = {"species": 0.8, "genus": 0.6, "family": 0.4}
+ ROLLUP_ORDER = ["species", "genus", "family"]
+
+ def run(self, job: "Job") -> None:
+ job.logger.info(f"Running Rank Rollup task for job {job.id}")
+
+ # ---- Read config parameters ----
+ config = self.config or {}
+ collection_id = config.get("source_image_collection_id")
+ thresholds = config.get("thresholds", self.DEFAULT_THRESHOLDS)
+ rollup_order = config.get("rollup_order", self.ROLLUP_ORDER)
+
+ if not collection_id:
+ job.logger.warning("No 'source_image_collection_id' provided in job config. Aborting task.")
+ return
+
+ job.logger.info(f"Config: collection_id={collection_id}, thresholds={thresholds}, rollup_order={rollup_order}")
+
+ qs = Classification.objects.filter(
+ terminal=True,
+ taxon__isnull=False,
+ detection__source_image__collections__id=collection_id,
+ )
+
+ updated_occurrences = []
+
+ with transaction.atomic():
+ for clf in qs:
+ if not clf.scores or not clf.category_map:
+ continue
+
+ taxon_scores = defaultdict(float)
+
+ for idx, score in enumerate(clf.scores):
+ label = clf.category_map.labels[idx]
+ if not label:
+ continue
+
+ taxon = Taxon.objects.filter(name=label).first()
+ if not taxon:
+ continue
+
+ for rank in rollup_order:
+ ancestor = find_ancestor_by_parent_chain(taxon, rank)
+ if ancestor:
+ taxon_scores[ancestor] += score
+
+ new_taxon = None
+ new_score = None
+ for rank in rollup_order:
+ threshold = thresholds.get(rank, 1.0)
+ candidates = {t: s for t, s in taxon_scores.items() if t.rank.upper() == rank.upper()}
+ if not candidates:
+ continue
+ best_taxon, best_score = max(candidates.items(), key=lambda kv: kv[1])
+ if best_score >= threshold:
+ new_taxon, new_score = best_taxon, best_score
+ break
+
+ if new_taxon and new_taxon != clf.taxon:
+ with transaction.atomic():
+ Classification.objects.filter(detection=clf.detection, terminal=True).update(terminal=False)
+
+ Classification.objects.create(
+ detection=clf.detection,
+ taxon=new_taxon,
+ score=new_score,
+ terminal=True,
+ algorithm=clf.algorithm,
+ timestamp=timezone.now(),
+ )
+
+ occurrence = clf.detection.occurrence
+ if occurrence:
+ Identification.objects.create(
+ occurrence=occurrence,
+ taxon=new_taxon,
+ user=None,
+ comment=f"Auto-set by {self.name} post-processing task",
+ )
+ updated_occurrences.append(occurrence.pk)
+
+ job.logger.info(
+ f"Rolled up occurrence {occurrence.pk}: {clf.taxon} to {new_taxon} "
+ f"({new_taxon.rank}) with rolled-up score={new_score:.3f}"
+ )
+
+ job.logger.info(f"Rank rollup completed. Updated {len(updated_occurrences)} occurrences.")
+ job.logger.info(f"Rank rollup completed for collection {collection_id}.")
From 2e81d9038ac89ea25a14145c7695be78f8f65211 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 7 Oct 2025 10:43:30 -0400
Subject: [PATCH 12/44] feat: add class masking post processing task
---
ami/ml/post_processing/class_masking.py | 249 ++++++++++++++++++++++++
1 file changed, 249 insertions(+)
create mode 100644 ami/ml/post_processing/class_masking.py
diff --git a/ami/ml/post_processing/class_masking.py b/ami/ml/post_processing/class_masking.py
new file mode 100644
index 000000000..7bbf9a0e8
--- /dev/null
+++ b/ami/ml/post_processing/class_masking.py
@@ -0,0 +1,249 @@
+import logging
+
+from django.db.models import QuerySet
+from django.utils import timezone
+
+from ami.main.models import Classification, Occurrence, SourceImageCollection, TaxaList
+from ami.ml.models import Algorithm, AlgorithmCategoryMap
+from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
+
+logger = logging.getLogger(__name__)
+
+
+def update_single_occurrence(
+ occurrence: Occurrence,
+ algorithm: Algorithm,
+ taxa_list: TaxaList,
+ task_logger: logging.Logger = logger,
+):
+ task_logger.info(f"Recalculating classifications for occurrence {occurrence.pk}.")
+
+ # Get the classifications for the occurrence in the collection
+ classifications = Classification.objects.filter(
+ detection__occurrence=occurrence,
+ terminal=True,
+ algorithm=algorithm,
+ scores__isnull=False,
+ ).distinct()
+
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=taxa_list,
+ algorithm=algorithm,
+ )
+
+
+def update_occurrences_in_collection(
+ collection: SourceImageCollection,
+ taxa_list: TaxaList,
+ algorithm: Algorithm,
+ params: dict,
+ task_logger: logging.Logger = logger,
+ job=None,
+):
+ task_logger.info(f"Recalculating classifications based on a taxa list. Params: {params}")
+
+ # Make new AlgorithmCategoryMap with the taxa in the list
+ # @TODO
+
+ classifications = Classification.objects.filter(
+ detection__source_image__collections=collection,
+ terminal=True,
+ # algorithm__task_type="classification",
+ algorithm=algorithm,
+ scores__isnull=False,
+ ).distinct()
+
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=taxa_list,
+ algorithm=algorithm,
+ )
+
+
+def make_classifications_filtered_by_taxa_list(
+ classifications: QuerySet[Classification],
+ taxa_list: TaxaList,
+ algorithm: Algorithm,
+):
+ taxa_in_list = taxa_list.taxa.all()
+
+ occurrences_to_update: set[Occurrence] = set()
+ logger.info(f"Found {len(classifications)} terminal classifications with scores to update.")
+
+ if not classifications:
+ raise ValueError("No terminal classifications with scores found to update.")
+
+ if not algorithm.category_map:
+ raise ValueError(f"Algorithm {algorithm} does not have a category map.")
+ category_map: AlgorithmCategoryMap = algorithm.category_map
+
+ # Consider moving this to a method on the Classification model
+
+ # @TODO find a more efficient way to get the category map with taxa. This is slow!
+ logger.info(f"Retrieving category map with Taxa instances for algorithm {algorithm}")
+ category_map_with_taxa = category_map.with_taxa()
+ # Filter the category map to only include taxa that are in the taxa list
+ # included_category_map_with_taxa = [
+ # category for category in category_map_with_taxa if category["taxon"] in taxa_in_list
+ # ]
+ excluded_category_map_with_taxa = [
+ category for category in category_map_with_taxa if category["taxon"] not in taxa_in_list
+ ]
+
+ # included_category_indices = [int(category["index"]) for category in category_map_with_taxa]
+ excluded_category_indices = [
+ int(category["index"]) for category in excluded_category_map_with_taxa # type: ignore
+ ]
+
+ # Log number of categories in the category map, num included, and num excluded, num classifications to update
+ logger.info(
+ f"Category map has {len(category_map_with_taxa)} categories, "
+ f"{len(excluded_category_map_with_taxa)} categories excluded, "
+ f"{len(classifications)} classifications to check"
+ )
+
+ classifications_to_add = []
+ classifications_to_update = []
+
+ timestamp = timezone.now()
+ for classification in classifications:
+ scores, logits = classification.scores, classification.logits
+ # Set scores and logits to zero if they are not in the filtered category indices
+
+ import numpy as np
+
+ # Assert that all scores & logits are lists of numbers
+ if not isinstance(scores, list) or not all(isinstance(score, (int, float)) for score in scores):
+ raise ValueError(f"Scores for classification {classification.pk} are not a list of numbers: {scores}")
+ if not isinstance(logits, list) or not all(isinstance(logit, (int, float)) for logit in logits):
+ raise ValueError(f"Logits for classification {classification.pk} are not a list of numbers: {logits}")
+
+ logger.debug(f"Processing classification {classification.pk} with {len(scores)} scores")
+ logger.info(f"Previous totals: {sum(scores)} scores, {sum(logits)} logits")
+
+ # scores_np_filtered = np.array(scores)
+ logits_np = np.array(logits)
+
+ # scores_np_filtered[excluded_category_indices] = 0.0
+
+ # @TODO can we use np.NAN instead of 0.0? zero will NOT calculate correctly in softmax.
+ # @TODO delete the excluded categories from the scores and logits instead of setting to 0.0
+ # logits_np[excluded_category_indices] = 0.0
+ # logits_np[excluded_category_indices] = np.nan
+ logits_np[excluded_category_indices] = -100
+
+ logits: list[float] = logits_np.tolist()
+
+ from numpy import exp
+ from numpy import sum as np_sum
+
+ # @TODO add test to see if this is correct, or needed!
+ # Recalculate the softmax scores based on the filtered logits
+ scores_np: np.ndarray = exp(logits_np - np.max(logits_np)) # Subtract max for numerical stability
+ scores_np /= np_sum(scores_np) # Normalize to get probabilities
+
+ scores: list = scores_np.tolist() # Convert back to list
+
+ logger.info(f"New totals: {sum(scores)} scores, {sum(logits)} logits")
+
+ # Get the taxon with the highest score using the index of the max score
+ top_index = scores.index(max(scores))
+ top_taxon = category_map_with_taxa[top_index][
+ "taxon"
+ ] # @TODO: This doesn't work if the taxon has never been classified
+ print("Top taxon: ", category_map_with_taxa[top_index]) # @TODO: REMOVE
+ print("Top index: ", top_index) # @TODO: REMOVE
+
+ # check if needs updating
+ if classification.scores == scores and classification.logits == logits:
+ logger.debug(f"Classification {classification.pk} does not need updating")
+ continue
+
+ # Consider the existing classification as an intermediate classification
+ classification.terminal = False
+ classification.updated_at = timestamp
+
+ # Recalculate the top taxon and score
+ new_classification = Classification(
+ taxon=top_taxon,
+ algorithm=classification.algorithm,
+ score=max(scores),
+ scores=scores,
+ logits=logits,
+ detection=classification.detection,
+ timestamp=classification.timestamp,
+ terminal=True,
+ category_map=None, # @TODO need a new category map with the filtered taxa
+ created_at=timestamp,
+ updated_at=timestamp,
+ )
+ if new_classification.taxon is None:
+ raise (ValueError("Classification isn't registered yet. Aborting")) # @TODO remove or fail gracefully
+
+ classifications_to_update.append(classification)
+ classifications_to_add.append(new_classification)
+
+ assert new_classification.detection is not None
+ assert new_classification.detection.occurrence is not None
+ occurrences_to_update.add(new_classification.detection.occurrence)
+
+ logger.info(
+ f"Adding new classification for Taxon {top_taxon} to occurrence {new_classification.detection.occurrence}"
+ )
+
+ # Bulk update the existing classifications
+ if classifications_to_update:
+ logger.info(f"Bulk updating {len(classifications_to_update)} existing classifications")
+ Classification.objects.bulk_update(classifications_to_update, ["terminal", "updated_at"])
+ logger.info(f"Updated {len(classifications_to_update)} existing classifications")
+
+ if classifications_to_add:
+ # Bulk create the new classifications
+ logger.info(f"Bulk creating {len(classifications_to_add)} new classifications")
+ Classification.objects.bulk_create(classifications_to_add)
+ logger.info(f"Added {len(classifications_to_add)} new classifications")
+
+ # Update the occurrence determinations
+ logger.info(f"Updating the determinations for {len(occurrences_to_update)} occurrences")
+ for occurrence in occurrences_to_update:
+ occurrence.save(update_determination=True)
+ logger.info(f"Updated determinations for {len(occurrences_to_update)} occurrences")
+
+
+@register_postprocessing_task
+class ClassMaskingTask(BasePostProcessingTask):
+ key = "class_masking"
+ name = "Class Masking"
+
+ def run(self, job):
+ self.log_config(job)
+
+ collection_id = self.config.get("collection_id")
+ taxa_list_id = self.config.get("taxa_list_id")
+ algorithm_id = self.config.get("algorithm_id")
+
+ if not all([collection_id, taxa_list_id, algorithm_id]):
+ job.logger.error("Missing required configuration: collection_id, taxa_list_id, algorithm_id")
+ return
+
+ try:
+ collection = SourceImageCollection.objects.get(pk=collection_id)
+ taxa_list = TaxaList.objects.get(pk=taxa_list_id)
+ algorithm = Algorithm.objects.get(pk=algorithm_id)
+ except Exception as e:
+ job.logger.exception(f"Failed to load objects: {e}")
+ return
+
+ job.logger.info(f"Applying class masking on collection {collection_id} using taxa list {taxa_list_id}")
+
+ update_occurrences_in_collection(
+ collection=collection,
+ taxa_list=taxa_list,
+ algorithm=algorithm,
+ params=self.config,
+ task_logger=job.logger,
+ job=job,
+ )
+
+ job.logger.info("Class masking completed successfully.")
From 0baf8ce39151a6520708c1063347a3ea9fb59615 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 7 Oct 2025 10:43:57 -0400
Subject: [PATCH 13/44] feat: trigger class masking from admin page
---
ami/main/admin.py | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/ami/main/admin.py b/ami/main/admin.py
index a3f4d6aae..d7d79aafa 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -641,7 +641,33 @@ def run_small_size_filter(self, request: HttpRequest, queryset: QuerySet[SourceI
self.message_user(request, f"Queued Small Size Filter for {queryset.count()} collection(s). Jobs: {jobs}")
- actions = [populate_collection, populate_collection_async, run_small_size_filter]
+ @admin.action(description="Run Class Masking post-processing task (async)")
+ def run_class_masking(self, request: HttpRequest, queryset: QuerySet[SourceImageCollection]) -> None:
+ jobs = []
+
+ DEFAULT_TAXA_LIST_ID = 5
+ DEFAULT_ALGORITHM_ID = 11
+
+ for collection in queryset:
+ job = Job.objects.create(
+ name=f"Post-processing: ClassMasking on Collection {collection.pk}",
+ project=collection.project,
+ job_type_key="post_processing",
+ params={
+ "task": "class_masking",
+ "config": {
+ "collection_id": collection.pk,
+ "taxa_list_id": DEFAULT_TAXA_LIST_ID,
+ "algorithm_id": DEFAULT_ALGORITHM_ID,
+ },
+ },
+ )
+ job.enqueue()
+ jobs.append(job.pk)
+
+ self.message_user(request, f"Queued Class Masking for {queryset.count()} collection(s). Jobs: {jobs}")
+
+ actions = [populate_collection, populate_collection_async, run_small_size_filter, run_class_masking]
# Hide images many-to-many field from form. This would list all source images in the database.
exclude = ("images",)
From f3caa18c4f93b499527910f6b273d804d279d2c8 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 8 Oct 2025 10:21:31 -0400
Subject: [PATCH 14/44] fix: modify log messages
---
ami/ml/post_processing/__init__.py | 2 +-
ami/ml/post_processing/rank_rollup.py | 6 +++---
ami/ml/post_processing/small_size_filter.py | 4 ++--
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/ami/ml/post_processing/__init__.py b/ami/ml/post_processing/__init__.py
index 3517ed47c..bf2e99698 100644
--- a/ami/ml/post_processing/__init__.py
+++ b/ami/ml/post_processing/__init__.py
@@ -1 +1 @@
-from . import small_size_filter # noqa: F401
+from . import class_masking, rank_rollup, small_size_filter # noqa: F401
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index 01a315d3e..a66eda0e0 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -3,9 +3,9 @@
from django.db import transaction
from django.utils import timezone
-from main.models import Classification, Identification, Taxon
from ami.jobs.models import Job
+from ami.main.models import Classification, Identification, Taxon
from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
logger = logging.getLogger(__name__)
@@ -40,10 +40,10 @@ class RankRollupTask(BasePostProcessingTask):
ROLLUP_ORDER = ["species", "genus", "family"]
def run(self, job: "Job") -> None:
- job.logger.info(f"Running Rank Rollup task for job {job.id}")
+ job.logger.info(f"Running Rank Rollup task for job {job.pk}")
# ---- Read config parameters ----
- config = job.config or {}
+ config = self.config or {}
collection_id = config.get("source_image_collection_id")
thresholds = config.get("thresholds", self.DEFAULT_THRESHOLDS)
rollup_order = config.get("rollup_order", self.ROLLUP_ORDER)
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
index 784647dd8..3f144aff4 100644
--- a/ami/ml/post_processing/small_size_filter.py
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -32,7 +32,7 @@ def run(self, job: "Job") -> None:
try:
collection = SourceImageCollection.objects.get(pk=collection_id)
- job.logger.info(f"Loaded SourceImageCollection {collection_id} " f"(Project={collection.project_id})")
+ job.logger.info(f"Loaded SourceImageCollection {collection_id} " f"(Project={collection.project})")
except SourceImageCollection.DoesNotExist:
msg = f"SourceImageCollection {collection_id} not found"
job.logger.error(msg)
@@ -83,7 +83,7 @@ def run(self, job: "Job") -> None:
occurrence=det.occurrence,
taxon=not_identifiable_taxon,
user=None, # since this comes from a post-processing algorithm not a human
- comment=f"Auto-set by {self.name} Filter post-processing task",
+ comment=f"Auto-set by {self.name} post-processing task",
)
modified += 1
job.logger.debug(f"Detection {det.pk}: marked as 'Not identifiable'")
From 65d4fef35518e6aa97908e48a513c3f1f5de7c2b Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 8 Oct 2025 10:41:16 -0400
Subject: [PATCH 15/44] fix: set the classification algorithm to the rank
 rollup Algorithm when creating a terminal classification with the rolled-up
taxon
---
ami/ml/post_processing/rank_rollup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index a66eda0e0..4042612b2 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -104,7 +104,7 @@ def run(self, job: "Job") -> None:
taxon=new_taxon,
score=new_score,
terminal=True,
- algorithm=clf.algorithm,
+ algorithm=self.algorithm,
timestamp=timezone.now(),
)
From e13afc1205a71b18104604a0565ddf26a6e9a3b7 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 8 Oct 2025 10:41:40 -0400
Subject: [PATCH 16/44] feat: trigger rank rollup from admin page
---
ami/main/admin.py | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/ami/main/admin.py b/ami/main/admin.py
index d7d79aafa..4519ae6db 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -667,7 +667,34 @@ def run_class_masking(self, request: HttpRequest, queryset: QuerySet[SourceImage
self.message_user(request, f"Queued Class Masking for {queryset.count()} collection(s). Jobs: {jobs}")
- actions = [populate_collection, populate_collection_async, run_small_size_filter, run_class_masking]
+ @admin.action(description="Run Rank Rollup post-processing task (async)")
+ def run_rank_rollup(self, request: HttpRequest, queryset: QuerySet[SourceImageCollection]) -> None:
+ """Trigger the Rank Rollup post-processing job asynchronously."""
+ jobs = []
+ DEFAULT_THRESHOLDS = {"species": 0.8, "genus": 0.6, "family": 0.4}
+
+ for collection in queryset:
+ job = Job.objects.create(
+ name=f"Post-processing: RankRollup on Collection {collection.pk}",
+ project=collection.project,
+ job_type_key="post_processing",
+ params={
+ "task": "rank_rollup",
+ "config": {"source_image_collection_id": collection.pk, "thresholds": DEFAULT_THRESHOLDS},
+ },
+ )
+ job.enqueue()
+ jobs.append(job.pk)
+
+ self.message_user(request, f"Queued Rank Rollup for {queryset.count()} collection(s). Jobs: {jobs}")
+
+ actions = [
+ populate_collection,
+ populate_collection_async,
+ run_small_size_filter,
+ run_class_masking,
+ run_rank_rollup,
+ ]
# Hide images many-to-many field from form. This would list all source images in the database.
exclude = ("images",)
From 7ecc18cf06cb7c781b483bb4b1102a2ad354a2a4 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:31:41 -0400
Subject: [PATCH 17/44] chore: remove class_masking.py from framework branch
---
ami/ml/post_processing/class_masking.py | 249 ------------------------
1 file changed, 249 deletions(-)
delete mode 100644 ami/ml/post_processing/class_masking.py
diff --git a/ami/ml/post_processing/class_masking.py b/ami/ml/post_processing/class_masking.py
deleted file mode 100644
index 7bbf9a0e8..000000000
--- a/ami/ml/post_processing/class_masking.py
+++ /dev/null
@@ -1,249 +0,0 @@
-import logging
-
-from django.db.models import QuerySet
-from django.utils import timezone
-
-from ami.main.models import Classification, Occurrence, SourceImageCollection, TaxaList
-from ami.ml.models import Algorithm, AlgorithmCategoryMap
-from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
-
-logger = logging.getLogger(__name__)
-
-
-def update_single_occurrence(
- occurrence: Occurrence,
- algorithm: Algorithm,
- taxa_list: TaxaList,
- task_logger: logging.Logger = logger,
-):
- task_logger.info(f"Recalculating classifications for occurrence {occurrence.pk}.")
-
- # Get the classifications for the occurrence in the collection
- classifications = Classification.objects.filter(
- detection__occurrence=occurrence,
- terminal=True,
- algorithm=algorithm,
- scores__isnull=False,
- ).distinct()
-
- make_classifications_filtered_by_taxa_list(
- classifications=classifications,
- taxa_list=taxa_list,
- algorithm=algorithm,
- )
-
-
-def update_occurrences_in_collection(
- collection: SourceImageCollection,
- taxa_list: TaxaList,
- algorithm: Algorithm,
- params: dict,
- task_logger: logging.Logger = logger,
- job=None,
-):
- task_logger.info(f"Recalculating classifications based on a taxa list. Params: {params}")
-
- # Make new AlgorithmCategoryMap with the taxa in the list
- # @TODO
-
- classifications = Classification.objects.filter(
- detection__source_image__collections=collection,
- terminal=True,
- # algorithm__task_type="classification",
- algorithm=algorithm,
- scores__isnull=False,
- ).distinct()
-
- make_classifications_filtered_by_taxa_list(
- classifications=classifications,
- taxa_list=taxa_list,
- algorithm=algorithm,
- )
-
-
-def make_classifications_filtered_by_taxa_list(
- classifications: QuerySet[Classification],
- taxa_list: TaxaList,
- algorithm: Algorithm,
-):
- taxa_in_list = taxa_list.taxa.all()
-
- occurrences_to_update: set[Occurrence] = set()
- logger.info(f"Found {len(classifications)} terminal classifications with scores to update.")
-
- if not classifications:
- raise ValueError("No terminal classifications with scores found to update.")
-
- if not algorithm.category_map:
- raise ValueError(f"Algorithm {algorithm} does not have a category map.")
- category_map: AlgorithmCategoryMap = algorithm.category_map
-
- # Consider moving this to a method on the Classification model
-
- # @TODO find a more efficient way to get the category map with taxa. This is slow!
- logger.info(f"Retrieving category map with Taxa instances for algorithm {algorithm}")
- category_map_with_taxa = category_map.with_taxa()
- # Filter the category map to only include taxa that are in the taxa list
- # included_category_map_with_taxa = [
- # category for category in category_map_with_taxa if category["taxon"] in taxa_in_list
- # ]
- excluded_category_map_with_taxa = [
- category for category in category_map_with_taxa if category["taxon"] not in taxa_in_list
- ]
-
- # included_category_indices = [int(category["index"]) for category in category_map_with_taxa]
- excluded_category_indices = [
- int(category["index"]) for category in excluded_category_map_with_taxa # type: ignore
- ]
-
- # Log number of categories in the category map, num included, and num excluded, num classifications to update
- logger.info(
- f"Category map has {len(category_map_with_taxa)} categories, "
- f"{len(excluded_category_map_with_taxa)} categories excluded, "
- f"{len(classifications)} classifications to check"
- )
-
- classifications_to_add = []
- classifications_to_update = []
-
- timestamp = timezone.now()
- for classification in classifications:
- scores, logits = classification.scores, classification.logits
- # Set scores and logits to zero if they are not in the filtered category indices
-
- import numpy as np
-
- # Assert that all scores & logits are lists of numbers
- if not isinstance(scores, list) or not all(isinstance(score, (int, float)) for score in scores):
- raise ValueError(f"Scores for classification {classification.pk} are not a list of numbers: {scores}")
- if not isinstance(logits, list) or not all(isinstance(logit, (int, float)) for logit in logits):
- raise ValueError(f"Logits for classification {classification.pk} are not a list of numbers: {logits}")
-
- logger.debug(f"Processing classification {classification.pk} with {len(scores)} scores")
- logger.info(f"Previous totals: {sum(scores)} scores, {sum(logits)} logits")
-
- # scores_np_filtered = np.array(scores)
- logits_np = np.array(logits)
-
- # scores_np_filtered[excluded_category_indices] = 0.0
-
- # @TODO can we use np.NAN instead of 0.0? zero will NOT calculate correctly in softmax.
- # @TODO delete the excluded categories from the scores and logits instead of setting to 0.0
- # logits_np[excluded_category_indices] = 0.0
- # logits_np[excluded_category_indices] = np.nan
- logits_np[excluded_category_indices] = -100
-
- logits: list[float] = logits_np.tolist()
-
- from numpy import exp
- from numpy import sum as np_sum
-
- # @TODO add test to see if this is correct, or needed!
- # Recalculate the softmax scores based on the filtered logits
- scores_np: np.ndarray = exp(logits_np - np.max(logits_np)) # Subtract max for numerical stability
- scores_np /= np_sum(scores_np) # Normalize to get probabilities
-
- scores: list = scores_np.tolist() # Convert back to list
-
- logger.info(f"New totals: {sum(scores)} scores, {sum(logits)} logits")
-
- # Get the taxon with the highest score using the index of the max score
- top_index = scores.index(max(scores))
- top_taxon = category_map_with_taxa[top_index][
- "taxon"
- ] # @TODO: This doesn't work if the taxon has never been classified
- print("Top taxon: ", category_map_with_taxa[top_index]) # @TODO: REMOVE
- print("Top index: ", top_index) # @TODO: REMOVE
-
- # check if needs updating
- if classification.scores == scores and classification.logits == logits:
- logger.debug(f"Classification {classification.pk} does not need updating")
- continue
-
- # Consider the existing classification as an intermediate classification
- classification.terminal = False
- classification.updated_at = timestamp
-
- # Recalculate the top taxon and score
- new_classification = Classification(
- taxon=top_taxon,
- algorithm=classification.algorithm,
- score=max(scores),
- scores=scores,
- logits=logits,
- detection=classification.detection,
- timestamp=classification.timestamp,
- terminal=True,
- category_map=None, # @TODO need a new category map with the filtered taxa
- created_at=timestamp,
- updated_at=timestamp,
- )
- if new_classification.taxon is None:
- raise (ValueError("Classification isn't registered yet. Aborting")) # @TODO remove or fail gracefully
-
- classifications_to_update.append(classification)
- classifications_to_add.append(new_classification)
-
- assert new_classification.detection is not None
- assert new_classification.detection.occurrence is not None
- occurrences_to_update.add(new_classification.detection.occurrence)
-
- logging.info(
- f"Adding new classification for Taxon {top_taxon} to occurrence {new_classification.detection.occurrence}"
- )
-
- # Bulk update the existing classifications
- if classifications_to_update:
- logger.info(f"Bulk updating {len(classifications_to_update)} existing classifications")
- Classification.objects.bulk_update(classifications_to_update, ["terminal", "updated_at"])
- logger.info(f"Updated {len(classifications_to_update)} existing classifications")
-
- if classifications_to_add:
- # Bulk create the new classifications
- logger.info(f"Bulk creating {len(classifications_to_add)} new classifications")
- Classification.objects.bulk_create(classifications_to_add)
- logger.info(f"Added {len(classifications_to_add)} new classifications")
-
- # Update the occurrence determinations
- logger.info(f"Updating the determinations for {len(occurrences_to_update)} occurrences")
- for occurrence in occurrences_to_update:
- occurrence.save(update_determination=True)
- logger.info(f"Updated determinations for {len(occurrences_to_update)} occurrences")
-
-
-@register_postprocessing_task
-class ClassMaskingTask(BasePostProcessingTask):
- key = "class_masking"
- name = "Class Masking"
-
- def run(self, job):
- self.log_config(job)
-
- collection_id = self.config.get("collection_id")
- taxa_list_id = self.config.get("taxa_list_id")
- algorithm_id = self.config.get("algorithm_id")
-
- if not all([collection_id, taxa_list_id, algorithm_id]):
- job.logger.error("Missing required configuration: collection_id, taxa_list_id, algorithm_id")
- return
-
- try:
- collection = SourceImageCollection.objects.get(pk=collection_id)
- taxa_list = TaxaList.objects.get(pk=taxa_list_id)
- algorithm = Algorithm.objects.get(pk=algorithm_id)
- except Exception as e:
- job.logger.exception(f"Failed to load objects: {e}")
- return
-
- job.logger.info(f"Applying class masking on collection {collection_id} using taxa list {taxa_list_id}")
-
- update_occurrences_in_collection(
- collection=collection,
- taxa_list=taxa_list,
- algorithm=algorithm,
- params=self.config,
- task_logger=job.logger,
- job=job,
- )
-
- job.logger.info("Class masking completed successfully.")
From f2140256f8bc620944d5588ece43fc2bee4722be Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:38:46 -0400
Subject: [PATCH 18/44] fix: initialize post-processing tasks with job context
and simplify run() call
---
ami/jobs/models.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ami/jobs/models.py b/ami/jobs/models.py
index 39e7a0ff3..96470a97d 100644
--- a/ami/jobs/models.py
+++ b/ami/jobs/models.py
@@ -668,8 +668,8 @@ def run(cls, job: "Job"):
if not task_cls:
raise ValueError(f"Unknown post-processing task '{task_key}'")
- task = task_cls(**config)
- task.run(job)
+ task = task_cls(job=job, **config)
+ task.run()
job.progress.update_stage(cls.key, status=JobState.SUCCESS, progress=1)
job.finished_at = datetime.datetime.now()
job.update_status(JobState.SUCCESS)
From 20ff4b6b3f5b555bae81b7510f6cf05173cd642d Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:39:07 -0400
Subject: [PATCH 19/44] feat: add permission to run post-processing jobs
---
ami/main/models.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/ami/main/models.py b/ami/main/models.py
index 7e3935609..bab6b3a20 100644
--- a/ami/main/models.py
+++ b/ami/main/models.py
@@ -314,6 +314,7 @@ class Permissions:
RUN_POPULATE_CAPTURES_COLLECTION_JOB = "run_populate_captures_collection_job"
RUN_DATA_STORAGE_SYNC_JOB = "run_data_storage_sync_job"
RUN_DATA_EXPORT_JOB = "run_data_export_job"
+ RUN_POST_PROCESSING_JOB = "run_post_processing_job"
DELETE_JOB = "delete_job"
# Deployment permissions
@@ -376,6 +377,7 @@ class Meta:
("run_data_storage_sync_job", "Can run/retry/cancel Data Storage Sync jobs"),
("run_data_export_job", "Can run/retry/cancel Data Export jobs"),
("run_single_image_ml_job", "Can process a single capture"),
+ ("run_post_processing_job", "Can run/retry/cancel Post-Processing jobs"),
("delete_job", "Can delete a job"),
# Deployment permissions
("create_deployment", "Can create a deployment"),
From 5b66ae3033dbad38fa0b70217dfac42f14474747 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:39:41 -0400
Subject: [PATCH 20/44] chore: remove class_masking import
---
ami/ml/post_processing/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ami/ml/post_processing/__init__.py b/ami/ml/post_processing/__init__.py
index bf2e99698..136481120 100644
--- a/ami/ml/post_processing/__init__.py
+++ b/ami/ml/post_processing/__init__.py
@@ -1 +1 @@
-from . import class_masking, rank_rollup, small_size_filter # noqa: F401
+from . import rank_rollup, small_size_filter # noqa: F401
From 0419eff07909dc56467f6de2667edf8103ac25bb Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:40:29 -0400
Subject: [PATCH 21/44] refactor: redesign BasePostProcessingTask with
job-aware logging, progress, and algorithm binding
---
ami/ml/post_processing/base.py | 65 ++++++++++++++++++++--------------
1 file changed, 39 insertions(+), 26 deletions(-)
diff --git a/ami/ml/post_processing/base.py b/ami/ml/post_processing/base.py
index baa1c452e..f294b9aab 100644
--- a/ami/ml/post_processing/base.py
+++ b/ami/ml/post_processing/base.py
@@ -23,19 +23,6 @@ def register_postprocessing_task(task_cls: type["BasePostProcessingTask"]):
# Register the task
POSTPROCESSING_TASKS[task_cls.key] = task_cls
-
- # Ensure Algorithm object exists for this task
- algorithm, _ = Algorithm.objects.get_or_create(
- name=task_cls.__name__,
- defaults={
- "description": f"Post-processing task: {task_cls.key}",
- "task_type": AlgorithmTaskType.POST_PROCESSING.value,
- },
- )
-
- # Attach the Algorithm object to the task class
- task_cls.algorithm = algorithm
-
return task_cls
@@ -56,24 +43,50 @@ class BasePostProcessingTask(abc.ABC):
key: str = ""
name: str = ""
- def __init__(self, **config: Any):
+ def __init__(
+ self,
+ job: Job | None = None,
+ logger: logging.Logger | None = None,
+ **config: Any,
+ ):
+ self.job = job
+ self.config = config
+ # Choose the right logger
+ if logger is not None:
+ self.logger = logger
+ elif job is not None:
+ self.logger = job.logger
+ else:
+ self.logger = logging.getLogger(f"ami.post_processing.{self.key}")
+
+ algorithm, _ = Algorithm.objects.get_or_create(
+ name=self.__class__.__name__,
+ defaults={
+ "description": f"Post-processing task: {self.key}",
+ "task_type": AlgorithmTaskType.POST_PROCESSING.value,
+ },
+ )
+ self.algorithm: Algorithm = algorithm
+
+ self.logger.info(f"Initialized {self.__class__.__name__} with config={self.config}, job={job}")
+
+ def update_progress(self, progress: float):
"""
- Initialize task with configuration parameters.
+ Update progress if job is present, otherwise just log.
"""
- self.config: dict[str, Any] = config
- self.logger = logging.getLogger(f"ami.post_processing.{self.key}")
+
+ if self.job:
+ self.job.progress.update_stage(self.job.job_type_key, progress=progress)
+ self.job.save(update_fields=["progress"])
+
+ else:
+ # No job object — fallback to plain logging
+ self.logger.info(f"[{self.name}] Progress {progress:.0%}")
@abc.abstractmethod
- def run(self, job: Job) -> None:
+ def run(self) -> None:
"""
Run the task logic.
Must be implemented by subclasses.
- The job parameter provides context (project, logs, etc.).
- """
- raise NotImplementedError("Subclasses must implement run()")
-
- def log_config(self, job: Job):
- """
- Helper to log the task configuration at start.
"""
- job.logger.info(f"Running task {self.name} ({self.key}) with config: {self.config}")
+ raise NotImplementedError("BasePostProcessingTask subclasses must implement run()")
From 1ad1e765237d9b07763edaf06b1bfd7450dcc679 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:41:16 -0400
Subject: [PATCH 22/44] refactor: adapt RankRollupTask to new
BasePostProcessingTask with self.logger and progress updates
---
ami/ml/post_processing/rank_rollup.py | 55 ++++++++++++++++++++-------
1 file changed, 42 insertions(+), 13 deletions(-)
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index 4042612b2..9f1e3b8f8 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -4,7 +4,6 @@
from django.db import transaction
from django.utils import timezone
-from ami.jobs.models import Job
from ami.main.models import Classification, Identification, Taxon
from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
@@ -34,13 +33,14 @@ class RankRollupTask(BasePostProcessingTask):
"""
key = "rank_rollup"
- name = "Rank Rollup"
+ name = "Rank rollup"
DEFAULT_THRESHOLDS = {"species": 0.8, "genus": 0.6, "family": 0.4}
ROLLUP_ORDER = ["species", "genus", "family"]
- def run(self, job: "Job") -> None:
- job.logger.info(f"Running Rank Rollup task for job {job.pk}")
+ def run(self) -> None:
+ job = self.job
+ self.logger.info(f"Starting {self.name} task for job {job.pk if job else 'N/A'}")
# ---- Read config parameters ----
config = self.config or {}
@@ -49,10 +49,12 @@ def run(self, job: "Job") -> None:
rollup_order = config.get("rollup_order", self.ROLLUP_ORDER)
if not collection_id:
- job.logger.warning("No 'source_image_collection_id' provided in job config. Aborting task.")
+ self.logger.warning("No 'source_image_collection_id' provided in config. Aborting task.")
return
- job.logger.info(f"Config: collection_id={collection_id}, thresholds={thresholds}, rollup_order={rollup_order}")
+ self.logger.info(
+ f"Config loaded: collection_id={collection_id}, thresholds={thresholds}, rollup_order={rollup_order}"
+ )
qs = Classification.objects.filter(
terminal=True,
@@ -60,11 +62,20 @@ def run(self, job: "Job") -> None:
detection__source_image__collections__id=collection_id,
)
+ total = qs.count()
+ self.logger.info(f"Found {total} terminal classifications to process for collection {collection_id}")
+
updated_occurrences = []
with transaction.atomic():
- for clf in qs:
- if not clf.scores or not clf.category_map:
+ for i, clf in enumerate(qs.iterator(), start=1):
+ self.logger.info(f"Processing classification #{clf.pk} (taxon={clf.taxon}, score={clf.score:.3f})")
+
+ if not clf.scores:
+ self.logger.warning(f"Skipping classification #{clf.pk}: no scores available")
+ continue
+ if not clf.category_map:
+ self.logger.warning(f"Skipping classification #{clf.pk}: no category_map assigned")
continue
taxon_scores = defaultdict(float)
@@ -76,29 +87,40 @@ def run(self, job: "Job") -> None:
taxon = Taxon.objects.filter(name=label).first()
if not taxon:
+ self.logger.debug(f"Skipping label '{label}' (no matching Taxon in DB)")
continue
for rank in rollup_order:
ancestor = find_ancestor_by_parent_chain(taxon, rank)
if ancestor:
taxon_scores[ancestor] += score
+ self.logger.debug(f" + Added {score:.3f} to ancestor {ancestor.name} ({rank})")
new_taxon = None
new_score = None
for rank in rollup_order:
threshold = thresholds.get(rank, 1.0)
candidates = {t: s for t, s in taxon_scores.items() if t.rank == rank}
+
if not candidates:
+ self.logger.debug(f"No candidates found at rank {rank}")
continue
+
best_taxon, best_score = max(candidates.items(), key=lambda kv: kv[1])
+ self.logger.debug(
+ f"Best at rank {rank}: {best_taxon.name} ({best_score:.3f}) [threshold={threshold}]"
+ )
+
if best_score >= threshold:
new_taxon, new_score = best_taxon, best_score
+ self.logger.info(f"Rollup decision: {new_taxon.name} ({rank}) with score {new_score:.3f}")
break
if new_taxon and new_taxon != clf.taxon:
+ self.logger.info(f"Rolling up {clf.taxon} → {new_taxon} ({new_taxon.rank})")
+
with transaction.atomic():
Classification.objects.filter(detection=clf.detection, terminal=True).update(terminal=False)
-
Classification.objects.create(
detection=clf.detection,
taxon=new_taxon,
@@ -118,10 +140,17 @@ def run(self, job: "Job") -> None:
)
updated_occurrences.append(occurrence.pk)
- job.logger.info(
- f"Rolled up occurrence {occurrence.pk}: {clf.taxon} to{new_taxon} "
+ self.logger.info(
+ f"Rolled up occurrence {occurrence.pk}: {clf.taxon} → {new_taxon} "
f"({new_taxon.rank}) with rolled-up score={new_score:.3f}"
)
+ else:
+ self.logger.info(f"No rollup applied for classification #{clf.pk} (taxon={clf.taxon})")
+
+ # 🔹 Periodic progress updates
+ if i % 50 == 0 or i == total:
+ progress = i / total if total > 0 else 1.0
+ self.update_progress(progress)
- job.logger.info(f"Rank rollup completed. Updated {len(updated_occurrences)} occurrences.")
- job.logger.info(f"Rank rollup completed for collection {collection_id}.")
+ self.logger.info(f"Rank rollup completed. Updated {len(updated_occurrences)} occurrences.")
+ self.logger.info(f"{self.name} task finished for collection {collection_id}.")
From d97e8e0e031165adfe9a2e61f3c6a99eec809e76 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:41:53 -0400
Subject: [PATCH 23/44] refactor: update SmallSizeFilter to use
BasePostProcessingTask logging and progress tracking
---
ami/ml/post_processing/small_size_filter.py | 42 ++++++++++-----------
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
index 3f144aff4..a8f9b9010 100644
--- a/ami/ml/post_processing/small_size_filter.py
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -1,7 +1,6 @@
from django.db import transaction
from django.utils import timezone
-from ami.jobs.models import Job
from ami.main.models import Detection, Identification, SourceImageCollection, Taxon, TaxonRank
from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
@@ -9,13 +8,13 @@
@register_postprocessing_task
class SmallSizeFilter(BasePostProcessingTask):
key = "small_size_filter"
- name = "Small Size Filter"
+ name = "Small size filter"
- def run(self, job: "Job") -> None:
+ def run(self) -> None:
threshold = self.config.get("size_threshold", 0.01)
collection_id = self.config.get("source_image_collection_id")
- # Get or create the "Not identifiable" taxon
+ # Get or create the "Not identifiable" taxon
not_identifiable_taxon, _ = Taxon.objects.get_or_create(
name="Not identifiable",
defaults={
@@ -23,43 +22,43 @@ def run(self, job: "Job") -> None:
"notes": "Auto-generated taxon for small size filter",
},
)
- job.logger.info(f"=== Starting {self.name} ===")
+ self.logger.info(f"=== Starting {self.name} ===")
if not collection_id:
msg = "Missing required config param: source_image_collection_id"
- job.logger.error(msg)
+ self.logger.error(msg)
raise ValueError(msg)
try:
collection = SourceImageCollection.objects.get(pk=collection_id)
- job.logger.info(f"Loaded SourceImageCollection {collection_id} " f"(Project={collection.project})")
+ self.logger.info(f"Loaded SourceImageCollection {collection_id} (Project={collection.project})")
except SourceImageCollection.DoesNotExist:
msg = f"SourceImageCollection {collection_id} not found"
- job.logger.error(msg)
+ self.logger.error(msg)
raise ValueError(msg)
detections = Detection.objects.filter(source_image__collections=collection)
total = detections.count()
- job.logger.info(f"Found {total} detections in collection {collection_id}")
+ self.logger.info(f"Found {total} detections in collection {collection_id}")
modified = 0
- for det in detections.iterator():
+ for i, det in enumerate(detections.iterator(), start=1):
bbox = det.get_bbox()
if not bbox:
- job.logger.debug(f"Detection {det.pk}: no bbox, skipping")
+ self.logger.debug(f"Detection {det.pk}: no bbox, skipping")
continue
img_w, img_h = det.source_image.width, det.source_image.height
if not img_w or not img_h:
- job.logger.debug(f"Detection {det.pk}: missing source image dims, skipping")
+ self.logger.debug(f"Detection {det.pk}: missing source image dims, skipping")
continue
det_area = det.width() * det.height()
img_area = img_w * img_h
rel_area = det_area / img_area if img_area else 0
- job.logger.debug(
+ self.logger.debug(
f"Detection {det.pk}: area={det_area}, rel_area={rel_area:.4f}, " f"threshold={threshold:.4f}"
)
@@ -82,18 +81,15 @@ def run(self, job: "Job") -> None:
Identification.objects.create(
occurrence=det.occurrence,
taxon=not_identifiable_taxon,
- user=None, # since this comes from a post-processing algorithm not a human
+ user=None, # auto-generated by post-processing
comment=f"Auto-set by {self.name} post-processing task",
)
modified += 1
- job.logger.debug(f"Detection {det.pk}: marked as 'Not identifiable'")
+ self.logger.info(f"Detection {det.pk}: marked as 'Not identifiable'")
- job.logger.info(f"=== Completed {self.name}: {modified}/{total} detections modified ===")
+ # Update progress every 10 detections
+ if i % 10 == 0 or i == total:
+ progress = i / total if total > 0 else 1.0
+ self.update_progress(progress)
- job.result = {
- "detections_total": total,
- "detections_modified": modified,
- "threshold": threshold,
- "collection_id": collection_id,
- }
- job.save(update_fields=["result"])
+ self.logger.info(f"=== Completed {self.name}: {modified}/{total} detections modified ===")
From 2922c86c1a0044f5630d076464320abccb4983a3 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:43:24 -0400
Subject: [PATCH 24/44] migrations: update Project options to include
post-processing job permission
---
.../migrations/0075_alter_project_options.py | 59 +++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 ami/main/migrations/0075_alter_project_options.py
diff --git a/ami/main/migrations/0075_alter_project_options.py b/ami/main/migrations/0075_alter_project_options.py
new file mode 100644
index 000000000..618ba1753
--- /dev/null
+++ b/ami/main/migrations/0075_alter_project_options.py
@@ -0,0 +1,59 @@
+# Generated by Django 4.2.10 on 2025-10-14 05:01
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("main", "0074_taxon_cover_image_credit_taxon_cover_image_url_and_more"),
+ ]
+
+ operations = [
+ migrations.AlterModelOptions(
+ name="project",
+ options={
+ "ordering": ["-priority", "created_at"],
+ "permissions": [
+ ("create_identification", "Can create identifications"),
+ ("update_identification", "Can update identifications"),
+ ("delete_identification", "Can delete identifications"),
+ ("create_job", "Can create a job"),
+ ("update_job", "Can update a job"),
+ ("run_ml_job", "Can run/retry/cancel ML jobs"),
+ ("run_populate_captures_collection_job", "Can run/retry/cancel Populate Collection jobs"),
+ ("run_data_storage_sync_job", "Can run/retry/cancel Data Storage Sync jobs"),
+ ("run_data_export_job", "Can run/retry/cancel Data Export jobs"),
+ ("run_single_image_ml_job", "Can process a single capture"),
+ ("run_post_processing_job", "Can run/retry/cancel Post-Processing jobs"),
+ ("delete_job", "Can delete a job"),
+ ("create_deployment", "Can create a deployment"),
+ ("delete_deployment", "Can delete a deployment"),
+ ("update_deployment", "Can update a deployment"),
+ ("sync_deployment", "Can sync images to a deployment"),
+ ("create_sourceimagecollection", "Can create a collection"),
+ ("update_sourceimagecollection", "Can update a collection"),
+ ("delete_sourceimagecollection", "Can delete a collection"),
+ ("populate_sourceimagecollection", "Can populate a collection"),
+ ("create_sourceimage", "Can create a source image"),
+ ("update_sourceimage", "Can update a source image"),
+ ("delete_sourceimage", "Can delete a source image"),
+ ("star_sourceimage", "Can star a source image"),
+ ("create_sourceimageupload", "Can create a source image upload"),
+ ("update_sourceimageupload", "Can update a source image upload"),
+ ("delete_sourceimageupload", "Can delete a source image upload"),
+ ("create_s3storagesource", "Can create storage"),
+ ("delete_s3storagesource", "Can delete storage"),
+ ("update_s3storagesource", "Can update storage"),
+ ("test_s3storagesource", "Can test storage connection"),
+ ("create_site", "Can create a site"),
+ ("delete_site", "Can delete a site"),
+ ("update_site", "Can update a site"),
+ ("create_device", "Can create a device"),
+ ("delete_device", "Can delete a device"),
+ ("update_device", "Can update a device"),
+ ("view_private_data", "Can view private data"),
+ ("trigger_exports", "Can trigger data exports"),
+ ],
+ },
+ ),
+ ]
From 9012d7f365a661803dc8b86ffd01fb83d8c1b713 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 11:43:41 -0400
Subject: [PATCH 25/44] migrations: update Algorithm.task_type choices to
include post-processing
---
.../0025_alter_algorithm_task_type.py | 42 +++++++++++++++++++
1 file changed, 42 insertions(+)
create mode 100644 ami/ml/migrations/0025_alter_algorithm_task_type.py
diff --git a/ami/ml/migrations/0025_alter_algorithm_task_type.py b/ami/ml/migrations/0025_alter_algorithm_task_type.py
new file mode 100644
index 000000000..6fc4e8167
--- /dev/null
+++ b/ami/ml/migrations/0025_alter_algorithm_task_type.py
@@ -0,0 +1,42 @@
+# Generated by Django 4.2.10 on 2025-10-14 05:01
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("ml", "0024_fix_classifications_missing_category_maps"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="algorithm",
+ name="task_type",
+ field=models.CharField(
+ choices=[
+ ("detection", "Detection"),
+ ("localization", "Localization"),
+ ("segmentation", "Segmentation"),
+ ("classification", "Classification"),
+ ("embedding", "Embedding"),
+ ("tracking", "Tracking"),
+ ("tagging", "Tagging"),
+ ("regression", "Regression"),
+ ("captioning", "Captioning"),
+ ("generation", "Generation"),
+ ("translation", "Translation"),
+ ("summarization", "Summarization"),
+ ("question_answering", "Question Answering"),
+ ("depth_estimation", "Depth Estimation"),
+ ("pose_estimation", "Pose Estimation"),
+ ("size_estimation", "Size Estimation"),
+ ("post_processing", "Post Processing"),
+ ("other", "Other"),
+ ("unknown", "Unknown"),
+ ],
+ default="unknown",
+ max_length=255,
+ null=True,
+ ),
+ ),
+ ]
From 787ac0ba4fcf4782bccc3633d5e9b267d8e97bbe Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 14:27:20 -0400
Subject: [PATCH 26/44] migrations: add merge migration for 0075_alter_project_options and 0076_add_occurrence_composite_indexes
---
ami/main/migrations/0077_merge_20251014_1426.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
create mode 100644 ami/main/migrations/0077_merge_20251014_1426.py
diff --git a/ami/main/migrations/0077_merge_20251014_1426.py b/ami/main/migrations/0077_merge_20251014_1426.py
new file mode 100644
index 000000000..7826482e0
--- /dev/null
+++ b/ami/main/migrations/0077_merge_20251014_1426.py
@@ -0,0 +1,12 @@
+# Generated by Django 4.2.10 on 2025-10-14 14:26
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("main", "0075_alter_project_options"),
+ ("main", "0076_add_occurrence_composite_indexes"),
+ ]
+
+ operations = []
From 5e85b75ddf18382a884f94620873145730585ec8 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Fri, 10 Oct 2025 10:58:51 -0400
Subject: [PATCH 27/44] refactor: initialize post-processing tasks with job
context in the job runner
---
ami/jobs/models.py | 7 ++-
ami/ml/post_processing/base.py | 56 +++++++--------------
ami/ml/post_processing/small_size_filter.py | 9 +---
3 files changed, 25 insertions(+), 47 deletions(-)
diff --git a/ami/jobs/models.py b/ami/jobs/models.py
index 96470a97d..27d8f5946 100644
--- a/ami/jobs/models.py
+++ b/ami/jobs/models.py
@@ -662,13 +662,16 @@ def run(cls, job: "Job"):
params = job.params or {}
task_key: str = params.get("task", "")
config = params.get("config", {})
- job.logger.info(f"Post-processing task: {task_key} with params: {job.params}")
-
+ job.logger.info(f"Post-processing task: {task_key} with params: {config}")
+ # Get the registered task class
task_cls = get_postprocessing_task(task_key)
if not task_cls:
raise ValueError(f"Unknown post-processing task '{task_key}'")
+ # Instantiate the task with job context and config
task = task_cls(job=job, **config)
+
+ # Run the task
task.run()
job.progress.update_stage(cls.key, status=JobState.SUCCESS, progress=1)
job.finished_at = datetime.datetime.now()
diff --git a/ami/ml/post_processing/base.py b/ami/ml/post_processing/base.py
index f294b9aab..bd1e8b0e2 100644
--- a/ami/ml/post_processing/base.py
+++ b/ami/ml/post_processing/base.py
@@ -2,7 +2,7 @@
import abc
import logging
-from typing import Any
+from typing import Any, Optional
from ami.jobs.models import Job
from ami.ml.models import Algorithm
@@ -39,54 +39,34 @@ class BasePostProcessingTask(abc.ABC):
Abstract base class for all post-processing tasks.
"""
- # Each task must override these
key: str = ""
name: str = ""
def __init__(
self,
- job: Job | None = None,
- logger: logging.Logger | None = None,
+ job: Optional["Job"] = None,
+ task_logger: logging.Logger | None = None,
**config: Any,
):
- self.job = job
- self.config = config
- # Choose the right logger
- if logger is not None:
- self.logger = logger
- elif job is not None:
- self.logger = job.logger
- else:
- self.logger = logging.getLogger(f"ami.post_processing.{self.key}")
-
- algorithm, _ = Algorithm.objects.get_or_create(
- name=self.__class__.__name__,
- defaults={
- "description": f"Post-processing task: {self.key}",
- "task_type": AlgorithmTaskType.POST_PROCESSING.value,
- },
- )
- self.algorithm: Algorithm = algorithm
-
- self.logger.info(f"Initialized {self.__class__.__name__} with config={self.config}, job={job}")
-
- def update_progress(self, progress: float):
"""
- Update progress if job is present, otherwise just log.
+ Initialize task with optional job and logger context.
"""
+ self.job = job
+ self.config: dict[str, Any] = config
- if self.job:
- self.job.progress.update_stage(self.job.job_type_key, progress=progress)
- self.job.save(update_fields=["progress"])
-
+ if job:
+ self.logger = job.logger
+ elif task_logger:
+ self.logger = task_logger
else:
- # No job object — fallback to plain logging
- self.logger.info(f"[{self.name}] Progress {progress:.0%}")
+ self.logger = logging.getLogger(f"ami.post_processing.{self.key}")
+ self.log_config()
@abc.abstractmethod
def run(self) -> None:
- """
- Run the task logic.
- Must be implemented by subclasses.
- """
- raise NotImplementedError("BasePostProcessingTask subclasses must implement run()")
+ """Run the task logic. Must be implemented by subclasses."""
+ raise NotImplementedError("Subclasses must implement run()")
+
+ def log_config(self):
+ """Helper to log the task configuration at start."""
+ self.logger.info(f"Running task {self.name} ({self.key}) with config: {self.config}")
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
index a8f9b9010..33f9901b9 100644
--- a/ami/ml/post_processing/small_size_filter.py
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -31,7 +31,7 @@ def run(self) -> None:
try:
collection = SourceImageCollection.objects.get(pk=collection_id)
- self.logger.info(f"Loaded SourceImageCollection {collection_id} (Project={collection.project})")
+ self.logger.info(f"Loaded SourceImageCollection {collection_id} " f"(Project={collection.project})")
except SourceImageCollection.DoesNotExist:
msg = f"SourceImageCollection {collection_id} not found"
self.logger.error(msg)
@@ -85,11 +85,6 @@ def run(self) -> None:
comment=f"Auto-set by {self.name} post-processing task",
)
modified += 1
- self.logger.info(f"Detection {det.pk}: marked as 'Not identifiable'")
-
- # Update progress every 10 detections
- if i % 10 == 0 or i == total:
- progress = i / total if total > 0 else 1.0
- self.update_progress(progress)
+ self.logger.debug(f"Detection {det.pk}: marked as 'Not identifiable'")
self.logger.info(f"=== Completed {self.name}: {modified}/{total} detections modified ===")
From 88ffba8fb7e03c12aa2513cf79112cb790f301aa Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 14:52:36 -0400
Subject: [PATCH 28/44] chore: rebase feat/postprocessing-class-masking onto
feat/postprocessing-framework
---
ami/ml/post_processing/__init__.py | 2 +-
ami/ml/post_processing/base.py | 56 +++--
ami/ml/post_processing/class_masking.py | 253 ++++++++++++++++++++
ami/ml/post_processing/small_size_filter.py | 9 +-
4 files changed, 299 insertions(+), 21 deletions(-)
create mode 100644 ami/ml/post_processing/class_masking.py
diff --git a/ami/ml/post_processing/__init__.py b/ami/ml/post_processing/__init__.py
index 136481120..bf2e99698 100644
--- a/ami/ml/post_processing/__init__.py
+++ b/ami/ml/post_processing/__init__.py
@@ -1 +1 @@
-from . import rank_rollup, small_size_filter # noqa: F401
+from . import class_masking, rank_rollup, small_size_filter # noqa: F401
diff --git a/ami/ml/post_processing/base.py b/ami/ml/post_processing/base.py
index bd1e8b0e2..f294b9aab 100644
--- a/ami/ml/post_processing/base.py
+++ b/ami/ml/post_processing/base.py
@@ -2,7 +2,7 @@
import abc
import logging
-from typing import Any, Optional
+from typing import Any
from ami.jobs.models import Job
from ami.ml.models import Algorithm
@@ -39,34 +39,54 @@ class BasePostProcessingTask(abc.ABC):
Abstract base class for all post-processing tasks.
"""
+ # Each task must override these
key: str = ""
name: str = ""
def __init__(
self,
- job: Optional["Job"] = None,
- task_logger: logging.Logger | None = None,
+ job: Job | None = None,
+ logger: logging.Logger | None = None,
**config: Any,
):
- """
- Initialize task with optional job and logger context.
- """
self.job = job
- self.config: dict[str, Any] = config
-
- if job:
+ self.config = config
+ # Choose the right logger
+ if logger is not None:
+ self.logger = logger
+ elif job is not None:
self.logger = job.logger
- elif task_logger:
- self.logger = task_logger
else:
self.logger = logging.getLogger(f"ami.post_processing.{self.key}")
- self.log_config()
+
+ algorithm, _ = Algorithm.objects.get_or_create(
+ name=self.__class__.__name__,
+ defaults={
+ "description": f"Post-processing task: {self.key}",
+ "task_type": AlgorithmTaskType.POST_PROCESSING.value,
+ },
+ )
+ self.algorithm: Algorithm = algorithm
+
+ self.logger.info(f"Initialized {self.__class__.__name__} with config={self.config}, job={job}")
+
+ def update_progress(self, progress: float):
+ """
+ Update progress if job is present, otherwise just log.
+ """
+
+ if self.job:
+ self.job.progress.update_stage(self.job.job_type_key, progress=progress)
+ self.job.save(update_fields=["progress"])
+
+ else:
+ # No job object — fallback to plain logging
+ self.logger.info(f"[{self.name}] Progress {progress:.0%}")
@abc.abstractmethod
def run(self) -> None:
- """Run the task logic. Must be implemented by subclasses."""
- raise NotImplementedError("Subclasses must implement run()")
-
- def log_config(self):
- """Helper to log the task configuration at start."""
- self.logger.info(f"Running task {self.name} ({self.key}) with config: {self.config}")
+ """
+ Run the task logic.
+ Must be implemented by subclasses.
+ """
+ raise NotImplementedError("BasePostProcessingTask subclasses must implement run()")
diff --git a/ami/ml/post_processing/class_masking.py b/ami/ml/post_processing/class_masking.py
new file mode 100644
index 000000000..04c621830
--- /dev/null
+++ b/ami/ml/post_processing/class_masking.py
@@ -0,0 +1,253 @@
+import logging
+
+from django.db.models import QuerySet
+from django.utils import timezone
+
+from ami.main.models import Classification, Occurrence, SourceImageCollection, TaxaList
+from ami.ml.models import Algorithm, AlgorithmCategoryMap
+from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
+
+logger = logging.getLogger(__name__)
+
+
+def update_single_occurrence(
+ occurrence: Occurrence,
+ algorithm: Algorithm,
+ taxa_list: TaxaList,
+ task_logger: logging.Logger = logger,
+):
+ task_logger.info(f"Recalculating classifications for occurrence {occurrence.pk}.")
+
+ # Get the classifications for the occurrence in the collection
+ classifications = Classification.objects.filter(
+ detection__occurrence=occurrence,
+ terminal=True,
+ algorithm=algorithm,
+ scores__isnull=False,
+ ).distinct()
+
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=taxa_list,
+ algorithm=algorithm,
+ )
+
+
+def update_occurrences_in_collection(
+ collection: SourceImageCollection,
+ taxa_list: TaxaList,
+ algorithm: Algorithm,
+ params: dict,
+ task_logger: logging.Logger = logger,
+ job=None,
+):
+ task_logger.info(f"Recalculating classifications based on a taxa list. Params: {params}")
+
+ # Make new AlgorithmCategoryMap with the taxa in the list
+ # @TODO
+
+ classifications = Classification.objects.filter(
+ detection__source_image__collections=collection,
+ terminal=True,
+ # algorithm__task_type="classification",
+ algorithm=algorithm,
+ scores__isnull=False,
+ ).distinct()
+
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=taxa_list,
+ algorithm=algorithm,
+ )
+
+
+def make_classifications_filtered_by_taxa_list(
+ classifications: QuerySet[Classification],
+ taxa_list: TaxaList,
+ algorithm: Algorithm,
+):
+ taxa_in_list = taxa_list.taxa.all()
+
+ occurrences_to_update: set[Occurrence] = set()
+ logger.info(f"Found {len(classifications)} terminal classifications with scores to update.")
+
+ if not classifications:
+ raise ValueError("No terminal classifications with scores found to update.")
+
+ if not algorithm.category_map:
+ raise ValueError(f"Algorithm {algorithm} does not have a category map.")
+ category_map: AlgorithmCategoryMap = algorithm.category_map
+
+ # Consider moving this to a method on the Classification model
+
+ # @TODO find a more efficient way to get the category map with taxa. This is slow!
+ logger.info(f"Retrieving category map with Taxa instances for algorithm {algorithm}")
+ category_map_with_taxa = category_map.with_taxa()
+ # Filter the category map to only include taxa that are in the taxa list
+ # included_category_map_with_taxa = [
+ # category for category in category_map_with_taxa if category["taxon"] in taxa_in_list
+ # ]
+ excluded_category_map_with_taxa = [
+ category for category in category_map_with_taxa if category["taxon"] not in taxa_in_list
+ ]
+
+ # included_category_indices = [int(category["index"]) for category in category_map_with_taxa]
+ excluded_category_indices = [
+ int(category["index"]) for category in excluded_category_map_with_taxa # type: ignore
+ ]
+
+ # Log number of categories in the category map, num included, and num excluded, num classifications to update
+ logger.info(
+ f"Category map has {len(category_map_with_taxa)} categories, "
+ f"{len(excluded_category_map_with_taxa)} categories excluded, "
+ f"{len(classifications)} classifications to check"
+ )
+
+ classifications_to_add = []
+ classifications_to_update = []
+
+ timestamp = timezone.now()
+ for classification in classifications:
+ scores, logits = classification.scores, classification.logits
+ # Set scores and logits to zero if they are not in the filtered category indices
+
+ import numpy as np
+
+ # Assert that all scores & logits are lists of numbers
+ if not isinstance(scores, list) or not all(isinstance(score, (int, float)) for score in scores):
+ raise ValueError(f"Scores for classification {classification.pk} are not a list of numbers: {scores}")
+ if not isinstance(logits, list) or not all(isinstance(logit, (int, float)) for logit in logits):
+ raise ValueError(f"Logits for classification {classification.pk} are not a list of numbers: {logits}")
+
+ logger.debug(f"Processing classification {classification.pk} with {len(scores)} scores")
+ logger.info(f"Previous totals: {sum(scores)} scores, {sum(logits)} logits")
+
+ # scores_np_filtered = np.array(scores)
+ logits_np = np.array(logits)
+
+ # scores_np_filtered[excluded_category_indices] = 0.0
+
+ # @TODO can we use np.NAN instead of 0.0? zero will NOT calculate correctly in softmax.
+ # @TODO delete the excluded categories from the scores and logits instead of setting to 0.0
+ # logits_np[excluded_category_indices] = 0.0
+ # logits_np[excluded_category_indices] = np.nan
+ logits_np[excluded_category_indices] = -100
+
+ logits: list[float] = logits_np.tolist()
+
+ from numpy import exp
+ from numpy import sum as np_sum
+
+ # @TODO add test to see if this is correct, or needed!
+ # Recalculate the softmax scores based on the filtered logits
+ scores_np: np.ndarray = exp(logits_np - np.max(logits_np)) # Subtract max for numerical stability
+ scores_np /= np_sum(scores_np) # Normalize to get probabilities
+
+ scores: list = scores_np.tolist() # Convert back to list
+
+ logger.info(f"New totals: {sum(scores)} scores, {sum(logits)} logits")
+
+ # Get the taxon with the highest score using the index of the max score
+ top_index = scores.index(max(scores))
+ top_taxon = category_map_with_taxa[top_index][
+ "taxon"
+ ] # @TODO: This doesn't work if the taxon has never been classified
+ print("Top taxon: ", category_map_with_taxa[top_index]) # @TODO: REMOVE
+ print("Top index: ", top_index) # @TODO: REMOVE
+
+ # check if needs updating
+ if classification.scores == scores and classification.logits == logits:
+ logger.debug(f"Classification {classification.pk} does not need updating")
+ continue
+
+ # Consider the existing classification as an intermediate classification
+ classification.terminal = False
+ classification.updated_at = timestamp
+
+ # Recalculate the top taxon and score
+ new_classification = Classification(
+ taxon=top_taxon,
+ algorithm=classification.algorithm,
+ score=max(scores),
+ scores=scores,
+ logits=logits,
+ detection=classification.detection,
+ timestamp=classification.timestamp,
+ terminal=True,
+ category_map=None, # @TODO need a new category map with the filtered taxa
+ created_at=timestamp,
+ updated_at=timestamp,
+ )
+ if new_classification.taxon is None:
+ raise (ValueError("Classification isn't registered yet. Aborting")) # @TODO remove or fail gracefully
+
+ classifications_to_update.append(classification)
+ classifications_to_add.append(new_classification)
+
+ assert new_classification.detection is not None
+ assert new_classification.detection.occurrence is not None
+ occurrences_to_update.add(new_classification.detection.occurrence)
+
+ logging.info(
+ f"Adding new classification for Taxon {top_taxon} to occurrence {new_classification.detection.occurrence}"
+ )
+
+ # Bulk update the existing classifications
+ if classifications_to_update:
+ logger.info(f"Bulk updating {len(classifications_to_update)} existing classifications")
+ Classification.objects.bulk_update(classifications_to_update, ["terminal", "updated_at"])
+ logger.info(f"Updated {len(classifications_to_update)} existing classifications")
+
+ if classifications_to_add:
+ # Bulk create the new classifications
+ logger.info(f"Bulk creating {len(classifications_to_add)} new classifications")
+ Classification.objects.bulk_create(classifications_to_add)
+ logger.info(f"Added {len(classifications_to_add)} new classifications")
+
+ # Update the occurrence determinations
+ logger.info(f"Updating the determinations for {len(occurrences_to_update)} occurrences")
+ for occurrence in occurrences_to_update:
+ occurrence.save(update_determination=True)
+ logger.info(f"Updated determinations for {len(occurrences_to_update)} occurrences")
+
+
+@register_postprocessing_task
+class ClassMaskingTask(BasePostProcessingTask):
+ key = "class_masking"
+ name = "Class masking"
+
+ def run(self) -> None:
+ """Apply class masking on a source image collection using a taxa list."""
+ job = self.job
+ self.logger.info(f"=== Starting {self.name} ===")
+
+ collection_id = self.config.get("collection_id")
+ taxa_list_id = self.config.get("taxa_list_id")
+ algorithm_id = self.config.get("algorithm_id")
+
+ # Validate config parameters
+ if not all([collection_id, taxa_list_id, algorithm_id]):
+ self.logger.error("Missing required configuration: collection_id, taxa_list_id, algorithm_id")
+ return
+
+ try:
+ collection = SourceImageCollection.objects.get(pk=collection_id)
+ taxa_list = TaxaList.objects.get(pk=taxa_list_id)
+ algorithm = Algorithm.objects.get(pk=algorithm_id)
+ except Exception as e:
+ self.logger.exception(f"Failed to load objects: {e}")
+ return
+
+ self.logger.info(f"Applying class masking on collection {collection_id} using taxa list {taxa_list_id}")
+
+ update_occurrences_in_collection(
+ collection=collection,
+ taxa_list=taxa_list,
+ algorithm=algorithm,
+ params=self.config,
+ task_logger=self.logger,
+ job=job,
+ )
+
+ self.logger.info("Class masking completed successfully.")
+ self.logger.info(f"=== Completed {self.name} ===")
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
index 33f9901b9..a8f9b9010 100644
--- a/ami/ml/post_processing/small_size_filter.py
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -31,7 +31,7 @@ def run(self) -> None:
try:
collection = SourceImageCollection.objects.get(pk=collection_id)
- self.logger.info(f"Loaded SourceImageCollection {collection_id} " f"(Project={collection.project})")
+ self.logger.info(f"Loaded SourceImageCollection {collection_id} (Project={collection.project})")
except SourceImageCollection.DoesNotExist:
msg = f"SourceImageCollection {collection_id} not found"
self.logger.error(msg)
@@ -85,6 +85,11 @@ def run(self) -> None:
comment=f"Auto-set by {self.name} post-processing task",
)
modified += 1
- self.logger.debug(f"Detection {det.pk}: marked as 'Not identifiable'")
+ self.logger.info(f"Detection {det.pk}: marked as 'Not identifiable'")
+
+ # Update progress every 10 detections
+ if i % 10 == 0 or i == total:
+ progress = i / total if total > 0 else 1.0
+ self.update_progress(progress)
self.logger.info(f"=== Completed {self.name}: {modified}/{total} detections modified ===")
From 951960058b48cc12794389a57a018aa12e92fbe1 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 14:55:24 -0400
Subject: [PATCH 29/44] chore: remove class masking trigger (moved to
feat/postprocessing-class-masking branch)
---
ami/main/admin.py | 27 ---------------------------
1 file changed, 27 deletions(-)
diff --git a/ami/main/admin.py b/ami/main/admin.py
index 4519ae6db..5cfa03b79 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -641,32 +641,6 @@ def run_small_size_filter(self, request: HttpRequest, queryset: QuerySet[SourceI
self.message_user(request, f"Queued Small Size Filter for {queryset.count()} collection(s). Jobs: {jobs}")
- @admin.action(description="Run Class Masking post-processing task (async)")
- def run_class_masking(self, request: HttpRequest, queryset: QuerySet[SourceImageCollection]) -> None:
- jobs = []
-
- DEFAULT_TAXA_LIST_ID = 5
- DEFAULT_ALGORITHM_ID = 11
-
- for collection in queryset:
- job = Job.objects.create(
- name=f"Post-processing: ClassMasking on Collection {collection.pk}",
- project=collection.project,
- job_type_key="post_processing",
- params={
- "task": "class_masking",
- "config": {
- "collection_id": collection.pk,
- "taxa_list_id": DEFAULT_TAXA_LIST_ID,
- "algorithm_id": DEFAULT_ALGORITHM_ID,
- },
- },
- )
- job.enqueue()
- jobs.append(job.pk)
-
- self.message_user(request, f"Queued Class Masking for {queryset.count()} collection(s). Jobs: {jobs}")
-
@admin.action(description="Run Rank Rollup post-processing task (async)")
def run_rank_rollup(self, request: HttpRequest, queryset: QuerySet[SourceImageCollection]) -> None:
"""Trigger the Rank Rollup post-processing job asynchronously."""
@@ -692,7 +666,6 @@ def run_rank_rollup(self, request: HttpRequest, queryset: QuerySet[SourceImageCo
populate_collection,
populate_collection_async,
run_small_size_filter,
- run_class_masking,
run_rank_rollup,
]
From 21e664841a2f3e4aff069d02d668241911cc263a Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 15:40:19 -0400
Subject: [PATCH 30/44] feat: update rank rollup progress reporting interval from 50 to 10 iterations
---
ami/ml/post_processing/rank_rollup.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index 9f1e3b8f8..70a821e80 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -147,8 +147,8 @@ def run(self) -> None:
else:
self.logger.info(f"No rollup applied for classification #{clf.pk} (taxon={clf.taxon})")
- # 🔹 Periodic progress updates
- if i % 50 == 0 or i == total:
+ # Update progress every 10 iterations
+ if i % 10 == 0 or i == total:
progress = i / total if total > 0 else 1.0
self.update_progress(progress)
From 6632c314c98f30020572e6652c3f8ae27aa171fa Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Tue, 14 Oct 2025 21:34:28 -0400
Subject: [PATCH 31/44] feat: add applied_to field to Classification to track
source classification
---
.../0078_classification_applied_to.py | 25 +++++++++++++++++++
ami/main/models.py | 12 ++++++++-
2 files changed, 36 insertions(+), 1 deletion(-)
create mode 100644 ami/main/migrations/0078_classification_applied_to.py
diff --git a/ami/main/migrations/0078_classification_applied_to.py b/ami/main/migrations/0078_classification_applied_to.py
new file mode 100644
index 000000000..b938f42bb
--- /dev/null
+++ b/ami/main/migrations/0078_classification_applied_to.py
@@ -0,0 +1,25 @@
+# Generated by Django 4.2.10 on 2025-10-14 21:32
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("main", "0077_merge_20251014_1426"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="classification",
+ name="applied_to",
+ field=models.ForeignKey(
+ blank=True,
+ help_text="If this classification was produced by a post-processing algorithm, this field references the original classification it was applied to.",
+ null=True,
+ on_delete=django.db.models.deletion.SET_NULL,
+ related_name="derived_classifications",
+ to="main.classification",
+ ),
+ ),
+ ]
diff --git a/ami/main/models.py b/ami/main/models.py
index 9556018f4..1a9028cb7 100644
--- a/ami/main/models.py
+++ b/ami/main/models.py
@@ -2226,7 +2226,17 @@ class Classification(BaseModel):
related_name="classifications",
)
# job = models.CharField(max_length=255, null=True)
-
+ applied_to = models.ForeignKey(
+ "self",
+ on_delete=models.SET_NULL,
+ null=True,
+ blank=True,
+ related_name="derived_classifications",
+ help_text=(
+ "If this classification was produced by a post-processing algorithm, "
+ "this field references the original classification it was applied to."
+ ),
+ )
objects = ClassificationManager()
# Type hints for auto-generated fields
From 23f80fb5311fc24d416e0e50f231de8cbd72d22b Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 15 Oct 2025 00:13:11 -0400
Subject: [PATCH 32/44] tests: add tests for small size filter and rank rollup
post-processing tasks
---
ami/ml/tests.py | 209 +++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 206 insertions(+), 3 deletions(-)
diff --git a/ami/ml/tests.py b/ami/ml/tests.py
index d14c04dfb..43e90fac0 100644
--- a/ami/ml/tests.py
+++ b/ami/ml/tests.py
@@ -1,13 +1,28 @@
import datetime
+import pathlib
import unittest
+import uuid
from django.test import TestCase
from rest_framework.test import APIRequestFactory, APITestCase
from ami.base.serializers import reverse_with_params
-from ami.main.models import Detection, Project, SourceImage, SourceImageCollection
-from ami.ml.models import Algorithm, Pipeline, ProcessingService
+from ami.main.models import (
+ Classification,
+ Detection,
+ Occurrence,
+ Project,
+ SourceImage,
+ SourceImageCollection,
+ Taxon,
+ TaxonRank,
+ group_images_into_events,
+)
+from ami.ml.models import Algorithm, AlgorithmCategoryMap, Pipeline, ProcessingService
+from ami.ml.models.algorithm import AlgorithmTaskType
from ami.ml.models.pipeline import collect_images, get_or_create_algorithm_and_category_map, save_results
+from ami.ml.post_processing.rank_rollup import RankRollupTask
+from ami.ml.post_processing.small_size_filter import SmallSizeFilterTask
from ami.ml.schemas import (
AlgorithmConfigResponse,
AlgorithmReference,
@@ -17,7 +32,12 @@
PipelineResultsResponse,
SourceImageResponse,
)
-from ami.tests.fixtures.main import create_captures_from_files, create_processing_service, setup_test_project
+from ami.tests.fixtures.main import (
+ create_captures_from_files,
+ create_processing_service,
+ create_taxa,
+ setup_test_project,
+)
from ami.tests.fixtures.ml import ALGORITHM_CHOICES
from ami.users.models import User
@@ -713,3 +733,186 @@ def test_labels_data_conversion_methods(self):
# Verify conversions are correct
self.assertEqual(test_data, converted_data)
self.assertEqual(test_labels, converted_labels)
+
+
+class TestPostProcessingTasks(TestCase):
+ def setUp(self):
+ # Create test project, deployment, and default setup
+ self.project, self.deployment = setup_test_project()
+ create_taxa(project=self.project)
+ self._create_images_with_dimensions(deployment=self.deployment)
+ group_images_into_events(deployment=self.deployment)
+
+ # Create a simple SourceImageCollection for testing
+ self.collection = SourceImageCollection.objects.create(
+ name="Test PostProcessing Collection",
+ project=self.project,
+ method="manual",
+ kwargs={"image_ids": list(self.deployment.captures.values_list("pk", flat=True))},
+ )
+ self.collection.populate_sample()
+
+ # Select example taxa
+ self.species_taxon = Taxon.objects.filter(rank=TaxonRank.SPECIES.name).first()
+ self.genus_taxon = self.species_taxon.parent if self.species_taxon else None
+ self.assertIsNotNone(self.species_taxon)
+ self.assertIsNotNone(self.genus_taxon)
+ self.algorithm = self._create_category_map_with_algorithm()
+
+ def _create_images_with_dimensions(
+ self,
+ deployment,
+ num_images: int = 5,
+ width: int = 640,
+ height: int = 480,
+ update_deployment: bool = True,
+ ):
+ """
+ Create SourceImages for a deployment with specified width and height.
+ """
+
+ created = []
+ base_time = datetime.datetime.now(datetime.timezone.utc)
+
+ for i in range(num_images):
+ random_prefix = uuid.uuid4().hex[:8]
+ path = pathlib.Path("test") / f"{random_prefix}_{i}.jpg"
+
+ image = SourceImage.objects.create(
+ deployment=deployment,
+ project=deployment.project,
+ timestamp=base_time + datetime.timedelta(minutes=i * 5),
+ path=path,
+ width=width,
+ height=height,
+ )
+ created.append(image)
+
+ if update_deployment:
+ deployment.save(update_calculated_fields=True, regroup_async=False)
+
+ def test_small_size_filter_assigns_not_identifiable(self):
+ """
+ Test that SmallSizeFilterTask correctly assigns 'Not identifiable'
+ to detections below the configured minimum size.
+ """
+ # Create small detections on the collection images
+ for image in self.collection.images.all():
+ Detection.objects.create(
+ source_image=image,
+ bbox=[0, 0, 10, 10], # small detection
+ created_at=datetime.datetime.now(datetime.timezone.utc),
+ ).associate_new_occurrence()
+
+ # Prepare the task configuration
+ task = SmallSizeFilterTask(
+ source_image_collection_id=self.collection.pk,
+ size_threshold=0.00001,
+ )
+
+ task.run()
+
+ # Verify that all small detections are now classified as "Not identifiable"
+ not_identifiable_taxon = Taxon.objects.get(name="Not identifiable")
+ detections = Detection.objects.filter(source_image__in=self.collection.images.all())
+
+ for det in detections:
+ latest_classification = Classification.objects.filter(detection=det).order_by("-created_at").first()
+ self.assertIsNotNone(latest_classification, "Each detection should have a classification.")
+ self.assertEqual(
+ latest_classification.taxon,
+ not_identifiable_taxon,
+ f"Detection {det.pk} should be classified as 'Not identifiable'",
+ )
+
+ def _create_occurrences_with_classifications(self, num=3):
+ """Helper to create occurrences and terminal classifications below species threshold."""
+ occurrences = []
+ now = datetime.datetime.now(datetime.timezone.utc)
+ for i in range(num):
+ det = Detection.objects.create(
+ source_image=self.collection.images.first(),
+ bbox=[0, 0, 200, 200],
+ )
+ occ = Occurrence.objects.create(project=self.project, event=self.deployment.events.first())
+ occ.detections.add(det)
+ classification = Classification.objects.create(
+ detection=det,
+ taxon=self.species_taxon,
+ score=0.5,
+ scores=[0.5, 0.3, 0.2],
+ terminal=True,
+ timestamp=now,
+ algorithm=self.algorithm,
+ )
+ occurrences.append((occ, classification))
+ return occurrences
+
+ def _create_category_map_with_algorithm(self):
+ """Create a simple AlgorithmCategoryMap and Algorithm to attach to classifications."""
+ species_taxa = list(self.project.taxa.filter(rank=TaxonRank.SPECIES.name)[:3])
+ assert species_taxa, "No species taxa found in project; run create_taxa() first."
+
+ data = [
+ {
+ "index": i,
+ "label": taxon.name,
+ "taxon_rank": taxon.rank,
+ "gbif_key": getattr(taxon, "gbif_key", None),
+ }
+ for i, taxon in enumerate(species_taxa)
+ ]
+ labels = [item["label"] for item in data]
+
+ category_map = AlgorithmCategoryMap.objects.create(
+ data=data,
+ labels=labels,
+ version="v1.0",
+ description="Species-level category map for testing RankRollupTask",
+ )
+
+ algorithm = Algorithm.objects.create(
+ name="Test Species Classifier",
+ task_type=AlgorithmTaskType.CLASSIFICATION.value,
+ category_map=category_map,
+ )
+
+ return algorithm
+
+ def test_rank_rollup_creates_new_terminal_classifications(self):
+ occurrences = self._create_occurrences_with_classifications(num=3)
+
+ task = RankRollupTask(
+ source_image_collection_id=self.collection.pk,
+ thresholds={"species": 0.8, "genus": 0.6, "family": 0.4},
+ )
+ task.run()
+
+ # Validate results
+ for occ, original_cls in occurrences:
+ detection = occ.detections.first()
+ original_cls.refresh_from_db(fields=["terminal"])
+ rolled_up_cls = Classification.objects.filter(detection=detection, terminal=True).first()
+
+ self.assertIsNotNone(
+ rolled_up_cls,
+ f"Expected a new rolled-up classification for original #{original_cls.pk}",
+ )
+ self.assertTrue(
+ rolled_up_cls.terminal,
+ "New rolled-up classification should be marked as terminal.",
+ )
+ self.assertFalse(
+ original_cls.terminal,
+ "Original classification should be marked as non-terminal after roll-up.",
+ )
+ self.assertEqual(
+ rolled_up_cls.taxon,
+ self.genus_taxon,
+ "Rolled-up classification should have genus-level taxon.",
+ )
+ self.assertEqual(
+ rolled_up_cls.applied_to,
+ original_cls,
+ "Rolled-up classification should reference the original classification.",
+ )
From 336636a01de2412bac041d05ace826756f7cb572 Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 15 Oct 2025 00:17:11 -0400
Subject: [PATCH 33/44] fix: create only terminal classifications and remove
identification creation
---
ami/ml/post_processing/rank_rollup.py | 40 ++++++++++-----------
ami/ml/post_processing/small_size_filter.py | 14 ++------
2 files changed, 22 insertions(+), 32 deletions(-)
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index 70a821e80..d23d718b1 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -4,7 +4,7 @@
from django.db import transaction
from django.utils import timezone
-from ami.main.models import Classification, Identification, Taxon
+from ami.main.models import Classification, Taxon
from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
logger = logging.getLogger(__name__)
@@ -35,8 +35,8 @@ class RankRollupTask(BasePostProcessingTask):
key = "rank_rollup"
name = "Rank rollup"
- DEFAULT_THRESHOLDS = {"species": 0.8, "genus": 0.6, "family": 0.4}
- ROLLUP_ORDER = ["species", "genus", "family"]
+ DEFAULT_THRESHOLDS = {"SPECIES": 0.8, "GENUS": 0.6, "FAMILY": 0.4}
+ ROLLUP_ORDER = ["SPECIES", "GENUS", "FAMILY"]
def run(self) -> None:
job = self.job
@@ -49,7 +49,7 @@ def run(self) -> None:
rollup_order = config.get("rollup_order", self.ROLLUP_ORDER)
if not collection_id:
- self.logger.warning("No 'source_image_collection_id' provided in config. Aborting task.")
+ self.logger.info("No 'source_image_collection_id' provided in config. Aborting task.")
return
self.logger.info(
@@ -72,10 +72,10 @@ def run(self) -> None:
self.logger.info(f"Processing classification #{clf.pk} (taxon={clf.taxon}, score={clf.score:.3f})")
if not clf.scores:
- self.logger.warning(f"Skipping classification #{clf.pk}: no scores available")
+ self.logger.info(f"Skipping classification #{clf.pk}: no scores available")
continue
if not clf.category_map:
- self.logger.warning(f"Skipping classification #{clf.pk}: no category_map assigned")
+ self.logger.info(f"Skipping classification #{clf.pk}: no category_map assigned")
continue
taxon_scores = defaultdict(float)
@@ -87,7 +87,7 @@ def run(self) -> None:
taxon = Taxon.objects.filter(name=label).first()
if not taxon:
- self.logger.debug(f"Skipping label '{label}' (no matching Taxon in DB)")
+ self.logger.info(f"Skipping label '{label}' (no matching Taxon Found)")
continue
for rank in rollup_order:
@@ -98,16 +98,20 @@ def run(self) -> None:
new_taxon = None
new_score = None
+ self.logger.info(f"Aggregated taxon scores: { {t.name: s for t, s in taxon_scores.items()} }")
for rank in rollup_order:
threshold = thresholds.get(rank, 1.0)
+ # import pdb
+
+ # pdb.set_trace()
candidates = {t: s for t, s in taxon_scores.items() if t.rank == rank}
if not candidates:
- self.logger.debug(f"No candidates found at rank {rank}")
+ self.logger.info(f"No candidates found at rank {rank}")
continue
best_taxon, best_score = max(candidates.items(), key=lambda kv: kv[1])
- self.logger.debug(
+ self.logger.info(
f"Best at rank {rank}: {best_taxon.name} ({best_score:.3f}) [threshold={threshold}]"
)
@@ -117,10 +121,11 @@ def run(self) -> None:
break
if new_taxon and new_taxon != clf.taxon:
- self.logger.info(f"Rolling up {clf.taxon} → {new_taxon} ({new_taxon.rank})")
+ self.logger.info(f"Rolling up {clf.taxon} => {new_taxon} ({new_taxon.rank})")
with transaction.atomic():
- Classification.objects.filter(detection=clf.detection, terminal=True).update(terminal=False)
+ # Mark all classifications for this detection as non-terminal
+ Classification.objects.filter(detection=clf.detection).update(terminal=False)
Classification.objects.create(
detection=clf.detection,
taxon=new_taxon,
@@ -128,20 +133,13 @@ def run(self) -> None:
terminal=True,
algorithm=self.algorithm,
timestamp=timezone.now(),
+ applied_to=clf,
)
occurrence = clf.detection.occurrence
- if occurrence:
- Identification.objects.create(
- occurrence=occurrence,
- taxon=new_taxon,
- user=None,
- comment=f"Auto-set by {self.name} post-processing task",
- )
- updated_occurrences.append(occurrence.pk)
-
+ updated_occurrences.append(occurrence)
self.logger.info(
- f"Rolled up occurrence {occurrence.pk}: {clf.taxon} → {new_taxon} "
+ f"Rolled up occurrence {occurrence.pk}: {clf.taxon} => {new_taxon} "
f"({new_taxon.rank}) with rolled-up score={new_score:.3f}"
)
else:
diff --git a/ami/ml/post_processing/small_size_filter.py b/ami/ml/post_processing/small_size_filter.py
index a8f9b9010..a21dbf649 100644
--- a/ami/ml/post_processing/small_size_filter.py
+++ b/ami/ml/post_processing/small_size_filter.py
@@ -1,12 +1,12 @@
from django.db import transaction
from django.utils import timezone
-from ami.main.models import Detection, Identification, SourceImageCollection, Taxon, TaxonRank
+from ami.main.models import Detection, SourceImageCollection, Taxon, TaxonRank
from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
@register_postprocessing_task
-class SmallSizeFilter(BasePostProcessingTask):
+class SmallSizeFilterTask(BasePostProcessingTask):
key = "small_size_filter"
name = "Small size filter"
@@ -58,7 +58,7 @@ def run(self) -> None:
img_area = img_w * img_h
rel_area = det_area / img_area if img_area else 0
- self.logger.debug(
+ self.logger.info(
f"Detection {det.pk}: area={det_area}, rel_area={rel_area:.4f}, " f"threshold={threshold:.4f}"
)
@@ -76,14 +76,6 @@ def run(self) -> None:
timestamp=timezone.now(),
algorithm=self.algorithm,
)
- # Also create/update Identification for the linked occurrence
- if det.occurrence:
- Identification.objects.create(
- occurrence=det.occurrence,
- taxon=not_identifiable_taxon,
- user=None, # auto-generated by post-processing
- comment=f"Auto-set by {self.name} post-processing task",
- )
modified += 1
self.logger.info(f"Detection {det.pk}: marked as 'Not identifiable'")
From 0d90cdef08d88887a49a2d65481d364a444195bb Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 15 Oct 2025 00:23:52 -0400
Subject: [PATCH 34/44] refactor: remove inner transaction.atomic for cleaner
transaction management
---
ami/ml/post_processing/rank_rollup.py | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index d23d718b1..da5177b4e 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -123,18 +123,17 @@ def run(self) -> None:
if new_taxon and new_taxon != clf.taxon:
self.logger.info(f"Rolling up {clf.taxon} => {new_taxon} ({new_taxon.rank})")
- with transaction.atomic():
- # Mark all classifications for this detection as non-terminal
- Classification.objects.filter(detection=clf.detection).update(terminal=False)
- Classification.objects.create(
- detection=clf.detection,
- taxon=new_taxon,
- score=new_score,
- terminal=True,
- algorithm=self.algorithm,
- timestamp=timezone.now(),
- applied_to=clf,
- )
+ # Mark all classifications for this detection as non-terminal
+ Classification.objects.filter(detection=clf.detection).update(terminal=False)
+ Classification.objects.create(
+ detection=clf.detection,
+ taxon=new_taxon,
+ score=new_score,
+ terminal=True,
+ algorithm=self.algorithm,
+ timestamp=timezone.now(),
+ applied_to=clf,
+ )
occurrence = clf.detection.occurrence
updated_occurrences.append(occurrence)
From 23469e261b50fff8e76fdf6edff5a8c9a709643f Mon Sep 17 00:00:00 2001
From: mohamedelabbas1996
Date: Wed, 15 Oct 2025 00:27:28 -0400
Subject: [PATCH 35/44] tests: fixed small size filter test
---
ami/ml/tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ami/ml/tests.py b/ami/ml/tests.py
index 43e90fac0..6340f7df8 100644
--- a/ami/ml/tests.py
+++ b/ami/ml/tests.py
@@ -807,7 +807,7 @@ def test_small_size_filter_assigns_not_identifiable(self):
# Prepare the task configuration
task = SmallSizeFilterTask(
source_image_collection_id=self.collection.pk,
- size_threshold=0.00001,
+ size_threshold=0.01,
)
task.run()
From 1b8700ee2d44a3ddab8b5886d1573cc96410d951 Mon Sep 17 00:00:00 2001
From: Michael Bunsen
Date: Thu, 16 Oct 2025 01:36:53 -0700
Subject: [PATCH 36/44] draft: work towards class masking in new framework
---
ami/main/admin.py | 75 ++++++++++++++++++++++++-
ami/ml/post_processing/class_masking.py | 26 +++++++--
ami/ml/post_processing/registry.py | 2 +
3 files changed, 98 insertions(+), 5 deletions(-)
diff --git a/ami/main/admin.py b/ami/main/admin.py
index ab4b7a698..301293957 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -5,13 +5,17 @@
from django.db.models.query import QuerySet
from django.http.request import HttpRequest
from django.template.defaultfilters import filesizeformat
+from django.urls import reverse
from django.utils.formats import number_format
+from django.utils.html import format_html
from guardian.admin import GuardedModelAdmin
import ami.utils
from ami import tasks
from ami.jobs.models import Job
+from ami.ml.models.algorithm import Algorithm
from ami.ml.models.project_pipeline_config import ProjectPipelineConfig
+from ami.ml.post_processing.class_masking import update_single_occurrence
from ami.ml.tasks import remove_duplicate_classifications
from .models import (
@@ -289,6 +293,7 @@ class ClassificationInline(admin.TabularInline):
model = Classification
extra = 0
fields = (
+ "classification_link",
"taxon",
"algorithm",
"timestamp",
@@ -296,6 +301,7 @@ class ClassificationInline(admin.TabularInline):
"created_at",
)
readonly_fields = (
+ "classification_link",
"taxon",
"algorithm",
"timestamp",
@@ -303,6 +309,13 @@ class ClassificationInline(admin.TabularInline):
"created_at",
)
+ @admin.display(description="Classification")
+ def classification_link(self, obj: Classification) -> str:
+ if obj.pk:
+ url = reverse("admin:main_classification_change", args=[obj.pk])
+            return format_html('<a href="{}">{}</a>', url, f"Classification #{obj.pk}")
+ return "-"
+
def get_queryset(self, request: HttpRequest) -> QuerySet[Any]:
qs = super().get_queryset(request)
return qs.select_related("taxon", "algorithm", "detection")
@@ -312,6 +325,7 @@ class DetectionInline(admin.TabularInline):
model = Detection
extra = 0
fields = (
+ "detection_link",
"detection_algorithm",
"source_image",
"timestamp",
@@ -319,6 +333,7 @@ class DetectionInline(admin.TabularInline):
"occurrence",
)
readonly_fields = (
+ "detection_link",
"detection_algorithm",
"source_image",
"timestamp",
@@ -326,6 +341,13 @@ class DetectionInline(admin.TabularInline):
"occurrence",
)
+ @admin.display(description="ID")
+ def detection_link(self, obj):
+ if obj.pk:
+ url = reverse("admin:main_detection_change", args=[obj.pk])
+ return format_html('{}', url, obj.pk)
+ return "-"
+
@admin.register(Detection)
class DetectionAdmin(admin.ModelAdmin[Detection]):
@@ -383,7 +405,7 @@ class OccurrenceAdmin(admin.ModelAdmin[Occurrence]):
"determination__rank",
"created_at",
)
- search_fields = ("determination__name", "determination__search_names")
+ search_fields = ("id", "determination__name", "determination__search_names")
def get_queryset(self, request: HttpRequest) -> QuerySet[Any]:
qs = super().get_queryset(request)
@@ -405,11 +427,60 @@ def get_queryset(self, request: HttpRequest) -> QuerySet[Any]:
def detections_count(self, obj) -> int:
return obj.detections_count
+ @admin.action(description="Update occurrence with Newfoundland species taxa list")
+ def update_with_newfoundland_species(self, request: HttpRequest, queryset: QuerySet[Occurrence]) -> None:
+ """
+        Update selected occurrences using the 'Newfoundland Species' taxa list
+ and 'Quebec & Vermont Species Classifier - Apr 2024' algorithm.
+ """
+ try:
+ # Get the taxa list by name
+ taxa_list = TaxaList.objects.get(name="Newfoundland Species")
+ except TaxaList.DoesNotExist:
+ self.message_user(
+ request,
+                "Error: TaxaList 'Newfoundland Species' not found.",
+ level="error",
+ )
+ return
+
+ try:
+ # Get the algorithm by name
+ algorithm = Algorithm.objects.get(name="Quebec & Vermont Species Classifier - Apr 2024")
+ except Algorithm.DoesNotExist:
+ self.message_user(
+ request,
+ "Error: Algorithm 'Quebec & Vermont Species Classifier - Apr 2024' not found.",
+ level="error",
+ )
+ return
+
+ # Process each occurrence
+ count = 0
+ for occurrence in queryset:
+ try:
+ update_single_occurrence(
+ occurrence=occurrence,
+ algorithm=algorithm,
+ taxa_list=taxa_list,
+ )
+ count += 1
+ except Exception as e:
+ self.message_user(
+ request,
+ f"Error processing occurrence {occurrence.pk}: {str(e)}",
+ level="error",
+ )
+
+ self.message_user(request, f"Successfully updated {count} occurrence(s).")
+
ordering = ("-created_at",)
# Add classifications as inline
inlines = [DetectionInline]
+ actions = [update_with_newfoundland_species]
+
@admin.register(Classification)
class ClassificationAdmin(admin.ModelAdmin[Classification]):
@@ -433,6 +504,8 @@ class ClassificationAdmin(admin.ModelAdmin[Classification]):
"taxon__rank",
)
+ autocomplete_fields = ("taxon",)
+
def get_queryset(self, request: HttpRequest) -> QuerySet[Any]:
qs = super().get_queryset(request)
return qs.select_related(
diff --git a/ami/ml/post_processing/class_masking.py b/ami/ml/post_processing/class_masking.py
index 04c621830..298688ba7 100644
--- a/ami/ml/post_processing/class_masking.py
+++ b/ami/ml/post_processing/class_masking.py
@@ -4,8 +4,8 @@
from django.utils import timezone
from ami.main.models import Classification, Occurrence, SourceImageCollection, TaxaList
-from ami.ml.models import Algorithm, AlgorithmCategoryMap
-from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
+from ami.ml.models.algorithm import Algorithm, AlgorithmCategoryMap, AlgorithmTaskType
+from ami.ml.post_processing.base import BasePostProcessingTask
logger = logging.getLogger(__name__)
@@ -26,10 +26,22 @@ def update_single_occurrence(
scores__isnull=False,
).distinct()
+ # Make a new Algorithm for the filtered classifications
+ new_algorithm, _ = Algorithm.objects.get_or_create(
+ name=f"{algorithm.name} (filtered by taxa list {taxa_list.name})",
+ key=f"{algorithm.key}_filtered_by_taxa_list_{taxa_list.pk}",
+ defaults={
+ "description": f"Classification algorithm {algorithm.name} filtered by taxa list {taxa_list.name}",
+ "task_type": AlgorithmTaskType.CLASSIFICATION.value,
+ "category_map": algorithm.category_map,
+ },
+ )
+
make_classifications_filtered_by_taxa_list(
classifications=classifications,
taxa_list=taxa_list,
algorithm=algorithm,
+ new_algorithm=new_algorithm,
)
@@ -38,6 +50,7 @@ def update_occurrences_in_collection(
taxa_list: TaxaList,
algorithm: Algorithm,
params: dict,
+ new_algorithm: Algorithm,
task_logger: logging.Logger = logger,
job=None,
):
@@ -58,6 +71,7 @@ def update_occurrences_in_collection(
classifications=classifications,
taxa_list=taxa_list,
algorithm=algorithm,
+ new_algorithm=new_algorithm,
)
@@ -65,6 +79,7 @@ def make_classifications_filtered_by_taxa_list(
classifications: QuerySet[Classification],
taxa_list: TaxaList,
algorithm: Algorithm,
+ new_algorithm: Algorithm,
):
taxa_in_list = taxa_list.taxa.all()
@@ -167,7 +182,7 @@ def make_classifications_filtered_by_taxa_list(
# Recalculate the top taxon and score
new_classification = Classification(
taxon=top_taxon,
- algorithm=classification.algorithm,
+ algorithm=new_algorithm,
score=max(scores),
scores=scores,
logits=logits,
@@ -211,7 +226,6 @@ def make_classifications_filtered_by_taxa_list(
logger.info(f"Updated determinations for {len(occurrences_to_update)} occurrences")
-@register_postprocessing_task
class ClassMaskingTask(BasePostProcessingTask):
key = "class_masking"
name = "Class masking"
@@ -240,6 +254,9 @@ def run(self) -> None:
self.logger.info(f"Applying class masking on collection {collection_id} using taxa list {taxa_list_id}")
+ # @TODO temporary, do we need a new algorithm for each class mask?
+ self.algorithm.category_map = algorithm.category_map # Ensure the algorithm has its category map loaded
+
update_occurrences_in_collection(
collection=collection,
taxa_list=taxa_list,
@@ -247,6 +264,7 @@ def run(self) -> None:
params=self.config,
task_logger=self.logger,
job=job,
+ new_algorithm=self.algorithm,
)
self.logger.info("Class masking completed successfully.")
diff --git a/ami/ml/post_processing/registry.py b/ami/ml/post_processing/registry.py
index c85f607f9..308be18ae 100644
--- a/ami/ml/post_processing/registry.py
+++ b/ami/ml/post_processing/registry.py
@@ -1,8 +1,10 @@
# Registry of available post-processing tasks
+from ami.ml.post_processing.class_masking import ClassMaskingTask
from ami.ml.post_processing.small_size_filter import SmallSizeFilterTask
POSTPROCESSING_TASKS = {
SmallSizeFilterTask.key: SmallSizeFilterTask,
+ ClassMaskingTask.key: ClassMaskingTask,
}
From a466a524230301318a93eaed376b08183a3be603 Mon Sep 17 00:00:00 2001
From: Michael Bunsen
Date: Wed, 18 Feb 2026 03:44:51 -0800
Subject: [PATCH 37/44] feat: add class masking tests, management command, and
fix registry
- Add 4 class masking tests: score redistribution, accuracy improvement,
no-op when all species in list, and softmax mathematical correctness
- Add `run_class_masking` management command with --collection-id,
--taxa-list-id, --algorithm-id, and --dry-run options
- Register RankRollupTask in post-processing registry
- Remove non-existent `register_postprocessing_task` import/decorator
from rank_rollup.py
Co-Authored-By: Claude
---
.../management/commands/run_class_masking.py | 83 +++++
ami/ml/post_processing/rank_rollup.py | 6 +-
ami/ml/post_processing/registry.py | 2 +
ami/ml/tests.py | 317 ++++++++++++++++++
4 files changed, 405 insertions(+), 3 deletions(-)
create mode 100644 ami/ml/management/commands/run_class_masking.py
diff --git a/ami/ml/management/commands/run_class_masking.py b/ami/ml/management/commands/run_class_masking.py
new file mode 100644
index 000000000..d87375d74
--- /dev/null
+++ b/ami/ml/management/commands/run_class_masking.py
@@ -0,0 +1,83 @@
+from django.core.management.base import BaseCommand, CommandError
+
+from ami.main.models import SourceImageCollection, TaxaList
+from ami.ml.models.algorithm import Algorithm
+from ami.ml.post_processing.class_masking import ClassMaskingTask
+
+
+class Command(BaseCommand):
+ help = (
+ "Run class masking post-processing on a source image collection. "
+ "Masks classifier logits for species not in the given taxa list and recalculates softmax scores."
+ )
+
+ def add_arguments(self, parser):
+ parser.add_argument("--collection-id", type=int, required=True, help="SourceImageCollection ID to process")
+ parser.add_argument("--taxa-list-id", type=int, required=True, help="TaxaList ID to use as the species mask")
+ parser.add_argument(
+ "--algorithm-id", type=int, required=True, help="Algorithm ID whose classifications to mask"
+ )
+ parser.add_argument("--dry-run", action="store_true", help="Show what would be done without making changes")
+
+ def handle(self, *args, **options):
+ collection_id = options["collection_id"]
+ taxa_list_id = options["taxa_list_id"]
+ algorithm_id = options["algorithm_id"]
+ dry_run = options["dry_run"]
+
+ # Validate inputs
+ try:
+ collection = SourceImageCollection.objects.get(pk=collection_id)
+ except SourceImageCollection.DoesNotExist:
+ raise CommandError(f"SourceImageCollection {collection_id} does not exist.")
+
+ try:
+ taxa_list = TaxaList.objects.get(pk=taxa_list_id)
+ except TaxaList.DoesNotExist:
+ raise CommandError(f"TaxaList {taxa_list_id} does not exist.")
+
+ try:
+ algorithm = Algorithm.objects.get(pk=algorithm_id)
+ except Algorithm.DoesNotExist:
+ raise CommandError(f"Algorithm {algorithm_id} does not exist.")
+
+ if not algorithm.category_map:
+ raise CommandError(f"Algorithm '{algorithm.name}' does not have a category map.")
+
+ from ami.main.models import Classification
+
+ classification_count = (
+ Classification.objects.filter(
+ detection__source_image__collections=collection,
+ terminal=True,
+ algorithm=algorithm,
+ scores__isnull=False,
+ )
+ .distinct()
+ .count()
+ )
+
+ taxa_count = taxa_list.taxa.count()
+
+ self.stdout.write(
+ f"Collection: {collection.name} (id={collection.pk})\n"
+ f"Taxa list: {taxa_list.name} (id={taxa_list.pk}, {taxa_count} taxa)\n"
+ f"Algorithm: {algorithm.name} (id={algorithm.pk})\n"
+ f"Classifications to process: {classification_count}"
+ )
+
+ if classification_count == 0:
+ raise CommandError("No terminal classifications with scores found for this collection/algorithm.")
+
+ if dry_run:
+ self.stdout.write(self.style.WARNING("Dry run — no changes made."))
+ return
+
+ self.stdout.write("Running class masking...")
+ task = ClassMaskingTask(
+ collection_id=collection_id,
+ taxa_list_id=taxa_list_id,
+ algorithm_id=algorithm_id,
+ )
+ task.run()
+ self.stdout.write(self.style.SUCCESS("Class masking completed."))
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index da5177b4e..9708b1c63 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -5,7 +5,7 @@
from django.utils import timezone
from ami.main.models import Classification, Taxon
-from ami.ml.post_processing.base import BasePostProcessingTask, register_postprocessing_task
+from ami.ml.post_processing.base import BasePostProcessingTask
logger = logging.getLogger(__name__)
@@ -26,7 +26,6 @@ def find_ancestor_by_parent_chain(taxon, target_rank: str):
return None
-@register_postprocessing_task
class RankRollupTask(BasePostProcessingTask):
"""Post-processing task that rolls up low-confidence classifications
to higher ranks using aggregated scores.
@@ -98,7 +97,8 @@ def run(self) -> None:
new_taxon = None
new_score = None
- self.logger.info(f"Aggregated taxon scores: { {t.name: s for t, s in taxon_scores.items()} }")
+ scores_str = {t.name: s for t, s in taxon_scores.items()}
+ self.logger.info(f"Aggregated taxon scores: {scores_str}")
for rank in rollup_order:
threshold = thresholds.get(rank, 1.0)
# import pdb
diff --git a/ami/ml/post_processing/registry.py b/ami/ml/post_processing/registry.py
index 308be18ae..28fa7fb2f 100644
--- a/ami/ml/post_processing/registry.py
+++ b/ami/ml/post_processing/registry.py
@@ -1,10 +1,12 @@
# Registry of available post-processing tasks
from ami.ml.post_processing.class_masking import ClassMaskingTask
+from ami.ml.post_processing.rank_rollup import RankRollupTask
from ami.ml.post_processing.small_size_filter import SmallSizeFilterTask
POSTPROCESSING_TASKS = {
SmallSizeFilterTask.key: SmallSizeFilterTask,
ClassMaskingTask.key: ClassMaskingTask,
+ RankRollupTask.key: RankRollupTask,
}
diff --git a/ami/ml/tests.py b/ami/ml/tests.py
index 99c6c5a39..aecf8859f 100644
--- a/ami/ml/tests.py
+++ b/ami/ml/tests.py
@@ -14,6 +14,7 @@
Project,
SourceImage,
SourceImageCollection,
+ TaxaList,
Taxon,
TaxonRank,
group_images_into_events,
@@ -21,6 +22,7 @@
from ami.ml.models import Algorithm, AlgorithmCategoryMap, Pipeline, ProcessingService
from ami.ml.models.algorithm import AlgorithmTaskType
from ami.ml.models.pipeline import collect_images, get_or_create_algorithm_and_category_map, save_results
+from ami.ml.post_processing.class_masking import make_classifications_filtered_by_taxa_list
from ami.ml.post_processing.rank_rollup import RankRollupTask
from ami.ml.post_processing.small_size_filter import SmallSizeFilterTask
from ami.ml.schemas import (
@@ -1006,6 +1008,321 @@ def test_rank_rollup_creates_new_terminal_classifications(self):
"Rolled-up classification should reference the original classification.",
)
+ def _create_classification_with_logits(self, detection, taxon, score, scores, logits):
+ """Helper to create a classification with explicit scores and logits."""
+ now = datetime.datetime.now(datetime.timezone.utc)
+ return Classification.objects.create(
+ detection=detection,
+ taxon=taxon,
+ score=score,
+ scores=scores,
+ logits=logits,
+ terminal=True,
+ timestamp=now,
+ algorithm=self.algorithm,
+ )
+
+ def test_class_masking_redistributes_scores(self):
+ """
+ Test that class masking correctly recalculates softmax after masking excluded species.
+
+ Setup: 3 species in category map (indices 0, 1, 2).
+ Taxa list contains only species at indices 0 and 1.
+ Original classification has species at index 2 as the top prediction.
+ After masking, the top prediction should shift to species 0 or 1.
+ """
+ import math
+
+ species_taxa = list(self.project.taxa.filter(rank=TaxonRank.SPECIES.name).order_by("name")[:3])
+ self.assertEqual(len(species_taxa), 3)
+
+ # Create a taxa list with only the first 2 species (exclude species_taxa[2])
+ partial_taxa_list = TaxaList.objects.create(name="Partial Species List")
+ partial_taxa_list.taxa.set(species_taxa[:2])
+
+ # Logits where excluded species (index 2) has the highest value
+ logits = [2.0, 1.0, 5.0] # species[2] dominates
+ # Compute original softmax
+ max_logit = max(logits)
+ exp_logits = [math.exp(x - max_logit) for x in logits]
+ total = sum(exp_logits)
+ original_scores = [e / total for e in exp_logits]
+
+ # Original top prediction is species[2] (the excluded one)
+ self.assertEqual(original_scores.index(max(original_scores)), 2)
+
+ det = Detection.objects.create(
+ source_image=self.collection.images.first(),
+ bbox=[0, 0, 200, 200],
+ )
+ occ = Occurrence.objects.create(project=self.project, event=self.deployment.events.first())
+ occ.detections.add(det)
+
+ original_clf = self._create_classification_with_logits(
+ detection=det,
+ taxon=species_taxa[2], # top prediction is the excluded species
+ score=max(original_scores),
+ scores=original_scores,
+ logits=logits,
+ )
+
+ # Create a new algorithm for masked output
+ new_algorithm, _ = Algorithm.objects.get_or_create(
+ name=f"{self.algorithm.name} (filtered by {partial_taxa_list.name})",
+ key=f"{self.algorithm.key}_filtered_{partial_taxa_list.pk}",
+ defaults={
+ "task_type": AlgorithmTaskType.CLASSIFICATION.value,
+ "category_map": self.algorithm.category_map,
+ },
+ )
+
+ classifications = Classification.objects.filter(pk=original_clf.pk)
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=partial_taxa_list,
+ algorithm=self.algorithm,
+ new_algorithm=new_algorithm,
+ )
+
+ # Original classification should be non-terminal
+ original_clf.refresh_from_db()
+ self.assertFalse(original_clf.terminal, "Original classification should be non-terminal after masking.")
+
+ # New terminal classification should exist
+ new_clf = Classification.objects.filter(detection=det, terminal=True).first()
+ self.assertIsNotNone(new_clf, "A new terminal classification should be created.")
+ self.assertEqual(new_clf.algorithm, new_algorithm)
+
+ # New top prediction should be species[0] (highest logit among allowed species)
+ self.assertEqual(
+ new_clf.taxon,
+ species_taxa[0],
+ "Top prediction should be the highest-scoring species remaining in the taxa list.",
+ )
+
+ # Scores should sum to ~1.0 (valid probability distribution)
+ self.assertAlmostEqual(sum(new_clf.scores), 1.0, places=5, msg="Masked scores should sum to 1.0")
+
+ # Excluded species score should be ~0.0
+ self.assertAlmostEqual(
+ new_clf.scores[2],
+ 0.0,
+ places=10,
+ msg="Excluded species score should be effectively zero.",
+ )
+
+ # New top score should be higher than original (probability mass redistributed)
+ self.assertGreater(
+ new_clf.score,
+ original_scores[0],
+ "In-list species score should increase after masking out the dominant excluded species.",
+ )
+
+ def test_class_masking_improves_accuracy(self):
+ """
+ Test the key use case: class masking improves accuracy when the true species is in
+ the taxa list but was originally outscored by an out-of-list species.
+
+ Scenario: True species is "Vanessa cardui" (in list). The classifier's top prediction
+ is an out-of-list species. After masking, "Vanessa cardui" should become the top
+ prediction, and the occurrence determination should update.
+ """
+ species_taxa = list(self.project.taxa.filter(rank=TaxonRank.SPECIES.name).order_by("name")[:3])
+ self.assertEqual(len(species_taxa), 3)
+ # species_taxa sorted by name: [Vanessa atalanta, Vanessa cardui, Vanessa itea]
+
+ true_species = species_taxa[1] # Vanessa cardui — the "ground truth"
+ excluded_species = species_taxa[2] # Vanessa itea — not in the regional list
+
+ # Taxa list: contains atalanta and cardui, but NOT itea
+ regional_list = TaxaList.objects.create(name="Regional Species List")
+ regional_list.taxa.set([species_taxa[0], species_taxa[1]])
+
+ # Logits: itea (index 2) is top, cardui (index 1) is close second, atalanta (index 0) is low
+ logits = [0.5, 3.0, 3.5]
+
+ import math
+
+ max_logit = max(logits)
+ exp_logits = [math.exp(x - max_logit) for x in logits]
+ total = sum(exp_logits)
+ scores = [e / total for e in exp_logits]
+
+ # Original top prediction is the excluded species
+ self.assertEqual(scores.index(max(scores)), 2)
+
+ det = Detection.objects.create(
+ source_image=self.collection.images.first(),
+ bbox=[0, 0, 200, 200],
+ )
+ occ = Occurrence.objects.create(project=self.project, event=self.deployment.events.first())
+ occ.detections.add(det)
+
+ self._create_classification_with_logits(
+ detection=det,
+ taxon=excluded_species,
+ score=max(scores),
+ scores=scores,
+ logits=logits,
+ )
+ # Occurrence determination is currently the excluded species
+ occ.save(update_determination=True)
+ occ.refresh_from_db()
+ self.assertEqual(occ.determination, excluded_species)
+
+ new_algorithm, _ = Algorithm.objects.get_or_create(
+ name=f"{self.algorithm.name} (filtered by {regional_list.name})",
+ key=f"{self.algorithm.key}_filtered_{regional_list.pk}",
+ defaults={
+ "task_type": AlgorithmTaskType.CLASSIFICATION.value,
+ "category_map": self.algorithm.category_map,
+ },
+ )
+
+ classifications = Classification.objects.filter(
+ detection__occurrence=occ,
+ terminal=True,
+ algorithm=self.algorithm,
+ scores__isnull=False,
+ )
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=regional_list,
+ algorithm=self.algorithm,
+ new_algorithm=new_algorithm,
+ )
+
+ # After masking, occurrence determination should be the true species
+ occ.refresh_from_db()
+ self.assertEqual(
+ occ.determination,
+ true_species,
+ "After class masking, occurrence determination should update to the correct in-list species.",
+ )
+
+ # Verify the new classification's taxon
+ new_clf = Classification.objects.filter(detection=det, terminal=True).first()
+ self.assertEqual(new_clf.taxon, true_species)
+ self.assertGreater(new_clf.score, 0.5, "Masked score for true species should be > 0.5")
+
+ def test_class_masking_no_change_when_all_species_in_list(self):
+ """When all category map species are in the taxa list, no new classifications should be created."""
+ species_taxa = list(self.project.taxa.filter(rank=TaxonRank.SPECIES.name).order_by("name")[:3])
+
+ # Taxa list contains ALL species
+ full_list = TaxaList.objects.create(name="Full Species List")
+ full_list.taxa.set(species_taxa)
+
+ logits = [3.0, 1.0, 0.5]
+ import math
+
+ max_logit = max(logits)
+ exp_logits = [math.exp(x - max_logit) for x in logits]
+ total = sum(exp_logits)
+ scores = [e / total for e in exp_logits]
+
+ det = Detection.objects.create(
+ source_image=self.collection.images.first(),
+ bbox=[0, 0, 200, 200],
+ )
+ occ = Occurrence.objects.create(project=self.project, event=self.deployment.events.first())
+ occ.detections.add(det)
+
+ original_clf = self._create_classification_with_logits(
+ detection=det,
+ taxon=species_taxa[0],
+ score=max(scores),
+ scores=scores,
+ logits=logits,
+ )
+
+ new_algorithm, _ = Algorithm.objects.get_or_create(
+ name=f"{self.algorithm.name} (filtered full)",
+ key=f"{self.algorithm.key}_filtered_full",
+ defaults={
+ "task_type": AlgorithmTaskType.CLASSIFICATION.value,
+ "category_map": self.algorithm.category_map,
+ },
+ )
+
+ classifications = Classification.objects.filter(pk=original_clf.pk)
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=full_list,
+ algorithm=self.algorithm,
+ new_algorithm=new_algorithm,
+ )
+
+ # Original should still be terminal (no change needed)
+ original_clf.refresh_from_db()
+ self.assertTrue(original_clf.terminal, "Original should remain terminal when all species are in the list.")
+
+ # No new classifications created
+ clf_count = Classification.objects.filter(detection=det).count()
+ self.assertEqual(clf_count, 1, "No new classification should be created when masking changes nothing.")
+
+ def test_class_masking_softmax_correctness(self):
+ """Verify that masked softmax produces mathematically correct results."""
+ import math
+
+ species_taxa = list(self.project.taxa.filter(rank=TaxonRank.SPECIES.name).order_by("name")[:3])
+
+ # Only keep species at index 0
+ single_species_list = TaxaList.objects.create(name="Single Species List")
+ single_species_list.taxa.set([species_taxa[0]])
+
+ logits = [2.0, 3.0, 4.0]
+ max_logit = max(logits)
+ exp_logits = [math.exp(x - max_logit) for x in logits]
+ total = sum(exp_logits)
+ scores = [e / total for e in exp_logits]
+
+ det = Detection.objects.create(
+ source_image=self.collection.images.first(),
+ bbox=[0, 0, 200, 200],
+ )
+ occ = Occurrence.objects.create(project=self.project, event=self.deployment.events.first())
+ occ.detections.add(det)
+
+ self._create_classification_with_logits(
+ detection=det,
+ taxon=species_taxa[2], # original top is index 2
+ score=max(scores),
+ scores=scores,
+ logits=logits,
+ )
+
+ new_algorithm, _ = Algorithm.objects.get_or_create(
+ name=f"{self.algorithm.name} (single species)",
+ key=f"{self.algorithm.key}_single",
+ defaults={
+ "task_type": AlgorithmTaskType.CLASSIFICATION.value,
+ "category_map": self.algorithm.category_map,
+ },
+ )
+
+ classifications = Classification.objects.filter(detection=det, terminal=True)
+ make_classifications_filtered_by_taxa_list(
+ classifications=classifications,
+ taxa_list=single_species_list,
+ algorithm=self.algorithm,
+ new_algorithm=new_algorithm,
+ )
+
+ new_clf = Classification.objects.filter(detection=det, terminal=True).first()
+ self.assertIsNotNone(new_clf)
+
+ # With only 1 allowed species, its score should be ~1.0
+ self.assertAlmostEqual(
+ new_clf.scores[0],
+ 1.0,
+ places=5,
+ msg="With only one allowed species, its softmax score should be ~1.0",
+ )
+ self.assertAlmostEqual(new_clf.scores[1], 0.0, places=10)
+ self.assertAlmostEqual(new_clf.scores[2], 0.0, places=10)
+ self.assertAlmostEqual(sum(new_clf.scores), 1.0, places=5)
+
class TestTaskStateManager(TestCase):
"""Test TaskStateManager for job progress tracking."""
From a1075979f28433afddecc16717b85a5acdb128be Mon Sep 17 00:00:00 2001
From: Michael Bunsen
Date: Wed, 18 Feb 2026 03:59:08 -0800
Subject: [PATCH 38/44] fix: address review feedback on class masking and rank
rollup
- Move numpy imports to module level (class_masking.py)
- Replace print() with logger.debug()
- Use .count() instead of len() for QuerySet evaluation
- Convert taxa_in_list to set() for O(1) lookups
- Add logits__isnull=False to queryset filters
- Set category_map on new classifications (was None)
- Fix error message clarity and remove extra parens
- Use logger.info() instead of root logging.info()
- Wrap bulk_update/bulk_create in transaction.atomic()
- Normalize threshold keys to uppercase in RankRollupTask
- Guard clf.score formatting against None
- Guard against null occurrence in rank rollup
- Remove commented-out pdb lines
- Fix admin.py threshold keys to uppercase
- Fix test scores to exercise intended thresholds
- Clean up stale TODO comments and dead code
Co-Authored-By: Claude
---
ami/main/admin.py | 2 +-
ami/ml/post_processing/class_masking.py | 92 ++++++++++---------------
ami/ml/post_processing/rank_rollup.py | 22 +++---
ami/ml/tests.py | 4 +-
4 files changed, 52 insertions(+), 68 deletions(-)
diff --git a/ami/main/admin.py b/ami/main/admin.py
index 95437b29b..4d7d76c09 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -739,7 +739,7 @@ def run_small_size_filter(self, request: HttpRequest, queryset: QuerySet[SourceI
def run_rank_rollup(self, request: HttpRequest, queryset: QuerySet[SourceImageCollection]) -> None:
"""Trigger the Rank Rollup post-processing job asynchronously."""
jobs = []
- DEFAULT_THRESHOLDS = {"species": 0.8, "genus": 0.6, "family": 0.4}
+ DEFAULT_THRESHOLDS = {"SPECIES": 0.8, "GENUS": 0.6, "FAMILY": 0.4}
for collection in queryset:
job = Job.objects.create(
diff --git a/ami/ml/post_processing/class_masking.py b/ami/ml/post_processing/class_masking.py
index 298688ba7..731776db3 100644
--- a/ami/ml/post_processing/class_masking.py
+++ b/ami/ml/post_processing/class_masking.py
@@ -1,5 +1,7 @@
import logging
+import numpy as np
+from django.db import transaction
from django.db.models import QuerySet
from django.utils import timezone
@@ -24,6 +26,7 @@ def update_single_occurrence(
terminal=True,
algorithm=algorithm,
scores__isnull=False,
+ logits__isnull=False,
).distinct()
# Make a new Algorithm for the filtered classifications
@@ -56,15 +59,12 @@ def update_occurrences_in_collection(
):
task_logger.info(f"Recalculating classifications based on a taxa list. Params: {params}")
- # Make new AlgorithmCategoryMap with the taxa in the list
- # @TODO
-
classifications = Classification.objects.filter(
detection__source_image__collections=collection,
terminal=True,
- # algorithm__task_type="classification",
algorithm=algorithm,
scores__isnull=False,
+ logits__isnull=False,
).distinct()
make_classifications_filtered_by_taxa_list(
@@ -81,32 +81,26 @@ def make_classifications_filtered_by_taxa_list(
algorithm: Algorithm,
new_algorithm: Algorithm,
):
- taxa_in_list = taxa_list.taxa.all()
+ taxa_in_list = set(taxa_list.taxa.all())
occurrences_to_update: set[Occurrence] = set()
- logger.info(f"Found {len(classifications)} terminal classifications with scores to update.")
+ classification_count = classifications.count()
+ logger.info(f"Found {classification_count} terminal classifications with scores to update.")
- if not classifications:
+ if classification_count == 0:
raise ValueError("No terminal classifications with scores found to update.")
if not algorithm.category_map:
raise ValueError(f"Algorithm {algorithm} does not have a category map.")
category_map: AlgorithmCategoryMap = algorithm.category_map
- # Consider moving this to a method on the Classification model
-
# @TODO find a more efficient way to get the category map with taxa. This is slow!
logger.info(f"Retrieving category map with Taxa instances for algorithm {algorithm}")
category_map_with_taxa = category_map.with_taxa()
- # Filter the category map to only include taxa that are in the taxa list
- # included_category_map_with_taxa = [
- # category for category in category_map_with_taxa if category["taxon"] in taxa_in_list
- # ]
excluded_category_map_with_taxa = [
category for category in category_map_with_taxa if category["taxon"] not in taxa_in_list
]
- # included_category_indices = [int(category["index"]) for category in category_map_with_taxa]
excluded_category_indices = [
int(category["index"]) for category in excluded_category_map_with_taxa # type: ignore
]
@@ -124,9 +118,6 @@ def make_classifications_filtered_by_taxa_list(
timestamp = timezone.now()
for classification in classifications:
scores, logits = classification.scores, classification.logits
- # Set scores and logits to zero if they are not in the filtered category indices
-
- import numpy as np
# Assert that all scores & logits are lists of numbers
if not isinstance(scores, list) or not all(isinstance(score, (int, float)) for score in scores):
@@ -137,26 +128,17 @@ def make_classifications_filtered_by_taxa_list(
logger.debug(f"Processing classification {classification.pk} with {len(scores)} scores")
logger.info(f"Previous totals: {sum(scores)} scores, {sum(logits)} logits")
- # scores_np_filtered = np.array(scores)
logits_np = np.array(logits)
- # scores_np_filtered[excluded_category_indices] = 0.0
-
- # @TODO can we use np.NAN instead of 0.0? zero will NOT calculate correctly in softmax.
- # @TODO delete the excluded categories from the scores and logits instead of setting to 0.0
- # logits_np[excluded_category_indices] = 0.0
- # logits_np[excluded_category_indices] = np.nan
+ # Mask excluded logits with -100 (effectively zero probability after softmax)
+ # @TODO consider using -np.inf for mathematically exact masking
logits_np[excluded_category_indices] = -100
logits: list[float] = logits_np.tolist()
- from numpy import exp
- from numpy import sum as np_sum
-
- # @TODO add test to see if this is correct, or needed!
# Recalculate the softmax scores based on the filtered logits
- scores_np: np.ndarray = exp(logits_np - np.max(logits_np)) # Subtract max for numerical stability
- scores_np /= np_sum(scores_np) # Normalize to get probabilities
+ scores_np: np.ndarray = np.exp(logits_np - np.max(logits_np)) # Subtract max for numerical stability
+ scores_np /= np.sum(scores_np) # Normalize to get probabilities
scores: list = scores_np.tolist() # Convert back to list
@@ -164,11 +146,8 @@ def make_classifications_filtered_by_taxa_list(
# Get the taxon with the highest score using the index of the max score
top_index = scores.index(max(scores))
- top_taxon = category_map_with_taxa[top_index][
- "taxon"
- ] # @TODO: This doesn't work if the taxon has never been classified
- print("Top taxon: ", category_map_with_taxa[top_index]) # @TODO: REMOVE
- print("Top index: ", top_index) # @TODO: REMOVE
+ top_taxon = category_map_with_taxa[top_index]["taxon"]
+ logger.debug(f"Top taxon: {category_map_with_taxa[top_index]}, index: {top_index}")
# check if needs updating
if classification.scores == scores and classification.logits == logits:
@@ -189,12 +168,15 @@ def make_classifications_filtered_by_taxa_list(
detection=classification.detection,
timestamp=classification.timestamp,
terminal=True,
- category_map=None, # @TODO need a new category map with the filtered taxa
+ category_map=new_algorithm.category_map,
created_at=timestamp,
updated_at=timestamp,
)
if new_classification.taxon is None:
- raise (ValueError("Classification isn't registered yet. Aborting")) # @TODO remove or fail gracefully
+ raise ValueError(
+ f"Unable to determine top taxon after class masking for classification {classification.pk}. "
+ "No allowed classes found in taxa list."
+ )
classifications_to_update.append(classification)
classifications_to_add.append(new_classification)
@@ -203,27 +185,27 @@ def make_classifications_filtered_by_taxa_list(
assert new_classification.detection.occurrence is not None
occurrences_to_update.add(new_classification.detection.occurrence)
- logging.info(
+ logger.info(
f"Adding new classification for Taxon {top_taxon} to occurrence {new_classification.detection.occurrence}"
)
- # Bulk update the existing classifications
- if classifications_to_update:
- logger.info(f"Bulk updating {len(classifications_to_update)} existing classifications")
- Classification.objects.bulk_update(classifications_to_update, ["terminal", "updated_at"])
- logger.info(f"Updated {len(classifications_to_update)} existing classifications")
-
- if classifications_to_add:
- # Bulk create the new classifications
- logger.info(f"Bulk creating {len(classifications_to_add)} new classifications")
- Classification.objects.bulk_create(classifications_to_add)
- logger.info(f"Added {len(classifications_to_add)} new classifications")
-
- # Update the occurrence determinations
- logger.info(f"Updating the determinations for {len(occurrences_to_update)} occurrences")
- for occurrence in occurrences_to_update:
- occurrence.save(update_determination=True)
- logger.info(f"Updated determinations for {len(occurrences_to_update)} occurrences")
+ # Bulk update/create in a single transaction for atomicity
+ with transaction.atomic():
+ if classifications_to_update:
+ logger.info(f"Bulk updating {len(classifications_to_update)} existing classifications")
+ Classification.objects.bulk_update(classifications_to_update, ["terminal", "updated_at"])
+ logger.info(f"Updated {len(classifications_to_update)} existing classifications")
+
+ if classifications_to_add:
+ logger.info(f"Bulk creating {len(classifications_to_add)} new classifications")
+ Classification.objects.bulk_create(classifications_to_add)
+ logger.info(f"Added {len(classifications_to_add)} new classifications")
+
+ # Update the occurrence determinations
+ logger.info(f"Updating the determinations for {len(occurrences_to_update)} occurrences")
+ for occurrence in occurrences_to_update:
+ occurrence.save(update_determination=True)
+ logger.info(f"Updated determinations for {len(occurrences_to_update)} occurrences")
class ClassMaskingTask(BasePostProcessingTask):
diff --git a/ami/ml/post_processing/rank_rollup.py b/ami/ml/post_processing/rank_rollup.py
index 9708b1c63..677c5ae1f 100644
--- a/ami/ml/post_processing/rank_rollup.py
+++ b/ami/ml/post_processing/rank_rollup.py
@@ -44,7 +44,8 @@ def run(self) -> None:
# ---- Read config parameters ----
config = self.config or {}
collection_id = config.get("source_image_collection_id")
- thresholds = config.get("thresholds", self.DEFAULT_THRESHOLDS)
+ raw_thresholds = config.get("thresholds", self.DEFAULT_THRESHOLDS)
+ thresholds = {k.upper(): v for k, v in raw_thresholds.items()}
rollup_order = config.get("rollup_order", self.ROLLUP_ORDER)
if not collection_id:
@@ -68,7 +69,8 @@ def run(self) -> None:
with transaction.atomic():
for i, clf in enumerate(qs.iterator(), start=1):
- self.logger.info(f"Processing classification #{clf.pk} (taxon={clf.taxon}, score={clf.score:.3f})")
+ score_str = f"{clf.score:.3f}" if clf.score is not None else "N/A"
+ self.logger.info(f"Processing classification #{clf.pk} (taxon={clf.taxon}, score={score_str})")
if not clf.scores:
self.logger.info(f"Skipping classification #{clf.pk}: no scores available")
@@ -101,9 +103,6 @@ def run(self) -> None:
self.logger.info(f"Aggregated taxon scores: {scores_str}")
for rank in rollup_order:
threshold = thresholds.get(rank, 1.0)
- # import pdb
-
- # pdb.set_trace()
candidates = {t: s for t, s in taxon_scores.items() if t.rank == rank}
if not candidates:
@@ -136,11 +135,14 @@ def run(self) -> None:
)
occurrence = clf.detection.occurrence
- updated_occurrences.append(occurrence)
- self.logger.info(
- f"Rolled up occurrence {occurrence.pk}: {clf.taxon} => {new_taxon} "
- f"({new_taxon.rank}) with rolled-up score={new_score:.3f}"
- )
+ if occurrence:
+ updated_occurrences.append(occurrence)
+ self.logger.info(
+ f"Rolled up occurrence {occurrence.pk}: {clf.taxon} => {new_taxon} "
+ f"({new_taxon.rank}) with rolled-up score={new_score:.3f}"
+ )
+ else:
+ self.logger.warning(f"Detection #{clf.detection.pk} has no occurrence; skipping.")
else:
self.logger.info(f"No rollup applied for classification #{clf.pk} (taxon={clf.taxon})")
diff --git a/ami/ml/tests.py b/ami/ml/tests.py
index aecf8859f..5563452f7 100644
--- a/ami/ml/tests.py
+++ b/ami/ml/tests.py
@@ -931,7 +931,7 @@ def _create_occurrences_with_classifications(self, num=3):
detection=det,
taxon=self.species_taxon,
score=0.5,
- scores=[0.5, 0.3, 0.2],
+ scores=[0.5, 0.2, 0.1],
terminal=True,
timestamp=now,
algorithm=self.algorithm,
@@ -975,7 +975,7 @@ def test_rank_rollup_creates_new_terminal_classifications(self):
task = RankRollupTask(
source_image_collection_id=self.collection.pk,
- thresholds={"species": 0.8, "genus": 0.6, "family": 0.4},
+ thresholds={"SPECIES": 0.8, "GENUS": 0.6, "FAMILY": 0.4},
)
task.run()
From da9b0816f6ff3aa964e49f36be9dde48ecde0a95 Mon Sep 17 00:00:00 2001
From: Michael Bunsen
Date: Wed, 18 Feb 2026 04:23:47 -0800
Subject: [PATCH 39/44] feat: replace hardcoded admin action with dynamic class
masking form
Replace the hardcoded `update_with_newfoundland_species` admin action
with a generic `run_class_masking` action that shows an intermediate
confirmation page. Users can now select any TaxaList and Algorithm
from dropdowns before running class masking on selected occurrences.
Co-Authored-By: Claude
---
ami/main/admin.py | 107 +++++++++++-------
.../main/class_masking_confirmation.html | 60 ++++++++++
2 files changed, 126 insertions(+), 41 deletions(-)
create mode 100644 ami/templates/admin/main/class_masking_confirmation.html
diff --git a/ami/main/admin.py b/ami/main/admin.py
index 4d7d76c09..e6ddce103 100644
--- a/ami/main/admin.py
+++ b/ami/main/admin.py
@@ -2,9 +2,11 @@
from django.contrib import admin
from django.db import models
+from django.db.models import Count
from django.db.models.query import QuerySet
from django.http.request import HttpRequest
from django.template.defaultfilters import filesizeformat
+from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.formats import number_format
from django.utils.html import format_html
@@ -426,59 +428,82 @@ def get_queryset(self, request: HttpRequest) -> QuerySet[Any]:
def detections_count(self, obj) -> int:
return obj.detections_count
- @admin.action(description="Update occurrence with Newfoundland species taxa list")
- def update_with_newfoundland_species(self, request: HttpRequest, queryset: QuerySet[Occurrence]) -> None:
+ @admin.action(description="Run class masking (select taxa list & algorithm)")
+ def run_class_masking(self, request: HttpRequest, queryset: QuerySet[Occurrence]) -> TemplateResponse | None:
"""
- Update selected occurrences using the 'Newfoundland species' taxa list
- and 'Quebec & Vermont Species Classifier - Apr 2024' algorithm.
+ Run class masking on selected occurrences.
+ Shows an intermediate page to select a TaxaList and Algorithm.
"""
- try:
- # Get the taxa list by name
- taxa_list = TaxaList.objects.get(name="Newfoundland Species")
- except TaxaList.DoesNotExist:
- self.message_user(
- request,
- "Error: TaxaList 'Newfoundland species' not found.",
- level="error",
- )
- return
-
- try:
- # Get the algorithm by name
- algorithm = Algorithm.objects.get(name="Quebec & Vermont Species Classifier - Apr 2024")
- except Algorithm.DoesNotExist:
- self.message_user(
- request,
- "Error: Algorithm 'Quebec & Vermont Species Classifier - Apr 2024' not found.",
- level="error",
- )
- return
+ if request.POST.get("confirm"):
+ taxa_list_id = request.POST.get("taxa_list")
+ algorithm_id = request.POST.get("algorithm")
+ if not taxa_list_id or not algorithm_id:
+ self.message_user(request, "Please select both a taxa list and an algorithm.", level="error")
+ return None
- # Process each occurrence
- count = 0
- for occurrence in queryset:
try:
- update_single_occurrence(
- occurrence=occurrence,
- algorithm=algorithm,
- taxa_list=taxa_list,
- )
- count += 1
- except Exception as e:
+ taxa_list = TaxaList.objects.get(pk=taxa_list_id)
+ algorithm = Algorithm.objects.get(pk=algorithm_id)
+ except (TaxaList.DoesNotExist, Algorithm.DoesNotExist) as e:
+ self.message_user(request, f"Error: {e}", level="error")
+ return None
+
+ if not algorithm.category_map:
self.message_user(
- request,
- f"Error processing occurrence {occurrence.pk}: {str(e)}",
- level="error",
+ request, f"Algorithm '{algorithm.name}' does not have a category map.", level="error"
)
-
- self.message_user(request, f"Successfully updated {count} occurrence(s).")
+ return None
+
+ count = 0
+ for occurrence in queryset:
+ try:
+ update_single_occurrence(
+ occurrence=occurrence,
+ algorithm=algorithm,
+ taxa_list=taxa_list,
+ )
+ count += 1
+ except Exception as e:
+ self.message_user(
+ request,
+ f"Error processing occurrence {occurrence.pk}: {e}",
+ level="error",
+ )
+
+ self.message_user(request, f"Successfully ran class masking on {count} occurrence(s).")
+ return None
+
+ # Show intermediate confirmation page
+ taxa_lists = TaxaList.objects.annotate(taxa_count=Count("taxa")).filter(taxa_count__gt=0).order_by("name")
+ algorithms = Algorithm.objects.filter(category_map__isnull=False).order_by("name")
+
+ # Annotate algorithms with label count
+ alg_list = []
+ for alg in algorithms:
+ alg.labels_count = len(alg.category_map.labels) if alg.category_map else 0
+ alg_list.append(alg)
+
+ return TemplateResponse(
+ request,
+ "admin/main/class_masking_confirmation.html",
+ {
+ **self.admin_site.each_context(request),
+ "title": "Run class masking",
+ "queryset": queryset,
+ "occurrence_count": queryset.count(),
+ "taxa_lists": taxa_lists,
+ "algorithms": alg_list,
+ "opts": self.model._meta,
+ "action_checkbox_name": admin.helpers.ACTION_CHECKBOX_NAME,
+ },
+ )
ordering = ("-created_at",)
# Add classifications as inline
inlines = [DetectionInline]
- actions = [update_with_newfoundland_species]
+ actions = [run_class_masking]
@admin.register(Classification)
diff --git a/ami/templates/admin/main/class_masking_confirmation.html b/ami/templates/admin/main/class_masking_confirmation.html
new file mode 100644
index 000000000..8c37ec361
--- /dev/null
+++ b/ami/templates/admin/main/class_masking_confirmation.html
@@ -0,0 +1,60 @@
+{% extends "admin/base_site.html" %}
+
+{% load i18n admin_urls %}
+
+{% block title %}
+ {% translate "Run class masking" %} | {{ site_title|default:_("Django site admin") }}
+{% endblock title %}
+{% block breadcrumbs %}
+
+{% endblock breadcrumbs %}
+{% block content %}
+
+
+ {% for obj in queryset %}{% endfor %}
+
+
+
+
+{% endblock content %}
From fc3f9e183cbd8445793fb63abfa02b03d92fa518 Mon Sep 17 00:00:00 2001
From: Michael Bunsen
Date: Wed, 18 Feb 2026 04:29:04 -0800
Subject: [PATCH 40/44] docs: add class masking screenshots for PR review
Co-Authored-By: Claude
---
docs/screenshots/admin-class-masking-form.png | Bin 0 -> 60949 bytes
.../admin-classifications-after-masking.png | Bin 0 -> 557779 bytes
.../ui-occurrence-43498-identification.png | Bin 0 -> 67423 bytes
3 files changed, 0 insertions(+), 0 deletions(-)
create mode 100644 docs/screenshots/admin-class-masking-form.png
create mode 100644 docs/screenshots/admin-classifications-after-masking.png
create mode 100644 docs/screenshots/ui-occurrence-43498-identification.png
diff --git a/docs/screenshots/admin-class-masking-form.png b/docs/screenshots/admin-class-masking-form.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac67a5949d320572315c46273dddf824f99eeb61
GIT binary patch
literal 60949
zcmZsjWl&v9)2PwlPH=a3cM0z9PJ+9;L-644?(Pl&f;$9vceem{b8>RNyjAxHRWN(4
zndzRM?wO~16Dlt&1_zA=4Fm)PCn5ex5eNu0==}#01kj>bJ9!TTga{<@Nl@7p_@o0y
zTkFtm-)C>jAr?xJDIh)qNdiOg6q>%o)9kY;(kNY$WlO#YN90BqxBqI5w1v^DQS|~_
zXGG+w{)H#Q9oopEf8k5O^b!$V(y6@}#3%If`Jd(`3bum6%s(vi;mVf%X?d0X<-Cwm&TzjoM+{`@VZKCV-K_Cl&hyy_p0F(HFeFH*8<
zfskFfz~rO(a4StezK?QI50&lpK2|p~VK?s!#L<+=blgwKzh*H{06dvHCiiG}y3{aN
zDYl?Lm^1R%3lTpJFleS
z0Qr9{?_<{@x~BNF-%Lx%8N%Hj#7*|K{vH7_3aF46Fv(viK(whp__wyVvq&^&^W=mO
zj*0%&5TIlI7i8DVe)R3FEkh%tyxiQ(K#-#;@*FV*b?pBX33zN{2r?Vbi^=;>YbG49
zB{)EjD$*N4=(@fw(;?QIL;}26QLyi0{-geXW=#{?z0Y^gsotRH>9HNj-n%FScxH4s
z->B>_HK=QW>D%ATsp-8`gz!3_tzgp8&E{?X(C7LsC(i^J0~?@ERRrMKys{$tNtC|^
zD3<`q^18#kuO1A5KJ{m;hzg~oF{`%j!
zs&fI;go4ep0w?3Y+=uY0C&CRv-kNeSB}e_Oe}MS_lzV+wL(Kf2}A{BU*~OJRAptyD|Onu
zPZzx5@{hI{0XrZaVegMwxZ*+T!-{WEBKe9uR(bAHV`5_Jy*(|U%LeuSG8GaMvbVnq
zhQmg)1DGxd#|>FV&2lrnpYFHobka{>(vKOkq$54YdwXsFZzd*UK&bmRzXohPH(~UA
z$qXJ}(E;-UESP#?5HF4}^k}B{ZJC}51*$y3N@J{heAJXSZY%3O#?kp1gASiYHSbkR
zlK*C_(QUpM86KmPHr>zqfmXv?pigpt#^8Aa6R;Tg{=NNf$pVq8cmkld>Y}232>u7Z
zA!Bn3i{tb2w}ZTBf!N;m!@aRIJ-6MD6lnHZ61n3#?
ze4k&J6W(`q0AowSi{2mPe`U<)z=5Z@c%1OTBca3zTv64G9_Q*GTj}CnS+#dQU#kmH
zR*WGdG-QK8Q1$J;nJnmNLr6Qfr=DbE3;oaZ%Ar8$+b?>{syd-7dym>L$g?&$Y(X=q
z@`Z=!y)VQ4H2en3M5CWFw#XosZQP01zs7vSRD9Kyo#}Mjil@18Nkg!_mze)C>#$eW
z8kl^VN}MEv`_g2+igk3(kU7I^xtg_xA)mKw$
zAA_HIi1|J+Y)Q4T&RTJyJMx3#w&*x)^s3ZNey;$xn~8zqgrEcZ*b@LhGZ)u7n+$aB
zRdG#^1O=YN-C8A-hx)PRh;3ifR_
zP0!S=aT%Q8rKLNEq{15${DW
z(J-gNF6VM`a#4d}j6>IK4RM?4TcvFnK$?n@Hb_;f=QKlcsCyj+XSOl1H!favy(_mfP0c$f9b_Q!7N@JAOfq$HT{eXs
zXRjz?w3pb!uw>yKlzpFmzs91DW{h@xYfr$F=Xu3YO|XC4s-ZB7opS{;0*>P`_ym%7
z5d|0BLTJT6z;wPdCLxQA!bE2nn%2oKBQgMa4`Vx{OM!ijC@3yQf|~BZKC|q-73h|u
zOe*?FzK?g<>P9b6ZhtQ-T6h?#@GF8IHzEEh*yX}~-BXEw;B6z9_yR>PK2@!?S;iv4|>?1U%49_
z(|6fO`!dD$IQySVCMDAozkgD={~+d{uk;X1WT<$b(Pn-|;39%7N4q|dzZK8RKBXVdWaPyfA
zun^Esoa}+biQom;2e=}BUAkd@1KEKTRHD7I@NyrXpoezEo-X{c|2?_+nMeX*dklc-
zTi-|-;1P)G0pffAi-s95*Yka5qm|TKz#b$0J|1?8wlrE0Oo&n6E{(Ja(I*zx?F{k^
zv_xV`2A7Amx%(@lb}t!|dv9{74u@s7+vPE%I`@>c6TT9eiDe6%aH{5&DbQh?^^>aH
zFK4L;rqc}}RdVF?m@cau7-U#vbeXaCT@vRi{gTzQGPlk#9a
zbUSB}u^#1jdDMd*9^Y2VFaTJX8ov!(+h;
zCgptc+-wqRpI~4Y4=3`!Euvr|W8w&+Z{5LlIX~m2GZzDS%gYk6m{Lf~P4?*X0%o^W
zbR?mXnV?OEnB6}^NRz{Jg>W>HolYnC2t6u&hly~`J2ObKdjhFDkA8)3Bp05`CgwiK
z2iaPJ@)MHXah&~!%=&5P&G_0eH!YCrv(5K^yr-qz=PP1jcK&^VYBX=3nx6%k7v1eMtB>+JJ`CkK)qWIbG?QC>_K$^jvb;ttVtOXEO>?2krmQcOH>l
z^!DL}H+Qw(P*6;_=>#VZVd#|D6k5CIzXW)sT}R{4ijz9n5ZJQpo*$7I`QSeYpNnk4
zWc06|Gh=)Z0pDLE=4Z)0{2H0oNM86Q|HHZ#l#>Y`PP-fZm5i6*mS}3c-wPwAuU3Avtp*rre=kf26}vNY?UI=K0Vea<$zHNq
zxk-M7EB)AX*cL!+DI};cUI>{Q6|tCEbH367QhFZ1*uzO>=+P5H+?xJL2>xgiy9qu1
z7^OWxhnVoSo5JPZ{j?y4V%0A9^?Aqn{YtY_-gSp(>x<>-ZJTv+kf*}ofd5uJ3^H!{Lr|wvBDlGGDIYe!pd2vXbQ@Adi_uNO5v{Yqr$S)7Bn^OIR
z{E6al8<6|!YWn-*$Qf`!Fa9{J-csAMWfn!tCOPEpte8BzT8qQe8|;$Kl7`IC&4StR(M%yR6ev
zPCBc(x$V*}#HTVXySOLakFXp&t80flt5itJX=CYy`e*|1*B0VP)82-6Yf{4ZH=XNuFdvzWSb(!Q}j^Xu=I=T5Zb3TSae+;6hHD!y7KDKT8Hn57{
zTF(uNwbz77TiwEMxKhE*{ppIZX?I7LP(YDDr0zN2?hGLi`8YMsE1!gvr}=r6oRw0u
zcx((dYtHzD^;$62?3jQ|$SkaFxt3elgK|Tz-+qT~aTN<#EbPv9q&g|r#0LICr4Rj&
zBN5+dA-))q^SNByS_XLiyRZWXJE+>^F})P@SIMI(x5|y>{cpYie;8|6)WK)>h9mlw
zkX97v`nUe|GZ2ADpIqKt3@^uCo8E3v_Q?Ua<=$gG4=X&h*}QAtjpffI0@qz8S8Ey1
z4izJ&I5LF)U7z_ts9&!ZO}ZSpw8L%iUk_e0x>7v-+zGd&vat#8Br?fNle`I^#H+J;F%Zx?;4@X?1!%Fx>=B<=jqlbML$Dtm96Iw
z>iWDTMc;WL^ljno&f#wVRj+mQOEzAHh#!QUXg9apbs;yIN!EFj4!iprz1&q}*Y%yj
zn@q9X7d#7M;4q;N|6U{|p?q&VPe*cXNQUDakf*PjihQ0Ie6E|``q*x(EEPq41b;oE
z>u#_f(wAhpy>{GB1i!U980%sbi|qsVG(}7Kw(%Qx+6wGHGsay!mw+Y(@4bwk*i{(u
z{@*gBAi35D>kHkebs}XvjL}Zn=|w$kQDY}3=`CI4J~Ys`zux}PllZDyKuN^zeHn!A
zDuFcsP!fYa1LmIt|9ceQFaXp4g8FH{>X+sX3_WuH<2zBQoA7>IECQ_VBK@Xdhd>9>
z>q^wD-eautgVJfvSPmZC7`PH3M!LR2=?DIn_`(1nqI3
z#{>K~w=MiLPI7Jjm!H=#XXvx^B}we+fsn6n
zt0Z*YA4L%J-nd@suveWeqF>G-aQRzrjh6MI5u74lWzM6$`T9C9wqRl-^`2@^=4{av
zqJxP84*#dGa3oiN@VW`!z>=O%hbAsIJ~#9;f^Vk?UuWgC*Ft@4g@ALlyPilnAX@-(
zg@U8}6Z@m3fvErCHuJcC=*1Iap76Qe-LG3&MSOk%ZUDP~nSRW9p3(EX1Bw3yLdjX>
zdx3Ua+zovHKSM{00xnZ8k4Xl2H|suVcU^|Rc1^mB-{9=^9#*Kado_HNd9lXf|Cg(k
zvjdR8uPZ6UYm+SRyRC5gz!m+1SKxaX`N~mhmU!5|CrK>}g!>N;EB^>WKbe)+^+c@)
z-y$+7saH5ON&z`zdzp;~C-!0H&uQi#f$(I3_V`4qykD^;Y6zR&vfZ~~h`!iSQ&)(g
z-@`ln&!d?x09cuIk2mn}&*lXIUik+BCT4ZhFA4e{~K-x^(^0rL%P>
zczkNxn90tIMJFpQ_TSb6^r!9&0>CSa(@nM&d4J7PpAjTEZ;F!n8N6o<>04p#J_iQ5HxFz|pFm0jN-i+bbyZ
z4^(^KLvAF#?v7UXRPNF5Q~!-9{dRxTNt1~?KX*qr55$V!o~-*?0w_-i~wi@cU{OY)1s^ZaM
z@w*GwaI8B+CPBazldK4EtmgZQsu0;l$3Y~I$_@v~e)102buk!n?BC~%Gp;#KFKj)U
zKi-BVk{%zWwgp?SV&da>G0!?xT*j+5fUMivMpPUybPxQt9Q~+n=tG67G2;UR6|Pg&
z_4KGu6?0VDxWVv)!;(RqOcPt;x#DU=m}H-na(}WUtPGNUVk6_l18$m=cm82|&8K_1
zTiP$aJu{iUT+%gw?$5ZLc|pny>Ls)R7*IlH2#%
zUA%nWWrPUxNqrdmFG$X@^{thaN*ZQ79o*HPZIl(2=c;wGJsyGz4A}F|WychR;()b&
zN*!qs!)~+gL0zq=+{@F*#|IO3@Q5KQYv0twk8hb<@HF>E`oX*OV^E6Yfp+qWMK9x0
z%WL82W@r+;oCGtE`%gYLG*Q_Vq-(P^WBMiS!uDb;bn*Th~3@fl)sG
zlM1}j`Ivjb*F+2Kkf-c7Gs%3ex%q|cg-r8cLP0Xh9_nuC7>MfmXH9%_3l7aSAZ_%u
z96|;2GB-AczD4Z{DKzsqvZp0kvs*aNW|t>|ITZbP-0w
z5{d&435p0*PR{P_N#+2E0g+7dhOlQYRNJfJHSawkO(wr1dpAv#B$fFn9K
zubdx0mw`lL|Gi6G6+x&A#VE(ure-%4j;gxqK=eKK}=96~vcTU(nR6Z6-z8q&8em*>$c1=3`nOW<8+(pGwG;wU4
zC(lsyouQ!Iw3&l?`4=3mA(du9K3DW^kumE0&(9+J$6-IjW{Ew{8Z#-^)){weRX=|p
z>@UDOa(Yg?TY~kFOeN#VQ@d&!=(nOab7GR}>6Islm&+PAqPc~*$6=3r9krp~DN|O-
zkH)Q_9QCEJB$D|o;#@5rD0k966Nw&{&yrrxo*WeYx`Q->GbXU;{Y{>_YvnpI!0
zkgj6(YlTV>B_${6ZAK$hQ3#6FoZ8U?-)f42;*5C%_r8W9g#}3&Q2O552ldh>8
zieeg-xGMH~niof%l4+}zxVTuK>I4<^f77UVGm6${l=
z)hLQ0K67&Rv*t}b1-G!}z%S~fs%DLUDQ6Y9(k~P9YTabj|jgZJ9>>%I;MvW6X
zv`&~&hc8qNj>rS;GKk&14bV3n3g*{8e4}rmEpqf1vGGeQHdsM%Ecw~wdom%
zoi&4O;J~8}2=5h&hCgO
zs4TCsbMDAiof}q^8XRW7Mo6%eWig0Vq$aO0wNRJauDWoocfh2eKP>a7!?bbpzG5UZ
ztOYv`m%7CfSVLb>-JTS=!tDK$pYNi6trB#C_Q}Ym-hq09C~c>XUgLgqO+#H>L&E}}
zO-h;rd@^jIE_RJR7>s=ZtUfcW^tM}vl5ElHk~zyA7e`o*AeB=5;I2J)dA`q{mj8!E
z%J7~^Gl!oZV=}xb1U7ekUN$XF%b&Sj
z446AO^@yVr|AS%GTu*dwLFRZ)8%Mm_=44E!KE*@kaqxwV=u9_=J+Sv5;>JVfmoS@$
zx_`jv*sSakh6tLYbrk!
zZDz+IL-})&CGUlXIJ{YnG$w5%y?JoIt$GFCDSsB-$8eI@*nCJHbjDqK*$1)*j%cIa
ze#yMgxy9r+IGV6|PN5=ySU?*sCd_e4wL19&!b$bOFU%8dRMe4LRN^lJ5>F14k~B&`
zilNf1{L`dhxN=E{+wpK)lKX(J|*ZYE?B(dE!vEs~;!8CQZR)`8CBzP5c
zbX>;M$%w-+0d87!Ayo?+^v&BT{%$IY!MD{o{a;03+jB^VNgE-94B|?uC(u7FAC-*~
zf-&sWRYJX`)5BCK8s=hQ)@b)OXKMvD2jKGw~)((~hyr+nD4
zs73_BAUnH0f8m}#u8x+KmhPSD$RijGq*r4S6Z2GrTVM=4Io@?yLKigQzwqOt8-XLYc}K1IvoVh}$2mqpl*4}vuLVagw!)Top*
zGgEmvxtq>t%u`b9297~;Fhu-#Cr(Ro)XAc+`Ry$N5*^T)cbkNCHHN7lXig*)Cs8z^
z-4Ser+~9}8t)$q-?Xzk6G>F{o=;$GTXkWGGar#~}>Em8I_PIrVw+;k;-`CKk>XW{G=Ms;W$_c)ys|+92bMZUlp6+JIo-BD*Gnsj5dm@uPZX*)iaA?dIwm%F-|p#IZ3A
zla40zi17%MRX@-qcLY3gyxKa8N$u85;i+qu+u&BW8MIO@nS5yswC!%{{Zxjks41@A
ztT&4aTG!lJDldyE(_@_Jvh=8)DZEk5);0-D2+l`51blAGL6a|VK<ifC=
zrwdN2r!7^=cAmcZ61tf!nMi1XBq#Hff|kfx)PF%N9N2a@}A)AuLMm)5IJc
z4eVe#j5Z?m-m06Rf+rWT5~K^SK#%X(0u}#&PV)@+M)-(|G&0ysuT*_MYxK&g9gI
z6TKv-!OkB(PL
z2}=m`?{&Hw2TWBv`UAvf2Oyf5OiNMMQ?*D}PAjVxW0nm$E{{}wbY%1bhqDu?^5+kZv!*nz~Q(y+2@~KjKCgC{1ho`fdl1x`i6HKM2
zXIhb!d1{e+lavjA(cz)mg|SA%&Ade%HCvwI+hMimm}uG6EHspHjX{mTR5ft4fJ#iQvkf8!SELiqwx(5IGIyjNykO(fb}(@;shQY7zd
z;gMJ;cY};fQzy)AyB2lP6553VN&+EGN
z(v;N}*49^xlNOIhqEd$3iT6)RIKSAIj^9VG1;dt%S;DDDqWj-zMt*=%vMum&^sLty
zys20BYI1{Kbnr0U)n}AWFQKlwUq9&dE>Nb;8rg|PW9*l?oyF$W*zf7tgD$8u^U*-j
zID$xvK522O@6*rxO4mB=)z%UFX2-tHaq^?0K{Ty-!|*xs?D|lV3I~k>%Xu4Tuhi%Z
zTh-;jLF8GuTSUL-a(^&w2LUMBB0?h0vB4F=^y}T;uz&VCHxk5{((v%z@^;?KoEEtz
zmniRd4ymC!s?=7SXyK~_M;ECC;7?e@%g`40O*vZDEa>vMthc<;mwDgB%wtl$Y_rA7
zxYV6Q3ci^66akAa)zrGTeR3)2=z8%)TQ2dpvTm@0HA5*Dwj{p5*iBHP{ifsIY6wBAL=s6_=mbxBR=
z^|;~3R4)k(^=zu{0TdU(QV{6&zCShnSqNY8;Ix{dGerzdF95gp^{?Lvoo8n
z7+iMr7b6F*?$N_#X7<1UxaN#W{!qfGaKw78B5t(Q8oo1wi)4_mH0$&
zHgFGXQ_4{HU7shu4eqGlYJ{dT|R(%@!h_>9W
zlVP!idqfchCcqkP2dBd%jQJ%KkH4Op@jhY>=86yla^I3xmLx-96pXKE9ZT;9WWdeRn51e?S^@@E5X=$tG1{K6Wo)Q1!pps
z<&)EPV_%Y|4Vn&-VGG{O7ArN{73t^yr?LU{XQoRO_`LNVv~vASC9j2Fl-%UF2GkiLeOr)+U})ufLQ6Q
zwal#zG_0yF%IrEnEFvYL>yu#WdYD)ZRVw4(k20Y=7Rbxfq|ZQ8kMz1BqbD^VN7(#~
zn<*-n$AH!HEI~A}o5a|D_IQ;5X0`CjGnG0c6t*&;^4%3giuRni*q%8~cHpd$|F)_>
z&9IKS0Yy%Ok9onPo0!%dp588`HhzohD0gu{3LnUm5l}6_>LGxxl+&U4HOj#;@iVj<
zl)WGoqxxnnXaLR!R3F{g`OAMxs^p|M`pFx8KYZ(UkoHZa+RCGho;lV~{HFdVV+KIh
zjsSp&pMg2=+6nbFe(B2IDpis$?nm4ZT-<2<9M!LvkbQ332>5pgR2>YNT{eLZV2BfH
zK87qKg1(_+mF?3jt3G+pVX|kYi+ZZ~jt=|wEPibN2mLev-J2+E03&ZeedB*QYztws
zmNNWuiFxHCeaSiD9YXsD8>%A!Do!gybO$j0W3GPvkos%G$O62rwZCede$$BGeD@v1
zE<8haKP;dCNBmQw`d#Fb35>-H!+Ys{QQcKmAia`){hn)g3I;W#n)6`y01bw?YBn=juzq8lm6qCm^HW
zC0TyA5`ZqAPFy0e{v)aCU0|wOJAhJGt>|q2zgVrh-n-OQuLGtr-R?xbHNUmdy+rWN
zPj67%|F1TVz;8}+L*hk<_bDH&hVOqQi9-a)M{-k52WUMG1sVwZExuexH=y*ZUeU*@
z_*l8tSg^gGr>e57f=fn}{)hW)AdnK#TuG&t)vhFx$Vfw^uos-hi2j?xOV8Ec^XmfJ
zJDxK6BowQ<%HDZlSH>uJXU~Do5j_Ks&=dlrW;k>>Vro8QWhnSZfA25*$-jeDcgq1`
zj~_-pDubxUwc3kvYbT3(Ms_bm@oAo^%jOi^#`SKG>~f7-qDpdmkn;eF2`JI+{_%VD
z1qj|@ro(bY$Ig^dL!0{i=Z0_W9iYza{EtK8Dg|R+P5L0Ws%qC$G@M;R_|=)RQn9yA
z3$5=kwrg%-UOtm79#M$Mp-Onc5xO&0+^OEevwfuLRgT(mOxSDAOaVb7wdk|PoQ|+n
zr>4KgZ;t=RXnEj3s9nh;@+8;^OyM9
zdd*W=bb6zp0Ur3WfG}0+T(dCbe)6(hN-E8gsE-P$v?|3rsKy?FvEiqfqN6m4(Qbb!
zR{&&0NMGehAq30-dTol6sro_VjZ{nOZca(IKnqQ5^7c~-FNv|J#xj&JS{R7ukNr4@q|Jl2$
zQ00=m!>#Ekub6U)pDB||jX{9h{^zO*&z$Agf(kjUkffdymM~B{s~A1EVsQJZETLx0
zG(qU3)ykt72d*~Q=8v8T-giePncrNAm<9qinJHn4Lp|PbP=kA4v9d>JM~5JGd**pY
z^-t|-*8-t}#2U76T()TG3T3J$NnW;NXCZySe{?w1;#%j@otJF%3JCg4?6kZd7E1sSPT@m#z8`cN+#?-rt5#
z_3-><#Wdu8tR-TUqKztmyWQQv!$IU8NNHYS=ircn9|B^2lMd>3jn>J|#J&TUk5a)O
z7C@(7Yl3M$%=OkJ?Umh55lp4<8I{{U2TZR@}xRqPiLH@FHAWV*wskaz{~I9i}ts
z_)BeuLn9hFS)wBsWtc;97>d&7w`)xc7?f?#Xh6Jk;qsrO$i7-KcWC+^6XuCl(nf1N
zLKk3lSD_zvr5SBvJyOx&2703O@a!(|^`Gi1eGRB=#&>xHI6wf|tiH#f|4jrQ2LO^p
zyQe1g{}kf?&e8mH$^;btW6{$@{&pf|0YL740N;8o4De~!Gf5awf4QD{qW41ewuMRV
zzx2j){{Gd0bKZ!{fAc$vkouT_&sC!U<0TkH)BaE0ygcgB)GaGCgXUj7l(PZc`(K{&
zf(Nl11E4%W@%n6@lt~cje-Yx{0u!^|B9h0
zQ)IsGH*RM-5pe%GX91A1_de#<#g2!B{|hI8@qFH$a~I#>*S`j=PJK^!$#nR268sw-
z0IJh}0fd;=*C$V2k%)hN_cjlzWPx6ULF9x0%KX1CFaXx*{b@PL_g3WZ
zPk;GvfIu|-0I5eNQ2&dRfG0(PEUM@M^5M62nH9V)O|o|Qdt1VEh7qZ_IcN!IdO`)0
zm=#qvs1yw)%FmkoTI`stZEtO~5L3w&vrvbTP~!K3xt4;>
zXWjh=gcnQB
zB|HuhkEf`+QXZ)^<4mgPGd{}D7p`wMj|Bmmt_AS%T{cwf$o1N?(m_k6+n>DATc!k}
z*2BW*0AVEJXZ&s#xPC6MPB?S
zPDi0?*>h;RNBkXLktKmZiE^*Bh82qLMY028wJe8b>6{xz$DOQE5v?OW-P}$oEjkZm
z($rY@nYw75>2Htw4XCi(8pJ|3Itqe6Q8W!4vzLn1yd?XIBKc4;{B$0wi=g1M*)E(F
zAe52b143B|kGFw*qAJ|Tl!lvF+Vk0pIW>ZI5?YVBm7ZQ6ooh?td&05ucD2=^>|8TL
zmNdsqLse&evcbL+W7Z7shE5LO80MnAho*ilO)Q!R;QjisfDZ>(r$d
zoLeo%+cX<3V6k<|xaTI>z-C%qf|sy{U&j9Es+%bOI7{X6|~+
zUJIoS#;<7F<~T=O39w2n@*Ndrmc8=`s+4T;ey&`@lSS)r&)=Dve2=kj@p=O8{3
zF^VvDmofJZtOvlfpU9-<7g${cw=!}n2l6!Rk-%V9A=%C%)-OS?XrIJ}YD<^Xs2e4l
zRDmFDHDl%`7t{vjqON~^#Wj7B#ZD0|BBkP8kJ;XC60et>0*Q$EHHO^_&4juzcbTn!
zp&S=Q?j2$zG=k0{5%!#!Lj?CRsft4%(Vc3x8*!*k|0GrrjwR|#44+ng`ve&W9uK5h
z7pTjbfN#++ow8YW7gp1{hshAEE4H^i1q{$Rr%uL6)gq}8ZqjzY*ULy%Js0tA!{$qz
zSl{h612$uv
zra3bP8cBM~Cf_d?pB!D3hQ>2m8dT|(Nz>FbRWvG$q(k7bK=%htgZ6IRI%ZXs(v{Rd
zCmbEqp?z<4H!r0zlo9b$%$-y|Aem1sjcOT^)lAgYOdYSs8(GKdVtxD8!{mPc2FRt^
zWTqZ1W+CG@Rpie-`m!8&FilsO2aTvF#>M!Qhp2o^Fy9u0p0+n-OFQI9%!*MmO#UjX
zcFFj3B%hw=fdQ8@isVpgm)rcI6*dJ}KS}c4H)};ZJMPn)>UZd31YzG06QO
zFl7hT$1cq-vK@>DtM+|Id#IKIW)6k
z7#zygbg(GC0u^eUNID75&!8B^a1}x)aXDHHjCzua+D*sS!Fqmj(tUmM1n>aIfq+1a)VS@SwqLj{J@
zN48I1{*xK&?3~%FzQ;R;po5
zJp|_y&@V>O)S4MMFs*1<*FQBQG6aC(hd#s&iy9$>4$KB`ul%$-G7R6{fnwY=GdcZY1aT8lRcTS*XNp(NU5*=^2^BrEWE^NPv|!NBH01d;XBASU+XPSV
zpR)nAI8d6dF37P;wBku%K_B7Ja9ENrky5h<_vs0ahGyab{bX<0>1m4(XbvvPM3#gLM_T?JK+_15t`HmfkFE*_Qt9z(<*KWfB97FbB;DGO-)ghfnMu
zN)(E>@rtQvn5FxVuv=~F=MLkBB8W&rR(a?aEbG2|*v9!yxP{$5co}!lz41ckRD8Rd
zJ~*Lf7ws9jTpkm-RIE`43l^B;u*%*CDXpQ4p2QguLcvRt>xxSRm3&0dsG3<|LG_K4
zLx4$~$XEvEWjuD~@RAw*3@L{Ed96g$Tf(dUxt=Svd4yB?87A6E(JW!%`8ic?x5vC5
zfY-DY9kfb$*VoqP)H4UxLkTb-+lnx2;xDCTp!}Y~V!&P3Ux4!#s2j)OAoRQ^!y)Ty
zmN&0NMq6>L6EBepgc#hu#t{+~;3E2ez}L+m8~mR(O@*M5$IPwqtGqt0;=8J=#Pagzoloz;=nG-jEA
zpI~CWmMs~tGBX&?>3K|J4&@R)ylCUkrY{`&&uyeXG%t!-)0Xeg)H-IU@;P}BX_igs
zW`E;onsaRDU?%MvuAI!GQ@L(}5)l4m*E7<`?wu-66O^LZNkfcGUzsWXSTa>8q~3o2
zoZ_BNGjTna=V0iw99E*`D1##5L!J!_a@IL|XXCY>m$`)vkqyoJ95FI)QNzh&_*pm8Urcx;o(eqF@4ER!2@hdE
z2P8I~VQv^i{>T7f957A|wAGrbVfYQ!X!0;=#Ec=~sR$tBa>*Ao@j*YG8GDD5R?T9j
zAZmPE;Vtl1TX20US2j4}Mq=APSmbgM9D3fc@dLbvI6@ORoKRKA;E}???V|BlK{!nk
zq^XN1<;F>{Oi3lve+v1kbHV04wgIP9J_!geD_k`6?HXrwFJ=LM#-@IC8>C~b<>%nm
z<7r+rtp_ubPLTbx3Qp_0VrqY~jrPVw;X!G<+mbWx>Pk-Uq7KE5M{Mwf0%#AfgvB#w
z$JyCs)PjUM_m9$ywe@+Yzd+t1kKJ2~2K1f!wk$2mBBxkZeS>Cr7C%rlJPa(34Z=G!
zJnb!SyF8W6DK&;ZTCAGtzWp_KU%Jz=7l$1bF@SQ
z&08?1GyeQo)FuYiz8ndDSr;3U#;|~uF`T#X9Q>3fRlpX`;6tl=fc4M@FWNRyhBJ@~
z{|uw3V#b{1*w*yW+4M|+b^h^J$CaTo~{#a3b+zR4X+|
z5iy?Oh_L?qgJsg?g_{Ez0FUQxXR-K7mW+a6164hnA35$VW_UyBNPb$lFzHRw2@RSI^w^N5)5D;$Ty`o##@Q8HfI#wfCou@f
zb%P_d{y|1c5Ij-xh^>?MDtnon&SUOQN#ho%7AL*ynuJeSxyduLH)ava##Stpzg!f
z`oFI%f9rN9W#G34e8VAh!{a|;-qEqiJ3Oz;kF)}qe|09L{=ZD8ng!5x(g2jy?_KfE
zO8|rL;DX4Nie*Kq{r*1{|0$*8orwRsV*y-s;hi!h(RdZU)(b8CoGVsCrmiPxY0t4T
z_4NFu>fq*Pq~_2PM#7)y6;s;YQb8yu%ddJmSV1De?Nk9E$4u^`qARUq4tiZd1lbXsa5L72hzcqztq#T?9x^Q4`|D1-zC~5C7-%M{
zEjU{N7k=v;mqIOxa)x8INb*D*EqlFa@?4;2tH!t9+9V~>fb;TlGOGeyD7s;}Ms@e5s8Olu6?V(ea2d3s*WF;xkdR
zRWvixlL}oec{_~tK!6kl!4fSEoId7%K6K9*wxU>RaWat@`Ti~o1qf6KO?7!F(P!06
zssk9f{aUAF=?*C#bk#Qubv%Tur^j2wlUVSV`tm-Y$oGX?0aH!G(>Z$(`kD7xqX?TG
zjZ*u)mXcfP3Sr(wvmAaR&a-lQ$~;3iFP<-^5N`9RR2;BZA~V>&<)IQMhEs&hiPlr@
zHr%IbR-B1-qe+K4XlcoGI2+z$1`lyUNEZS_xXKI`ollvJ^viWk>;Zj1aSb-0w@^|n
z2~6=<*tm0F)g@>tAHW~^iL*Ia`bv=ixJ^QLxUaFxjn~{b7~Dqt?OtABrt5_p6n&P=
z&`qQHj91`LLj$lXY6<UqQO$?
zWwF8YZ($5@iREQ&^Df8Z6`dANTq+kU)Budrs#JX;lgJbDj$@7o>pzw7&aF5dkK^*G
z!2dtC&N`rqwp;sxbV+weNq0(0ch{!7yIV@SySux)Te?BI8wF_)@f+0VJr$C%m}vcHdn+ekuU=Y!IPI@%EHwC!y_C9KtX3h
zTgY`N5DZGgr-~}1Aoo;5yKx%J^)W5lr0_U!Be1J%5iVb$U|x_$Pyb0)S2I8d+sx>)
z+Yf=S29L1`Vs)CScm=Rx$D%-gJAqkq&sP>y-L(aS!Un^zh?SpehAvt7I)R~Cs7X_`
zNqEi$N%Npr*ntv6+pxIcCP_n!`s$md^@6+lK4Q$ATBbCYFeBLD%~f7dpSAYH!hors
zPkCh&0(TcH`Xb#mvSO|XDjT{jcOq?WAo1uX*Q?aF2bI1z*j*SB8qR8J0bLo_N13@z
zTm%swalHX#hx}W?XxhSlnM#p}j8@7RkrRl|2I!EP`|V6YswcTi<>h!1&3#k@**^!8
z&Re%=-ySlPE2^>{&IyAuA7o2a5x
z@suXV>3}u#{RvH!CV3gI@lGqr5a+w517b6;S9wkTe%h#HHjIpMy=n!Hk0+_d1k
zL(>)L;|EQzWk~K6l6H_9b^QXmNdY?EyS+lh$U@{V)TP1kT@R>Bf;`B5wy)a{Ogo4OaYg_>*pNH1%1P8
zYfAK?=@ygn55n;D42%}~jM-)4rg|L(1O3KZUv4(|wMG}D^QbGUlszXr9rXN0_Tq-2
z<%B&Bvvu(g9Y6?-*gLKVOJ9uA6u+v*E-g9?9VsU!mF+V(5>{~^ut3pdKv;z+@M4LE
zDt4L+G!7100&ChD
zeqXCK(0(y&KdDeY*ekhb5M}?Zel0jZQP5Bj1kdR6yQWPAo
zBxw&Z#%u2x182^!opeD_3Rt?lc3!k-bADbr`eLgnb;fPUMDc%4|1%zBg|;vVX>?bUjL8&tDC-R`a;$
z7GB<)S>)|+3H1AVhiPGh+(McxoGli;p0H{2I4cMU*j3A7!p&>7s0;~Ip`|Q>-0RSi
zXhc6Ery#z=qlpU*I7Gry4)}sd*U1i@aUB!|D~U>nG{^1O&jc|PQwy!-BB-qsXGm(O
zVqRMng^NAT7}X)m85r;r-kKfNBlX(zr|-JN8vBY?I5F|N^;<9p{rxpG)$mLVv2b5F
z%311sO>K4P&8N;G7AS%3VBCcP!7rvTxcXxTP(V6Xp48Jemrr|m$!L%&j9(|Eg@&et
zpWPo)s91`-STwxNw4zfQ7dB`BW%AD0+@p>VrnEU7&Opd(@vqqFg%<|A~pDhQI`grV4@fF#X9m#n2;=yXNN~yBJCigQO6iA3bi@TSF^>0fl
z;W+7>xO>W6`z-E0E02N-HG02ra|wu3&WC}>W6z>%d1R`4MIopGclOFCv|}p9<8I
z`zM$UVqu^#20fnOvToY?ybre{#V{6;jaARI-?zNs8#o;k+ASf06v6tWA4
z<(0(27IpLnBf5Te`vi4(p4-ByH}9U|D}u0DDgJ)Z{I#)k6nK_aN{M>)Ds;k?bT1Bd
zB6D60VOWlY$z=blIXi4F%5#5vBoR{%CR%=ZG~P<~dmAw+jPElQ{Rj8P4T>0F>yUbs
z`OynTO6B|{jq?)WhKzT8kf4uqIg*^+e`-SzaQqtA1%=*l1@h(Q;9AL(IK4&KgVBVE
zcvCJoIR}}Hh4LW?$%*rI6`3IXv$t7&ETeSJrgtOf;_fNWsIGNyxp?e;`;>XZF9UW2
z6=Lk_b+19eLEFL#854m>MdcEO&K?~-K^R=WWw!`mtGx2I=G%3NYoAH5$hj4{y4Rac
zu|0qtaPp;|#pX5^G<$56Q0QEIl`?4!+IPw3wKdKGPQ*YDni>zyuEa^!Jndeb
zWWc{5D#ML_3+x-_%v}4TUKAUpjbS}&cvm6^SQo$HZh_fwFXs6;jJ1d
zNV*aNl);N0XrJz=DKi|-qI6k)2KVz=vGG$L=b`U+d3_5YBv+Pk7H(3fv7sLLw#bAQ
z@}cx$!7brux7vyEO$^K^Zt}3uy;E>IYF`3Ft%}*wbFeE8X6G!R~;C@p}1FGjO(kUTNENk^PSwLhOQ
z@y){zrB|9Il{OH{jHyCYK1cDFc3B9P=UKj^>xU2D`VhN#XpQmdDmar->0??^;TfNq
zS*wkCca2zojYK^$py&mnxCTgDZD?ur+V(^Akbz3oYatV*Z);aT+L
zD=axd$maZmy>RhVnWO~QMOkQrMDw#iqqzGx0MgXEK>=NP%rmTeRllJ)Yihcc7H2!e
zle3J+@TV*!&x2I#0YqMfve{YI?YWFRkpg7WSV?s;x#)#&1qKVaDRHRuEpMf8?rGyu
zS=K%^x>9RsYHa;bhfvlw{EFRED-_m8s>-!_4@~ei#B|u`!i#8jXZfO_tzDc
z;0GGK$jN%dPlQRI>5xRVW9U9fg$p|!9Z;s|=b2DV;OxOzsOC03dhL1cXdZ;&qC874ZkUoPe$r2nK*
zgy|iUZ_KE^^jAD5^x=RNf&Aj!*MW69HAhd&CGb0rmy6yUtU!b>G(+U)pQ
z1~7@7ejR!Fd{RiJh~R>0`!MlEgD!jx6J<+Vetrk!Oo-FgAYC~}#k+`fR2g%K;`ZgW
z=_?$%r&q-yPvKjjqa0!Qp;q~{GyO)sq-+%O>*FK0)*KG?6o?yfl`;@unp`O43Y;@L
z0y@4y12tL8%W?)=GudE{#H{l+og_FGrXfS%mgx?*imzpR`>OHUTV
zlm}WM%2gh_WX4;u-9GlZMwq=?HS~**36)#Amy%L06m!nYGBhR+=AxXKQM_z#p3e7}
zvPtqKnd@
z^t}vTmEe;~Mekk2IxoHr9}%=Ed7JRs4U$E+raMIEr9w}=u()4&=LBC_)j>Cd&kC3Q
zUW2{vZ9k0s}Ts(2KW5Mon(G
zTS550%Ka}Wr2j5Ez~xd`1qLhJ|SaiLs7*r5h&-qV<8r!x)$l|rQQwR
zGHch7uAn~%^vrpdPgJR}4XWQ0HOv~8cNyPXfkExs0JqG6S$j#P+WCwIkC?_y}XSgan<``cRVn)6S$)hoN1YpC_
zzQgLX7`hO9f4yj(0e%n_|Q=3SE!AqoH6@9E?mNKmW;?|~Ks
zr7^B`vh0At%iY1&Qm@XIdDM7<4nI`#Pc
zc2goL3s%4Rv9t%lFl|r*ImimBW%V_sjOLWlJIu@L9HEM$U2?CrRYmw{*Og9W%KbA)W^!RhdGBL>5bpoKzYzQN;}?xz
z6d5U*jnv7Lt|c^^SS#fDy)R|;S7{p(923S5h~(`KEc&!X-?*HLXp_&8VBq@>Cx)y?
zc;_YOiHRB+hd9M7)lZt%SYF$F9CY2Lyb+{$cv=D4MoKv2n@{pT=yWt`U?bZN#c-hS
zH#KHa?M}r3eHBQlR!<*pp`5DZ$D0q&il!AjJL;EI~)ahiAM>
zDU`z4$snj*7n_hEIEz}a-KM^ncg6e-VzLeM0|viZa2OMMa5{`NEpqv(M=dMYMKab7
z@U-MZtsFBkzAP~k;y=N1o=ZbQI4*_t)W4#JF&k;cq(+`ix`4nVW(?{ZbnQGR+W|Hx
zdOSE%PZYd9c5@L)SY9WBi?RHP1AHa
zn66r@(`jzMbE9wlbT5h!Q8q5rIf>KEHc)cAOvX@;GGXvW;*ili$a?lToMIj;o}oTXe=O~{VrCx!M)U1AWO^~
zv{Cg2F3-Y(zG?36)Z?25dYgB=KP?+PFEyY|=+lwc|8UsqFCoV4RL+QTp{Hb@Ko$U+??#(i*=p-rj9oi|W0_N7ti_7!H8+zn1
zi`TVj-kM%8knWSR2t8)??JWyKSU-O|0C`lvPvxTdR5Cj3HHzks8%Gp_e1hA9(cCNs
zIK`?<7fwFzO}TJsRL)r@DC(a~S)Q}HtjI?x_JRmEs;BA(EN&z7F!2O?vm9+K%S*9-
zl`osVcd4l?MD1}9-!ex95=|$b`|m~-da#YH=Nj7Dj+=Chp5^5N)p0O!pOVN<@!CJl
z7tOlWRugFC%`R!5wmSn>U)9Y*j#=jglno
z%opDPuIjrws;{34F}sj6F}vXBmm9>RF0H{vt7W<`!0l?&=%?1eQwe}uHZ!@K+tTFx
zHVY)WR2`igry(Gei}(C5asb!J
zK=v7x>N1MmU(GeB4fg?*ERG=9xu}|%tg`Wn>_=HV+U!Z&3T_!*6$-vM3t@fG8t{2FdKR9v4tu14i{MfajCg7zomjDl
zlYF8-ly@F-dbCY(|5Gj*#VyhT6XQ@w}gF1@}&JRE_pA#1%>4h;LsnNrWf_t8{8aZqApmPQt9By;E|
zj^=J|uSdD(;%MJ%jj~y+&ve6A&F_*LxQOH>#;iH_A#zcn-U
zwilJuS79g&NruCT+Y4)xY%IRf`zV__QwW%=q|q2sQ#iS_t8=+VnVX6Taqp=_LU$M7
z$yAj4~RK#+#y0hM+(HGX1O&?;t1B*w>mvItTi*`Xv0Xj}eQA)$*H2P#&6
zf%{E(<~WzsE=B@_MKFU!n2whfvL!KI9#$6B--65g44XVqW1PKv9$>jl%PW*joHwk$
zKhFhi;i0jMhJ~t>mTZ&>I#6>bGUCr2_{bm+mnG05%p?G{5c*D3SpP0vh68c?qDp;B4h;D#(`woKG-w
zYR;uD8a~)X9S|{tM0pDWau{u1f}mgH%jk(G9RmuFDr_!mq~HlHU0eOc<+7s?GnKZu
z#`&d&l?)`!;@VQ6sxLfgPEgbOe)6=#x>1@5wbU((7kDu%C;*u~Qh7h09{X)e-oi+&
z<6PidJT~gu3HdozJy$NMG>a2D1alU}*ecX$+{t;@mEMlk<6A{Ve3+_E$e0-Z^C
ziM5WVgHB_>_!&I$UNg*DJ%X;&g#kK@jDgoEEvNWlwMjLV={
z>2Y5-akUy=Ha5D~vbz2MsP>Wj|55Fo{HI>Wzyn)we~VhKjN1)Nq2ZEnqkmNarhRJ{
zh*gVd4u~})Qic}|$UcNHg&Al!sRpFlS)12MyS?*3@cLk4cuGkg8;+AZf5sVEVNvUeZQhU~-4mMuDHlp2MVHCAi0zSET~U;lMu@5SY2!M=<--;G?8qNwxCm6;
zFVuU7m&ly
zx{|GtPHE;?{rgi4gwMg<#}5<8!NhqVKYu}v_+&<@+|b
zLIo0z#9d&)Wm|KpCSsnRlz`pKIxP62jV9M`kps&M3I^w)BY1DG9egR%tmRt`J=WOg
z1q35oBf8d$1u+X~A*6jtIx`<3DjFM?VSb)rq>TUYdm
z=;+&Qxn#+~u2t>BzThe2B!eC$QJBqR$5@b_FmowuSIGnW04v|e-LQA6)NwI&WN2Bv
zRds~L)AW$XilvWEca)_ZfRYZ5Z4l(uCrG|2ng%Zv;k@!;`(d;f{?xt}zoUp3PL3eRD^i7T;S01x0h-gE#XT(_YPL7WMc|J>~NW5-waJ0}6XDlH1GL8q+l)wFSq
z07i9XX-mOD(@R%icX=((Kqjsuy*=Tko}r$ObfcbpAR|T{+96`x`^&8;pMaUcwqD0=
z-cyGb^&t+j!>81*-nq&x-XSGK?Cfqz#JF(F=h^8^>RLeiqfTlODz^t0cO@atc+6j+
z-c2*tC44{}D(t*rhewJ29*QKB1Dpz5t*dvkZL1+zN%?+~_4obHTk6xMeBuMg#}+^`
zb({K0K2k`!m*~fUyhSVmg%=_4zZ8BRpT*?r;NOjIJr=($d8aoQBxqGzg2mHsCkGst
zTt)G}Dq*7w6>+c2MlbC;DH$AV)q&*cY8c(SG$w5q7adL!Jj#q8~CXul#StoLbiR~hHC<@cI2^tAu3XW_JgO;mY#&V(#vr&Y89xzh9c)7f#!v*EQzr~F2n9B5@g6ohFDC638R0jG
z!^x_}dGhUKe&Hsy&;<1L1$>fiBVK?ZOD&Ad16C*sX$
za?NXYl}yvt$cc-I#@EQInxj(JjP|Ju3=Lv4c3>zFE}SdJ(F}IBFc~$mX9V2A5|~u8
z@Uw;wZyYZbCOPRa;m22gTX+4XAw>oiX5B^>2!4HCcttLXe
z^63L>@@&Gq`!jGzOD4_YqOZKFa#l&IbkdH~^5#`!w!UP&dr8T0<%vckDXImhUQrK`
z!Jap$yKtR+nk{!IUU9xZ?F!%Xc{vL-#i=Edy|U?B;l`+>qeG565Q7v@KdVrTmsRyM
zPt{0AvJ4sEf%Q{NOnNMaGmo8Sgr8H8A{RZQT}7KOOmM!w27Q&KR;^SJWAU0&1cc~R
zMf$aV##P)YI(E4ubY^5duLg~G5XAhN7Pr1hR4Q<@U-8Ea{H<<$Rj7{ypjOXHcGwo@
z;d9<#A(bH)xtSig8h|bxC;st6XRhf(P|!3nBju^UW30MY;o7{ioPpk3s^9a
z=g~xK5t_L->ENB#Ijvw#!@iFTWHH_j1h;x@dOQ47ER%ECsE!-k7kLS9RK!bmxZD
zsR!=c*^JK4?vfOqHzby02S0of=ojv=b@uA`;l-{HUH{P$@E;4^+R@im(^Av;n#dq*W=0((r
zj@~q{R=&DiRYUYI;{X|Zjpu*$sGT5Lvxh(aM?UV_ex^xtL-vgp9bX!%>u8}H@(2el
z!DxTcPX6Zio+B&UwPDUkANEOeqD|M^y@~4X!_Qpma?mEiw@tJ0E9}vzR6Vcp185>f
zMU~QN^}llIvcLh&unfq9$K9xA-G;x?4%=efh5kAR&Y0SW0(?7W&zmj|ixVtLEfplw
zAIZJjoPM*jSoc!~%yG3qJjE0l^Kzfos5LkuGR9e)0!shzuXqxfhcaHU@9?sLe%*{xEU@@iDV)&wLZ^uCu
zUpe!xD;a{W52N5u4dGt&6Fm{w3L##&j?NeHvUIg+V9;7(oR*e*&I0}(Eir56u}_$J
zxs9$KqUyNM=@XtXZ8kPQ4k@p&QnSeI9Z~?WF->l2CMFgnDesY_j02%Ae}I5iHXc{J
zlrM3bmN$Z>V7uR6I4wxCic->7CYqcSwDpM>4=*C)4+=xh-)bh6jV99!@AL*vF9%}+
zXz{clN8Xy4wb2-_Mc(U3UjKl;&Svt3{NlQRYWh~|OGc-0X(@T8V|Muq+(cdfoid~n
zpneh)jrKNj2A6U&U2#Huc=j?lCFUcc#7}njdQ}wO5v9|gyR#xue0{a{yRuie0
zK}W*W`0iZ_`p`EBjLKUOU*b|uiT1YMw0tFE@#|zzCh??y74FSdB4&~wd>>b4Hd}vf>JU1m$3E#Qh{o{;?gGMl@dj(-c?anHLr?*{t3t0dy_;SX
zMa_*bxc`>$J}?6En;(zA8tQ9D0GGfM4Y8;x-Rr9QDU8pJjYk<)`VR))Z=V9t1s_uL
zME)h+L7?{R4!(d+{xv!6kNa91NPe^FbsM4}7XQs**?nyJaq|B)tMG+mkQwFSROtC?_1}K>
zN5IeSrT`B`_ojwqfa4?qOs0V#98AhhYF
zqV_*H7!cu$>pYlLfTk5AX3n*+ac!j@q_P2nkO9Q5Yj#u(8kcHW!@j{6I_pi$nUp)rC
z{3VFyh5j}R^wdOZ3pEdI+m}~YwrUi|(YCJ?7^^!kozxjw7OM5g#nyGSwrWxN3;k_P
z1Gd5d-hl@Vqz@c^mWCQ0olt75HsHq{J^<>wsi7UC%e;@1oCWr*ilelpXVg?=VKPc1
z3K3Ot8y`6SbB2D!5_*9*lxUR+;l&;&Bq0B_l{r7QvG?{hp75yDRe^zndwQ`nOlfOm4YhkhrwDK^Dfx}K@g}G&L|)0J*dhW7apJVTo8oNDS}H^eb?!?augD`>2moF
z%z^~2qBukiCPu3TRUedTyr?lHK0h(|s537bGB0wU+31pM+u2bx%Q4B&IwzxK@7qXWHnyoz>4!o{J=pYgRzoT)y
zk(zQrz-Yk-&!j(4VEMB@P++Glr}>)V0`oqW-}XabYN09x1{WyIfbyM&+Nsv
zZ5}CQ{}&Ty$w7`?>Hg1S@%tI+sY89aQt!DfqEcJWhQ56}XuIUWV3>LPAyqQp#40E)
z39P+;W7Xg&HLh$9gO3;HA!m1I!5vOb%-XwrVY|i!J3>`5wkj+#M8$G`XZ6O|GYU_W
zn_E-U^J||IjV{%76am}PlIu!_Ylq2XYIo^Ngu
zSJZhhlt(2J!ywQ{0HsuoxWW=E3PGwS
z%7XrRrsMjEN(qtb0Nacy<;*~(O;Hja>d8|9SS^YO{VRyUNQ-G)&{mUJX2>Pz`}VDC
zNHq37^d3}v|9NzPhno_t`_gT(nx;YwV7y#sQH=uz*;;_$Jaj6vKfvZ#uKUBmw}W<{icS*d_Qe#Kk=gEte;6XJJgDr&77FQ!En{aV@k
z4tveWAVTUNlAfNsRN86hldpTj;D3*x^-~dfl?l8w0V51DYk&P_bqnFX(81cRtdRQ0
zXP}$($cdcRC}8$dI`!`C6Qs+pB*06xw}7u3;GbwQ>ex}8?4!XjHrQN_Xv06{FA3xbS
zy;`QFz~0@IpVy+(1@HK_!_yeRAy42myc*H=8L_SIUhc
ziF$urSj-wjefbXHmoBuS$yI%Xw1;~cKPXt5|^I!4npZJ8CSk_VID@9}uI
zF8*>P0n55`u`EwiNY0{{QTj~gV2q1B8aKE0Y0#)}z7PX)#35BWS5it<(rj`cVxf3^
za)TNzR2cpm2jBy1q!lLgga14pwK`5H9R4`P2I-(y6cbQYhylpX&e*BP_RE=X)i7pA
zfY}4rg=wJdpn{}ZJ55b)(ZS`!#5RE2`A!X>YdViGr|n?4H&Dw}#`um|9qD1Q6DwMb
zIh(_U#3l{+H<)PCZwL7WiZK0o@p_^3n7zcn_7@;+9eTZr>*Ef_C8usKz%eKPp?Rhg
z=6I)?c*ew;=Zv_{@wl2kX}LQnV7ZAtxU31Z@to$)13l%oPP*NSWl8ZizL2K@DAEfaVjh|ToRmF|MP;hy0NRGVCdZ>
zw@+txcl$%04tYK(io_+7-M)A57H|DG+gh
zK)CrQ#n5Yfi8d#VxNvAc?_}N`U=ndGuVwUvI3i$=0)7D4*3v;JywA!nY5IZ>Db?Xi
zIet=Ry$=iM#w<~4IiDS)ptu|X`aj8H9i0eA_ht4MH3C=Z16A*NEz22pI={qexOL3+
z9tKp=N$n#JTaVGbckEznoYsS0>R8-OU3GZ~gc>uK5S
zZ)N2HN5MBOx)&RS^nSR&kD4|c9_0+0dpv-Ji11lo+(i>{u;VpgZ^)tY?wy2ZZ^3{w
za{5X;)neWL7TXRdV*6hhDnAvetwEJj9Ik(Urne8H_cBkKhx;YtB(Svl&y3EO#ji&U
zqV(7dsWbi&N*ZpZh`}du!M~CK1^ImIeyR7Sz
zF4NzJ*XDY$LV
z@FmZldSP3M>CvHvK7ZYO{u(dG=YFx(ORB6XC&KibEZCz#{7TKCC97XWn-;SgBYQ@U
z?4knz5I(~2!h1NGh~5)P5anWG{sjoCNWH2$K|w(&U+fgY@23T_HXyNlgz1!+WA)1Z
z_z4d5cLs|p2Uts4f1|haj}U-Bc=!UbI_!>Ka`<$_{hyZtFcus5-={k!(U8A+CNSZM
zkq8zTFI+uwqK2gc3ly~aLr3g`C=oBMyyci(
zbYry@i~HZ%q6lRJqPI{>2N5{gAL8C3vR_^vNvSgK5hF12x(A&{2MuC+d|8}hMXzcv9-fT^5~gW;>k1Lz=UL=%cBt!A||5@vKT}^sy_gPOqJxzwz#Dhd1YId(#d954z<&I^ze%B3^vezr#R`UpxL!*Z9Yu8
zJ|Aa4!?GS_e}BX2+stSSr(V9|?DVY^tp`dK%TJ|H%v_}RA!yWTNVS`v-}n>|?enwr
zH1>u8wt!MWSxLKM?&bCT*~l``AFD8iK7v$*d`SRUnd)iC3u}h@j5516)<5|`P?Nor
z{=Ivx@CFCwW!vlPZ;KImU#mQW8K7@G?@{op|A^IxK(Qg*`$r1^M#+)Xq+L_eCDQEA
zm(-Sx&RaTq48{RA=7p;kb9E2%z?B83nxQJ%%Fpk*t^8*J%6fTj5nIh|5iMFnV+;q)gk2yMvU;IWCjkEqsyMoyA<|3|p0YntP1?ySZ?h^Qpg1cw5F`c4E?-duSp3GQ8&}
zGc%>kKJA9^;PBSpJ8fsVSFDucv*%s_$+qU3{#)N-p;wiBIDQKSVpKK_{nOhL-CuaQ
z_*S%0tSygI^{B#P1
zxQxqmOr>#GXuowUxoH$u5heg_*8SYPi(j-eFptU{ebh)ewU(r5d8G*WB6rF$v6{*2
z$Bhp0T!FyreqyzGE7|B4ZSL75&e)1fZP)bjWN`X|uodf#1IX=C62M5!@Vk
zoZ3fV{j0^lJRFEI3kyU?i2s&JK7fCEb)p~$mi=7DWGx&Lczl9be4!XePj3Pf*N*vrUGWPAF6sL4V!mu$0DL85*J1kq#Q1)&
z7aKvv+n4`bGQ)r$azo|E^Nk@zP+_ag_s
zc$<+j2>G{!APiB%{3l3%0`x0ZRxIg%`SSoSGdn~Q0HMXJuEqHCF@Fzo82A$83n`e8
zfFCd7f&IBV7acF3*l|rZ^e-{VuTes;@pPt90;BW4Vmyc%_$pH5f&0Jj$`l9KU%a#8
zi&YTa-Rj@>Tih2J1nH3wHS*q}G5^4J$P%ESHad%Q4-_g2uy_DQ2x3{GLTqRgc6YjPm{tt
z8?dNW&fi`2d38Hh{IxuGZzJZhpDmET@2Ler9x#FZS+hxrJ)@^Hk)fEkH
z=T)5_uQtXFp*UG)opz+>KJTjfjH_Z*56QSJWR-kvo0^~zwOxy?9+L|}kVHzwRyJ7g
z%;nhc{$bN~6o6)HzEyiSJCTy-xL+!js3Zr!v>VZI-1~@Ty83OB2&R{QmPvxkTh^aU
zJ^(5Sh9d)f3}$#cEnNIF6}5O6+>)=nyUWzNHMeIUBPVbA1p-o^`*Vnl*g%K6asqCD
zj79XEZUG@*cFHT<-z$Umn9XHWmWwDjPMP<{
zig@REH!Xi>JW=E;XV_JO1BZm@oH6F+{d!P7);39Z|8TeJF)i1SSr*s(fF7MJhAKHE
z1)s0f$LzBgS!_WJqnX{2|M3Y}uRb67yTi}>u8syrAu@J)tj-?-qa3?`o|=1jA8@#!
zcbYP1UA4c?f*=i!y
zB7F=?zlTEtbDRx<{or38lS4Y#9-Px25f7)I5_W5QL~9*a
z*G2f9^yZFn&vnNqu@OtUMU)IVF8Vw`dO(al)x%7ppFQLBty#(Ee9bfb)MpkYIl2dY7e8`V7*&OUZcj_
z@gTOmXbc*)dq@{Y*&gR`{2A8WBOVDV=-ol%15$wXKlrm{G>|*5htItSQS!sLeRwe^
zoO;Vzr#;c8uQlx4=b4!7O4Y&9RVmWm0()=CjL)lxRx@;KNSO^hXTT7>e`a<>Bn+Tu
z+S?egGUtA1He3OQ40E5`ac62I;B?F6uYNqA)sYdEeAG03d}m5gdK8(u|HCC>!J?i8
z6%80W?DOQCN2U-_dqzgD0;l(wfIoO_CR6Rk9xX$kLyMPNJu6QhK|h4xVWD#o%`p{cZEg0Z*18+
z^A`x%H!LCz%AV`!o@b8qjXEAdl%icd3~DsX}d4|*Plj$enIl&P?XeeoRGK4G5qb(;xLzo=d?jm-iC(S6dSXIDZKhc=SZ?)?6z^!
zC`=ORC7$y|1t~CG)cW;D8(aKOr^WR}eT0m0p9((rvaC4Ce7&|kf%lhFED?dx4Q9H|
zMDvbqeNgbGXvF1YkVx(g(P2c-_4k2?hl8`b5?$&j?_J>MSkxtYsHt{ob~z8s%@%|V
zdmJ3@*Cv$~JTM_?=Aa-Kk#+g`Gl)Zob3A#7;7@$!Q39u_7Arm8mh!y1#eMuj@aUMl
ziDAO`{(SFkEb-c&-V#hnShbJ!T+}DZk6ryvxvW(0sNWsjRNHRQw()cFyw@EP7AF7d
zMrzXj>_2k-4De_b6kv7Rme#%b^*7rOk@m>s=jI$Oq|oD$2O|+S5XJL447I-sm)gz6
z&n1v4Uau4td`jVnyVYh-r6-z*2n}1lPFylXCFvyXCqm`jJ*VRmRjS(Lbj$siiycV9
zLaH60b>erPwU&${Br`Sc{>nX<9KuFyzCNKkeWPj!T)`_kJ;3iy+!6A8+kntP=k}f&6Asf$O^90SAnm?hhd+!uX%Z74WoQZD-RnfLo+X$tU9m(I
z^3(1yo%kl$zoSX?X_3n2F)JNaWHRt>F19M&O3!Z=`G6lMijJGxj#!>3PIa
z%&Al%BbkU3c!`>g6ctP`rNhZ~*?Roanu#hYnPAHP!8k6S9vPGenxsQ`Kgs>o}mollFWVgFn#H~XNK?22?`L$v$_ZvA`W%jb(P
zay}?~vPT@WKenOimV)1z9({(SpbABVc@r&gC&(|;9yphl=JKP>s#hN2|Izi~l}`$2cwhC$XCkT&h+P)uIc=
z55igk9&C8HQpsZP%f|-8cv7T}HA(GRyJU#9641m+G2r?}Ugtb)k!v$rW;-h8(WOYG
zxHP#11q{`K@WD2v#E_Ievhq^6_2+vglV1>RY$f|sql8`3j{YYc6kmTlP?_Q9vU`Go)~`zFd$lw(~*GgGPd@DSdg
z66a<+wbY>6NXcJbEf_qTF9w5dY3cgGA+4pQv4TpOkm=$Oc8+g%U(Sv2@xG_bv8n8T
zMl`@?*s`H{jhow5LS*RXk&p+|&zbKUCd{XX;v&{iEQb-x+4p91>8qtQYR|Fojn?~$
zB>fGKP#sm7XgJW8NoP!nQri)*j}_nGKD_D^&av?_zS>#otflE`GQ9F29*x
zM_&Fu0XOIU*?fn}!Swe`5j;Fy6nVF@Xr)9(EF@o4u1~gV>YV@niI$0*m6@8Yk+bcP
zTCD#iJGVufQgZn3_@ug53=AnDuY|*)Wdj37YP53k8qYg-yJF^K9c^7qF;qBz{wlF?=7hus8j*^tnDSuJaO_^r=slj3C^SieAV$&w|52
zOPC~?-OF|Z-VoE={^PzT-c6Jn!GhvYOX3k{=>t-&hE=V{<;a3ZV2G5+;X4EH2
zJ^bXXpq{d%PmR(Jf%z=20t)+=-&|0(RqIlXoYA%;Jdd@TOClv;E
zlrT*wpb%R!KPca8QW$-(TND;>-`?J%aDBo*dgK*|MxhN*O!-Cg5*!8*kbQg3m!8S(
zAJsEMqI_UE3~G%j2Z!gH<)_nA);^RlMf4gH+MpClrn)8=rPZ(R0eG
zNWmJ^#85gMkNMkuQAlfB&aK!`Bg1mk`+$xusA$#
z?$q%&F5<@J0E@`I*vvSS%pI6=Bb#0KtM5_O-lC;@FT@r#TI0lwp<+Jmge}}p
z`oa#-M=r&vLzT`(5F`h55ExPBW^G}Y
zK-6G?;`^KV1x|iOL)z^-y}$g^*XpEsd8&`lzt{?K3vOaG=OequyZF)Rslj@0!Dd7p
z)sMkz2Cqe-MnUB{DEcY$!b;Y>Qbg@HyD{o}y3|$I%psOnui$QYaF%9_6a|OE{Sk|D
z&ujEjVpT}?${X>>C|H*4E7P4Ic1|E9iaIDe
z%@FfasWmpzYc^uY=TmKLtrMehzI)tG0>%6ckB_)j#m0T1=DLFWFQ#VJ?7js})KNUK
zgh?oeB?xH=){JyT*bJ%+iloNTvl3g9sU>&zt#Q{T
zAQWl8%puct@5)&u3zQ#U*kBaSd|l4z>EZA25!{^7OXXCA~Gg7SD_LtneeT2I10i1#KFhEoByVRFg1v>886f#L}2-|dlf-y
z{9V$sC%{LsB}dZhIKwq4*DKNuLfj$&3=7c2N-Zjr-)CojLl?lYl1)Kr??4`9CfhxV
zCpR06uQ-1x3_iN$%k5fr9_u5g+etDB!NC=CMw9hm4z7()H01KolL7*h!o#ojno5=M
zue~>lsRh(ZmM%B`KS+Y7B-N;{=C>)6GHipTZs^8kA*}rd_{tyVd`(n>68;I|Jn?7JEd*&7Y>fz7t_oQ|nROAnJPfk9Z
zO%mZqNeJc9ea>VDuRU&$9j>U|yz%t#jd2&V^^Yxql-IFENPEM9>z1Hzv~=g@YCq^b
z-370W1-^9+f5)|@b+fX03|ryWF`#k{wArRx!>`mjFFu4MMO?G;@%|#w?`S&K%Ow1C
z?3c$4bxV(1DU7|F<(sYi!Uf(16h0E8RM?Lab9)9?+#>@nIZ?)pyoV%=4Soed7g8csCfvx(#inqY|?%tD{w}8DJ
z(1OF={NVAs2NO33-FT9!6pLrJ#5nzDt_Ivx&3yk}h3Dg#-zsK}J?z4@=(9
z_rM#q58nS-Ea?l%7oTvHPFWXA%!t)};_%kGTlBv46O&hO^9s~w4f9RCf3B2ZK5*Th
z@-Sj^-W_C=^yut;>RNp}F}kGqBY5Ap!^tV!Mk!~9+%=C!$ImXTeRFIT(^B2Rm@94F;si$=kPCN_jN<6J|&Jt+&6py`eqoS*TtTf)f{Y=rBs5K
zOQwY8KR-14*7iM*dF7he-CGiV9~y6S=4>Pe>*_tE`U9S7R*dE2ugDT};e-WxX$X
ztBbC}c2$l`@1(|!Y%}@kw?GBjlM;ZU7+E>F=Dr+dO^$&C*pXSY3`hvR*^yU<(n|$BqT3mZCX1?&5p{_1v_2<_8z|`y|@GSZ@=9?fdI+~_sP>|Jn`>LkE
zhDB&_vmmqjFSKdMkWo?EL~KGqHt&OL$ezo`)oMhHj1CE(2Zs}jFAyc#efD_vtNm~N
zP|D4pC&g%8GiAw<91_>`et!Fs+^G?PY%#T57xQ-eYRp
zgNOz?UwBG-`NPYR->;$$54HZt7d2a&&Z`Cgc6aBNFcZ4W(l%*hzmjBP_)tN!1E+fQ
zDX18n`)L{SCpFEi&_Fnvg3^qkRhr|t_x@qLthP3I?)bijhK9O2na1m{=ZH4koSZg;
zD5UjuhRa@HEoekj__9v;AMl)~N&2U-d`{Hku|V`bdd9y1i9J40TEwp_Q2a^LG_=XeOeXVHQu1{d@DVqxYp_Ic*d`Y
zg2pY{+c7E~U$jM2hgD1-H3*un4n>$ggz=wWRh#Z9z2wR;XR6QEOn+B6_1ZhKp8>JW
z5B?q>Dw)_(Vrxps$jD?4mvTxsDJXL`b~(Y@hNBzI4T|7dK%7Vd8t_USk4KBeaa;GzxcDNYcD0#Em5#i-XTeo+!oj?bC21NG
zx_y7cEi(+EGc|gj?sHpkh+7v$q&?Jsknr;wS*Q277Tq<%Py)8{`wQ+*sj3zq6T1?y
zCg;mu5B=$FZgqHTlZHR>mOVCM{ELXFY{F^?)15S8I`JFr^z2CK5FY{?d1wA<8
zRsz+Z7yGF1TpGMfNZO6FG}+hvu?itFzC#MI
zGW*ciueyHT>$t-j!{Ves_vA4TDiKyHI%iL19_egDgp4X6Z8IPxhx)nf{yxq`UuF9=
zcanpi(OmgU5wh?>598@;c~9<_M0-Z<+R~ykc9Jl=P@^gKIWn=a*(T?(RvaHIuk|I(^__
z*9P77m8GPOizJ9$Ng6qah8(7ITZ%4yZ{cHNHA`kYTv?7FC@5$#nHB0u%gU>rQG`)8J8swal)=ju`k-`HDH5l4ecvr8GvFMm1X
zei@$hs(+K)_WjzzJ4S&H&2h6bXIcx9HMR5T2V#qr$x(FX9I>7-&|mVBUaYj?OXbTd
zGG}iC;SLQJC}GRjNxPB>!pPXp!LPFwW;IDiQzm~-vf|C>h#D_i#OfO8lC72m1l}$m
zGg|%IX;)LxC{v<#g;WEV-rJlm`s&VIL_eb(iV;qKqQSX(k!54(P62LE5mTo%tJwdso+WM?Uda*<1m
zx=3Qmr2(k~MIPA3M8cLY4h=xQ6TkaEX<}mVQ0xty))orzYVtF*TTgX$wOwJ&wR1%q
zqWcgd8g&rYmaPzHDx5@gW#ZVk3z<;mw1&9pnvJ=+c};N^
z6V^*$A1o|_MW+uc^yXw>juwr5cOE1+KD5Ncp1Ty&rON^q#e?Z9MMPnb>{4*%{f3ok>%h(!0mxJun*N^qD;PQedkfy_H
zW&K_*rYkjY1G(s|qN(nE2R+S2;amODBYae26qm7?N9G1`rZ{K#N)2PbaD&i`DZ-o)
z+gs_9j+-lHhF?t+Q(XPEdZpwPagFYPBdg!cs!hE@$z%&1$(NlZ$GT}ClG3%D_Ts4l
zk%gR^dblzTh0|Zb!9)yMM;}9#MQ-Z`ism%^ztu)YCap%w6qgvIa_(!+eXoemxe!gd
zONh3xm%jN1RkhmPgxU_B{#)ih$_1XjLP1zle+>^`P4zjST~ws@BTiKKEo2}I36DWu
z6W;mpi;OVEl4n{`-;lDG0SMEZrWTUoa^{99_uHmpm3Avt$!Ndg^)#2;8sn(NJsIEX
zS?xRm(UAw<*_Er(%6&Sj!G#eA{tt5`JPUWfy;@}x>FED6dpT(*e6pI&I3a`b;I`6`
z4-?#eY=kDHd6_Uk@hqVpkrb0pW0CwWtCW@6DV#JmTmJgF{(cd^&wFqklB6+Tyrih{
zn3`90i)Ib#E~XX1H2v#0XP&F{(5d>I<3wzmpk95kbFXQlc}f*6NQPDEG9E1Aks>^Hw!c(|Fp?qECR
zKC{xEx?VVsFysmx-EJMk*UP%-*zDtJ{ES_*Su*tRW(=F2L`X=d@`Cw1YZYB8+r1+5
zxWB1U@mms#Iyb-*QiFYcPj4AYyLVekz9C)_X62Y|GK>8^p?TgRmphTI#%t$COo#a&
zy=14M>n90&BJ-QD&jHWg{}AmcO(xDO;=Gw+qDy%sWGLl+p+A#ww#K(M&e0MsXe<@`
zr{m`20UBR3`YD_(oERP3lyLwl%M(WKqPSu
zE@;U0`vUX5n6aCj0t{U^A0heKR|)@+PDmV$Z_GO7G<-BChv_CA-;G{3>SLdp`HpzR2)I9kd{`+yuGM$)=~7K*8?EcU
z9>M30-|4a;rc$?^?uJYzIybu{X+#H*V_t0B#<;rPy?wX6DtKwtB(&