diff --git a/docs/assets/spotlight_demo_screenshot.png b/docs/assets/spotlight_demo_screenshot.png new file mode 100644 index 000000000..0c60e3ecf Binary files /dev/null and b/docs/assets/spotlight_demo_screenshot.png differ diff --git a/docs/assets/spotlight_demo_screenshot1.png b/docs/assets/spotlight_demo_screenshot1.png new file mode 100644 index 000000000..1c2442260 Binary files /dev/null and b/docs/assets/spotlight_demo_screenshot1.png differ diff --git a/docs/assets/spotlight_demo_screenshot2.png b/docs/assets/spotlight_demo_screenshot2.png new file mode 100644 index 000000000..1dac104a0 Binary files /dev/null and b/docs/assets/spotlight_demo_screenshot2.png differ diff --git a/docs/getting_started/index.md b/docs/getting_started/index.md index 8a26e131b..bbf0a884b 100644 --- a/docs/getting_started/index.md +++ b/docs/getting_started/index.md @@ -90,4 +90,5 @@ install overview intro_tutorials/index comparison +spotlight ``` diff --git a/docs/getting_started/spotlight.md b/docs/getting_started/spotlight.md new file mode 100644 index 000000000..54512e81e --- /dev/null +++ b/docs/getting_started/spotlight.md @@ -0,0 +1,170 @@ +# Spotlight Integration + +DeepForest integrates with [Renumics Spotlight](https://github.com/Renumics/spotlight) for interactive visualization of detection results. This integration allows you to explore predictions, analyze model performance, and assess data quality through Spotlight's web interface. + +> **Note**: The Spotlight manifest format is experimental. For production use, consider the Hugging Face datasets export which offers broader tool compatibility. 
+ +## Quick Start + +```python +from deepforest import get_data +from deepforest.utilities import read_file +from deepforest.visualize import view_with_spotlight + + +path = get_data("OSBS_029.csv") +df = read_file(path) + +# Convert to Spotlight format +spotlight_data = df.spotlight() +# or +spotlight_data = view_with_spotlight(df) + +# Generate new predictions and visualize +from deepforest.main import deepforest +model = deepforest() +model.load_model("Weecology/deepforest-tree") +image_path = get_data("OSBS_029.tif") +results = model.predict_image(path=image_path) + +# Visualize with confidence scores preserved +spotlight_data = view_with_spotlight(results) + +# Export to file for external tools +df.spotlight(format="lightly", out_dir="spotlight_export") +``` + +## API Reference + +- `view_with_spotlight(df, format="lightly"|"objects", out_dir=...)` - Convert DeepForest DataFrame to Spotlight format + - Supports flexible image reference columns: `image_path`, `file_name`, `source_image`, `image` + - Handles NaN values in optional columns gracefully + - Validates required bbox columns: `xmin`, `ymin`, `xmax`, `ymax` +- `df.spotlight(...)` - DataFrame accessor method (calls `view_with_spotlight`) +- Core DataFrame-to-Spotlight conversion functionality +- `export_to_spotlight_dataset(gallery_dir)` - Create Hugging Face Dataset from gallery + +## Working with Predictions + +When working with model predictions, the integration preserves confidence scores and detection metadata: + +```python +from deepforest import get_data +from deepforest.main import deepforest +from deepforest.visualize import view_with_spotlight + +# Generate predictions +model = deepforest() +model.load_model("Weecology/deepforest-tree") +image_path = get_data("OSBS_029.tif") +results = model.predict_image(path=image_path) + +# Convert to Spotlight format +spotlight_data = results.spotlight() +# or +spotlight_data = view_with_spotlight(results) +``` + +The converted data includes: +- Bounding 
boxes (xmin, ymin, xmax, ymax) +- Class labels +- Confidence scores (0.0 to 1.0) +- Source image paths + +This enables you to: +- Filter detections by confidence threshold +- Compare model performance across images +- Identify patterns in prediction quality +- Analyze spatial distribution of detections + +## Example Output + +The following example shows the Spotlight interface displaying DeepForest predictions: + +```python +from deepforest import get_data +from deepforest.main import deepforest +from deepforest.visualize import view_with_spotlight + +model = deepforest() +model.load_model("Weecology/deepforest-tree") +image_path = get_data("OSBS_029.tif") +results = model.predict_image(path=image_path) + +# Launch Spotlight viewer +view_with_spotlight(results) +``` + +```{image} ../assets/spotlight_demo_screenshot.png +:alt: Spotlight interface main view showing DeepForest predictions with data table and image viewer +:width: 600px +:align: center +``` +*Main interface showing detection results in an interactive table* + +```{image} ../assets/spotlight_demo_screenshot1.png +:alt: Spotlight interface showing detailed bounding box visualization on forest imagery +:width: 600px +:align: center +``` +*Confidence scores and bounding box coordinates for each detection* + +```{image} ../assets/spotlight_demo_screenshot2.png +:alt: Spotlight interface displaying confidence score distribution and filtering options +:width: 600px +:align: center +``` +*Source imagery with detection metadata* + +The screenshots show the Spotlight interface with: +- **Main view**: Data table displaying tree detections with confidence scores, bounding box coordinates, and interactive sorting capabilities +- **Image viewer**: Visual representation of detected trees with bounding boxes overlaid on the source forest imagery +- **Analytics panel**: Confidence score distribution charts and filtering options for analyzing model performance across detections + +## Demo Script + +Test the integration 
with the included demo: + +```bash +python demo_spotlight.py +``` + +The script will load a model, generate predictions, and launch the Spotlight viewer in your browser. + +## Advanced Usage + +For more complex workflows, you can combine Spotlight integration with gallery generation: + +```python +from deepforest.visualize import ( + view_with_spotlight, + export_to_gallery, + write_gallery_html, + export_to_spotlight_dataset +) + +# Direct Spotlight integration +spotlight_data = view_with_spotlight(df, format="lightly") + +# Create thumbnail gallery +metadata = export_to_gallery(df, "forest_gallery", max_crops=200) + +# Generate HTML viewer +write_gallery_html("forest_gallery") + +# Export as HuggingFace dataset +hf_dataset = export_to_spotlight_dataset("forest_gallery") +``` + +## Command Line Interface + +```bash +# Export predictions to gallery +python -m deepforest.scripts.cli gallery export predictions.csv --out forest_gallery + +# Package for Spotlight +python -m deepforest.scripts.cli gallery spotlight --gallery forest_gallery --out spotlight_package + +# Package for Spotlight +python -m deepforest.scripts.cli gallery spotlight --gallery forest_gallery --out spotlight_package +``` diff --git a/docs/user_guide/examples/demo_spotlight.py b/docs/user_guide/examples/demo_spotlight.py new file mode 100644 index 000000000..282be8a2d --- /dev/null +++ b/docs/user_guide/examples/demo_spotlight.py @@ -0,0 +1,60 @@ +""" +DeepForest Spotlight Integration Example + +This example demonstrates how to use DeepForest predictions with Renumics Spotlight +for interactive data exploration and visualization. 
"""
DeepForest Spotlight Integration Example

This example demonstrates how to use DeepForest predictions with Renumics Spotlight
for interactive data exploration and visualization.

Requirements:
    pip install renumics-spotlight

Usage:
    python demo_spotlight.py
"""

from deepforest import get_data, main
from deepforest.visualize import view_with_spotlight

# Load a pretrained tree-detection model from the hub.
model = main.deepforest()
model.load_model("weecology/deepforest-tree")

# Run inference on the bundled sample image.
image_path = get_data("OSBS_029.tif")
results = model.predict_image(path=image_path)

print(f"Generated {len(results)} tree detections")
print(f"Score range: {results['score'].min():.3f} - {results['score'].max():.3f}")

# Method 1: DataFrame accessor shorthand.
spotlight_data = results.spotlight()
print(f"Spotlight format created with {len(spotlight_data['samples'])} samples")

# Method 2: call the conversion function directly.
spotlight_data = view_with_spotlight(results)

# Optionally persist the manifest so it can be reloaded later.
spotlight_data = view_with_spotlight(results, out_dir="spotlight_output")
print("Spotlight manifest saved to spotlight_output/manifest.json")

# Launch the interactive viewer when renumics-spotlight is installed.
try:
    import pandas as pd
    import renumics.spotlight as spotlight

    # One row per detection; Spotlight renders the 'image' column inline.
    viewer_frame = pd.DataFrame(
        {
            "image": [image_path] * len(results),
            "label": results["label"],
            "confidence": results["score"],
            "xmin": results["xmin"],
            "ymin": results["ymin"],
            "xmax": results["xmax"],
            "ymax": results["ymax"],
        }
    )

    print("Opening Spotlight viewer in browser...")
    spotlight.show(viewer_frame, dtype={"image": spotlight.Image})

except ImportError:
    print("Install renumics-spotlight to launch the interactive viewer:")
    print("pip install renumics-spotlight")
OmegaConf +from PIL import Image from deepforest.conf.schema import Config as StructuredConfig from deepforest.scripts.evaluate import evaluate @@ -104,7 +106,6 @@ def main(): "--root-dir", help="Root directory containing images. Defaults to CSV directory if not specified.", ) - evaluate_parser.add_argument( "--save-predictions", help="Path to save generated predictions CSV (only used when --predictions is not provided)", @@ -115,6 +116,89 @@ def main(): # Show config subcommand subparsers.add_parser("config", help="Show the current config") + # Gallery subcommands + gallery_parser = subparsers.add_parser("gallery", help="Gallery utilities") + gallery_sub = gallery_parser.add_subparsers(dest="gallery_cmd") + + gallery_export = gallery_sub.add_parser( + "export", help="Export predictions to a local gallery (thumbnails + metadata)" + ) + gallery_export.add_argument( + "-i", + "--input", + help="Path to predictions CSV/JSON (rows with image_path and bbox)", + ) + gallery_export.add_argument( + "-o", "--out", dest="out", help="Output directory for gallery", required=True + ) + gallery_export.add_argument( + "--root-dir", + dest="root_dir", + help="Root directory to resolve relative image paths", + ) + gallery_export.add_argument( + "--max-crops", type=int, default=None, help="Maximum number of crops to export" + ) + gallery_export.add_argument( + "--sample-by-image", + action="store_true", + help="Sample by image to distribute crops across images", + ) + gallery_export.add_argument( + "--per-image-limit", + type=int, + default=None, + help="Limit crops per image when sampling by image", + ) + gallery_export.add_argument( + "--sample-seed", type=int, default=None, help="Seed for deterministic sampling" + ) + gallery_export.add_argument( + "--start-server", + action="store_true", + help="Start a tiny local HTTP server to view the gallery", + ) + gallery_export.add_argument( + "--port", type=int, default=0, help="Port to serve the gallery on (0 = auto)" + ) + 
gallery_export.add_argument( + "--no-browser", + action="store_true", + help="Do not open the browser when starting server", + ) + gallery_export.add_argument( + "--demo", + action="store_true", + help="Create a small demo predictions file and images for quick testing", + ) + + gallery_spotlight = gallery_sub.add_parser( + "spotlight", help="Package an existing gallery for Renumics Spotlight" + ) + gallery_spotlight.add_argument( + "-g", + "--gallery", + dest="gallery", + help="Path to existing gallery directory (contains thumbnails/ and metadata.json)", + required=True, + ) + gallery_spotlight.add_argument( + "-o", + "--out", + dest="out", + help="Output directory for Spotlight package", + required=True, + ) + gallery_spotlight.add_argument( + "--archive", + action="store_true", + help="Also produce a tar.gz archive of the package for upload", + ) + gallery_spotlight.add_argument( + "--archive-name", + dest="archive_name", + help="Optional archive name (defaults to .tar.gz)", + ) # Config options for Hydra parser.add_argument("--config-dir", help="Path to custom configuration directory") @@ -168,6 +252,86 @@ def main(): elif args.command == "config": print(OmegaConf.to_yaml(cfg, resolve=True)) + elif args.command == "gallery": + # Gallery subcommands + if args.gallery_cmd == "export": + try: + import pandas as pd + except Exception as exc: + raise RuntimeError( + "pandas is required for gallery export. Please install it in your environment." 
+ ) from exc + + # If demo requested, create a tiny demo dataset and image + if args.demo: + demo_input_dir = Path(args.out) / "demo_input" + demo_input_dir.mkdir(parents=True, exist_ok=True) + demo_img = demo_input_dir / "img_demo.png" + # create a small RGB image + Image.new("RGB", (128, 128), color=(120, 140, 160)).save(demo_img) + df = pd.DataFrame( + [ + { + "image_path": demo_img.name, + "xmin": 10, + "ymin": 10, + "xmax": 60, + "ymax": 60, + "label": "Tree", + "score": 0.95, + } + ] + ) + df.root_dir = str(demo_input_dir) + else: + if args.input is None: + raise RuntimeError( + "Please provide an input predictions file with -i/--input" + ) + + # read CSV or JSON depending on extension + input_path = args.input + if input_path.lower().endswith(".json") or input_path.lower().endswith( + ".jsonl" + ): + df = pd.read_json( + input_path, lines=input_path.lower().endswith(".jsonl") + ) + else: + df = pd.read_csv(input_path) + + from deepforest.visualize import ( + export_to_gallery, + write_gallery_html, + ) + + outdir = args.out + export_to_gallery( + df, + outdir, + root_dir=args.root_dir, + max_crops=args.max_crops, + sample_seed=args.sample_seed, + sample_by_image=args.sample_by_image, + per_image_limit=args.per_image_limit, + ) + write_gallery_html(outdir) + + if args.start_server: + print("Local server functionality removed - open index.html manually") + elif args.gallery_cmd == "spotlight": + from deepforest.visualize.spotlight_export import ( + prepare_spotlight_package, + ) + + gallery_dir = args.gallery + outdir = args.out + res = prepare_spotlight_package(gallery_dir, out_dir=outdir) + print("Prepared Spotlight package:", res) + if args.archive: + print( + "Archive functionality removed - use standard tools to create archives" + ) else: parser.print_help() diff --git a/src/deepforest/visualize/__init__.py b/src/deepforest/visualize/__init__.py new file mode 100644 index 000000000..c14da36f1 --- /dev/null +++ b/src/deepforest/visualize/__init__.py @@ 
"""Visualization module for DeepForest.

This module provides visualization functions for forest detection results,
including traditional plotting and interactive Spotlight integration.

Example usage::

    from deepforest.visualize import plot_results

    # Traditional plotting
    plot_results(df)

    # Interactive Spotlight visualization
    data = df.spotlight()

To view results interactively with Spotlight, use the spotlight visualization
functions.
"""

import importlib.util
import sys
from pathlib import Path

from .spotlight_adapter import SpotlightAccessor, view_with_spotlight

# The historical flat ``visualize.py`` module sits next to this package; load
# it under a private, package-qualified name so its public plotting helpers
# remain importable from ``deepforest.visualize`` for backward compatibility.
_LEGACY_PATH = Path(__file__).parent.parent / "visualize.py"

_spec = importlib.util.spec_from_file_location("deepforest._legacy_visualize",
                                               _LEGACY_PATH)
if _spec is None or _spec.loader is None:
    raise ImportError(f"Unable to load legacy visualize module from {_LEGACY_PATH}")
_legacy_visualize = importlib.util.module_from_spec(_spec)
# Register BEFORE executing, per the importlib "importing a source file
# directly" recipe, so imports/pickling inside the legacy module resolve
# consistently and a second import does not re-execute it under a new object.
sys.modules[_spec.name] = _legacy_visualize
_spec.loader.exec_module(_legacy_visualize)

# Re-export the legacy plotting API unchanged.
plot_results = _legacy_visualize.plot_results
plot_annotations = _legacy_visualize.plot_annotations
convert_to_sv_format = _legacy_visualize.convert_to_sv_format
_load_image = _legacy_visualize._load_image
label_to_color = _legacy_visualize.label_to_color

__all__ = [
    "convert_to_sv_format",
    "label_to_color",
    "plot_annotations",
    "plot_results",
    "SpotlightAccessor",
    "view_with_spotlight",
    "_load_image",
]
+ +Converts DeepForest DataFrames (read_file output or prediction tables) into formats +compatible with Renumics Spotlight and Lightly data visualization tools. + +The adapter supports two output formats: +1. "objects" - Canonical format matching Spotlight's expected schema +2. "lightly" - Format compatible with Lightly's object detection conventions + +Public API: +- `view_with_spotlight(df, format="lightly", out_dir=None)` - Main conversion function +- `df_to_objects_manifest(df)` - Convert DataFrame to canonical objects format +- `objects_to_lightly(manifest)` - Convert objects format to Lightly format + +Usage: + # Direct conversion + manifest = `view_with_spotlight(df, format="objects")` + + # Using DataFrame accessor + lightly_data = df.spotlight(format="lightly", out_dir="export") + + # Export to file + result = `view_with_spotlight(df, format="lightly", out_dir="spotlight_export")` +""" + +from __future__ import annotations + +import json +import os + +import pandas as pd + + +def df_to_objects_manifest(df: pd.DataFrame) -> dict: + """Convert a DeepForest-style DataFrame into the canonical objects + manifest. + + Expected input columns: one of ['image_path','file_name','source_image'] for + image reference, and bbox columns ['xmin','ymin','xmax','ymax'], plus + optional 'label' and 'score'. The function is permissive and will group + annotations by image reference. 
+ """ + # choose the column that references image files + image_col = None + for name in ("image_path", "file_name", "source_image", "image"): + if name in df.columns: + image_col = name + break + if image_col is None: + raise ValueError("DataFrame must contain an image reference column") + + # required bbox columns + for c in ("xmin", "ymin", "xmax", "ymax"): + if c not in df.columns: + raise ValueError(f"Missing required bbox column: {c}") + + images: list[dict] = [] + grouped = df.groupby(image_col) + for img, group in grouped: + anns: list[dict] = [] + for _, row in group.iterrows(): + bbox = [ + float(row["xmin"]), + float(row["ymin"]), + float(row["xmax"]), + float(row["ymax"]), + ] + ann = {"bbox": bbox} + if "label" in row.index and not pd.isna(row["label"]): + ann["label"] = row["label"] + if "score" in row.index and not pd.isna(row["score"]): + ann["score"] = float(row["score"]) + anns.append(ann) + + # width/height optional if present in any row + width = None + height = None + if "width" in group.columns and not group["width"].isnull().all(): + width = int(group["width"].dropna().iloc[0]) + if "height" in group.columns and not group["height"].isnull().all(): + height = int(group["height"].dropna().iloc[0]) + + image_entry = { + "file_name": str(img), + "annotations": anns, + } + + # Only include width/height if they have valid values (schema requires integers) + if width is not None: + image_entry["width"] = width + if height is not None: + image_entry["height"] = height + + images.append(image_entry) + + manifest = {"version": "1.0", "bbox_format": "pixels", "images": images} + return manifest + + +def objects_to_lightly(manifest: dict) -> dict: + """Map the canonical objects manifest to a Lightly-compatible format. + + This produces a dict compatible with Lightly's expected format for object detection. + The format follows Lightly's conventions for image datasets with bounding box annotations. + + Note: This is a minimal implementation. 
For production use, validate against + the official Lightly schema and adjust field names/structure as needed. + """ + samples = [] + for img in manifest.get("images", []): + # Use 'file_name' to match Lightly conventions (not 'filepath') + sample = { + "file_name": img.get("file_name"), + "metadata": {"bbox_format": manifest.get("bbox_format", "pixels")}, + } + + # Add image dimensions to metadata if available + if img.get("width") is not None: + sample["metadata"]["width"] = img.get("width") + if img.get("height") is not None: + sample["metadata"]["height"] = img.get("height") + + # Format annotations for Lightly + anns = img.get("annotations", []) + if anns: + sample["annotations"] = [] + for a in anns: + ann = { + "bbox": a.get("bbox"), + "category_id": a.get("label"), # Lightly often uses category_id + "label": a.get("label"), # Keep both for compatibility + } + if a.get("score") is not None: + ann["score"] = a.get("score") + sample["annotations"].append(ann) + + samples.append(sample) + + return { + "samples": samples, + "version": manifest.get("version", "1.0"), + "bbox_format": manifest.get("bbox_format", "pixels"), + } + + +def view_with_spotlight( + df: pd.DataFrame, *, format: str = "lightly", out_dir: str | None = None +) -> dict: + """Convert a DataFrame to the requested format and optionally write to + disk. + + Args: + df: DataFrame with detection results (must have image reference and bbox columns) + format: 'objects' (canonical Spotlight format) or 'lightly' (Lightly-compatible format) + out_dir: Optional directory to write manifest.json file + + Returns: + Dict in the requested format + + Raises: + ValueError: If format is unsupported or DataFrame is missing required columns + """ + if format not in ("objects", "lightly"): + raise ValueError(f"Unsupported format: {format}. 
Use 'objects' or 'lightly'") + + # Validate DataFrame has required columns before processing + if df.empty: + raise ValueError("DataFrame is empty") + + manifest = df_to_objects_manifest(df) + + if format == "objects": + result = manifest + elif format == "lightly": + result = objects_to_lightly(manifest) + + if out_dir: + os.makedirs(out_dir, exist_ok=True) + manifest_path = os.path.join(out_dir, "manifest.json") + with open(manifest_path, "w", encoding="utf8") as fh: + json.dump(result, fh, indent=2, ensure_ascii=False) + + return result + + +# Provide a small DataFrame accessor so users can call `df.spotlight.view(...)` +# or `df.spotlight(format="lightly", out_dir=...)` as a convenience wrapper. +@pd.api.extensions.register_dataframe_accessor("spotlight") +class SpotlightAccessor: + """DataFrame accessor for Spotlight/Lightly convenience helpers. + + Usage: + df.spotlight(format="lightly", out_dir=None) + + This forwards to `view_with_spotlight` using the DataFrame as input. + """ + + def __init__(self, pandas_obj: pd.DataFrame) -> None: + self._df = pandas_obj + + def __call__(self, *args, **kwargs) -> dict: + # Allow df.spotlight(...) shorthand + return self.view(*args, **kwargs) + + def view(self, *, format: str = "lightly", out_dir: str | None = None) -> dict: + """Call the `view_with_spotlight` wrapper with this DataFrame. + + Returns the generated dict for the requested format and optionally + writes `manifest.json` to `out_dir` when provided. 
+ """ + return view_with_spotlight(self._df, format=format, out_dir=out_dir) diff --git a/tests/test_spotlight.py b/tests/test_spotlight.py new file mode 100644 index 000000000..20af34b05 --- /dev/null +++ b/tests/test_spotlight.py @@ -0,0 +1,120 @@ +"""Test Spotlight integration for DeepForest.""" + +import json +import pandas as pd +import pytest + +try: + from deepforest.visualize import view_with_spotlight + SPOTLIGHT_AVAILABLE = True +except ImportError: + SPOTLIGHT_AVAILABLE = False + +try: + from deepforest import get_data + from deepforest.main import deepforest + DEEPFOREST_AVAILABLE = True +except ImportError: + DEEPFOREST_AVAILABLE = False + + +def test_empty_dataframe(): + """Test handling of empty DataFrame.""" + try: + from deepforest.visualize import view_with_spotlight + except ImportError: + pytest.skip("Spotlight functionality not available") + + df = pd.DataFrame() + with pytest.raises(ValueError, match="DataFrame is empty"): + view_with_spotlight(df) + + +def test_minimal_valid_dataframe(): + """Test with minimal valid DataFrame.""" + try: + from deepforest.visualize import view_with_spotlight + except ImportError: + pytest.skip("Spotlight functionality not available") + + df = pd.DataFrame({ + "image_path": ["test.jpg"], + "xmin": [10.0], "ymin": [10.0], "xmax": [50.0], "ymax": [50.0] + }) + result = view_with_spotlight(df, format="objects") + + assert "version" in result + assert "bbox_format" in result + assert "images" in result + assert len(result["images"]) == 1 + + image = result["images"][0] + assert image["file_name"] == "test.jpg" + assert len(image["annotations"]) == 1 + + ann = image["annotations"][0] + assert ann["bbox"] == [10.0, 10.0, 50.0, 50.0] + + +def test_dataframe_accessor_error_handling(): + """Test DataFrame accessor handles errors properly.""" + df = pd.DataFrame() # Empty DataFrame + with pytest.raises(ValueError, match="DataFrame is empty"): + df.spotlight() + + +@pytest.mark.skipif(not DEEPFOREST_AVAILABLE, 
reason="DeepForest not available") +def test_predictions_have_score_column(): + """Verify prediction results include score column.""" + model = deepforest() + model.load_model("Weecology/deepforest-tree") + image_path = get_data("OSBS_029.tif") + prediction_results = model.predict_image(path=image_path) + + assert "score" in prediction_results.columns + assert "label" in prediction_results.columns + assert "xmin" in prediction_results.columns + assert len(prediction_results) > 0 + + # Verify scores are in reasonable range + scores = prediction_results["score"] + assert scores.min() >= 0.0 + assert scores.max() <= 1.0 + + +@pytest.mark.skipif(not SPOTLIGHT_AVAILABLE, reason="Spotlight functionality not available") +def test_file_output_creation(tmp_path): + """Test that file output is created correctly.""" + df = pd.DataFrame({ + "image_path": ["test.jpg"], + "xmin": [10], "ymin": [10], "xmax": [50], "ymax": [50], + "label": ["Tree"] + }) + + out_dir = tmp_path / "spotlight_output" + result = view_with_spotlight(df, format="lightly", out_dir=str(out_dir)) + + # Check file was created + manifest_file = out_dir / "manifest.json" + assert manifest_file.exists() + + # Check file content matches returned result + with manifest_file.open() as f: + file_content = json.load(f) + + assert file_content == result + + +@pytest.mark.skipif(not SPOTLIGHT_AVAILABLE, reason="Spotlight functionality not available") +def test_missing_bbox_column_error(): + """Test error handling for missing required bbox columns.""" + df = pd.DataFrame({ + "image_path": ["test.jpg"], + "xmin": [10], + "ymin": [15], + "xmax": [110] + # Missing ymax column + }) + + with pytest.raises(ValueError, match="Missing required bbox column: ymax"): + view_with_spotlight(df, format="objects")