diff --git a/.cursorrules b/.cursor/rules/.cursorrules similarity index 100% rename from .cursorrules rename to .cursor/rules/.cursorrules diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d1b0e86f..7bcc122b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,6 +26,9 @@ repos: # Use same config as CI - read from pyproject.toml # CI uses: uv run mypy abses/ # This ensures local and CI use identical mypy settings + # Check entire abses/ directory to match CI behavior + pass_filenames: false + args: ["abses/"] - repo: https://github.com/econchick/interrogate rev: '1.7.0' # Check for the latest version hooks: diff --git a/abses/__init__.py b/abses/__init__.py index 8eb3c696..e3ee39b5 100644 --- a/abses/__init__.py +++ b/abses/__init__.py @@ -32,19 +32,14 @@ "raster_attribute", ] -import os - -# Disable loguru default output by setting environment variable BEFORE any imports -# This prevents loguru from adding default handlers automatically -os.environ["LOGURU_AUTOINIT"] = "0" - +import warnings from importlib.metadata import PackageNotFoundError, version try: __version__ = f"v{version('abses')}" except PackageNotFoundError: - # Fallback for development mode when package metadata is not available - __version__ = "v0.7.5-dev" + __version__ = "v0.10.0-dev" + warnings.warn(f"Package metadata not found, using fallback version {__version__}") from .agents.actor import Actor, alive_required, perception from .agents.sequences import ActorsList @@ -56,7 +51,3 @@ from .space.nature import BaseNature, PatchModule from .utils.data import load_data from .utils.errors import ABSESpyError - -# Configure loguru to be silent by default -# The LOGURU_AUTOINIT environment variable set above prevents automatic handler creation -# Users can explicitly enable logging via model configuration (log.console: true) diff --git a/abses/agents/container.py b/abses/agents/container.py index 3bba2dc9..546d634f 100644 --- a/abses/agents/container.py +++ b/abses/agents/container.py @@ -11,6 +11,7 @@ from __future__ import annotations +import logging from functools import partial from typing import ( TYPE_CHECKING, @@ -26,7 +27,6 @@ import geopandas as gpd import pyproj -from loguru import logger from mesa import Model from mesa.agent import AgentSet from shapely.geometry.base import BaseGeometry @@ -43,6 +43,9 @@ from abses.space.cells import PatchCell +logger = logging.getLogger(__name__) + + class _AgentsContainer: """Base container for managing agents in ABSESpy models. 
diff --git a/abses/conf/absespy.yaml b/abses/conf/absespy.yaml new file mode 100644 index 00000000..ebcef5e4 --- /dev/null +++ b/abses/conf/absespy.yaml @@ -0,0 +1,79 @@ +defaults: + - _self_ + +hydra: + job: + name: ${oc.select:exp.name,ABSESpy} + run: + dir: '${oc.select:exp.outdir,out}/${oc.select:exp.name,ABSESpy}/${now:%Y-%m-%d}/${now:%H-%M-%S}' + sweep: + dir: ${oc.select:exp.outdir,out}/${oc.select:exp.name,ABSESpy}/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.id}_${hydra:job.override_dirname} + + # Hydra logging configuration (auto-generated from log.hydra) + # This will be populated from log.hydra configuration at runtime + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + datefmt: '%H:%M:%S' + handlers: + console: + class: logging.StreamHandler + level: WARNING + formatter: simple + stream: ext://sys.stderr + root: + level: INFO + handlers: [console] + disable_existing_loggers: false + + # Verbosity settings + verbose: false + +exp: + outdir: out + name: ABSESpy + repeats: 1 + +# Unified logging configuration +log: + # Logging mode for repeated runs: once | separate | merge + mode: once + + # Experiment-level logging (progress, summary, etc.) + exp: + stdout: + enabled: true + level: INFO + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + datefmt: '%H:%M:%S' + file: + enabled: false + level: INFO + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + datefmt: '%H:%M:%S' + + # Model run-level logging (each model execution) + run: + stdout: + enabled: false + level: INFO + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + datefmt: '%H:%M:%S' + file: + enabled: true + level: INFO + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + datefmt: '%H:%M:%S' + name: model # Log file name (without extension) + rotation: null # e.g., "1 day", "100 MB" + retention: null # e.g., "10 days" + # MESA-specific logging configuration + mesa: + level: null # If null, uses run.file.level + format: null # If null, uses run.file.format + +# Note: outpath is dynamically set by Experiment at runtime +outpath: null diff --git a/abses/core/experiment.py b/abses/core/experiment.py index 49157dd8..3721abe5 100644 --- a/abses/core/experiment.py +++ b/abses/core/experiment.py @@ -12,6 +12,7 @@ import copy import inspect import itertools +import logging import os import random from copy import deepcopy @@ -31,15 +32,13 @@ TypeVar, ) -import pandas as pd -from loguru import logger - try: from typing import TypeAlias except ImportError: from typing_extensions import TypeAlias import numpy as np +import pandas as pd from hydra import compose, initialize from hydra.core.global_hydra import GlobalHydra from hydra.core.hydra_config import HydraConf, HydraConfig @@ -49,6 +48,11 @@ from abses.core.job_manager import ExperimentManager from abses.core.model import MainModel +from abses.utils.exp_logging import EXP_LOGGER_NAME, setup_exp_logger +from abses.utils.log_parser import get_file_config, get_log_mode + +# Use experiment-level logger, separate from model run loggers +logger = logging.getLogger(EXP_LOGGER_NAME) Configurations: TypeAlias = DictConfig | str | Dict[str, Any] T = TypeVar("T") @@ -153,6 +157,22 @@ def __init__( self._manager = ExperimentManager(model_cls) self.cfg = cfg + # Setup experiment-level logger (separate from model run loggers) + # This ensures experiment-level messages don't mix with model run logs + # Pass DictConfig directly, don't convert to dict (log_parser 
needs DictConfig) + if isinstance(cfg, DictConfig): + # Create a copy to avoid modifying original + cfg_dict = OmegaConf.to_container(cfg, resolve=True) + if isinstance(cfg_dict, dict): + cfg_dict["outpath"] = str(self.outpath) # Convert Path to string + cfg_copy = OmegaConf.create(cfg_dict) + setup_exp_logger(cfg_copy) + elif isinstance(cfg, dict): + # Create a copy to avoid modifying original input + cfg_copy = cfg.copy() + cfg_copy["outpath"] = str(self.outpath) # Convert Path to string + setup_exp_logger(cfg_copy) + @property def model_cls(self) -> Type[MainModelProtocol]: """Model class.""" @@ -370,6 +390,66 @@ def _get_seed(self, repeat_id: int, job_id: Optional[int] = None) -> Optional[in r = random.Random(self._base_seed + job_id * 1000 + repeat_id) return r.randrange(2**32) + def _get_logging_mode(self) -> str: + """Get logging mode from experiment configuration. + + Returns: + Logging mode: 'once', 'separate', or 'merge'. Defaults to 'once'. + """ + return get_log_mode(self._cfg) + + def _get_log_file_path( + self, log_name: str, repeat_id: int, logging_mode: str + ) -> Optional[Path]: + """Get log file path for a specific repeat. + + Args: + log_name: Base log file name. + repeat_id: Repeat ID (1-indexed). + logging_mode: Logging mode. + + Returns: + Path to log file, or None if logging should be disabled. + """ + from abses.utils.log_config import determine_log_file_path + + return determine_log_file_path( + outpath=self.outpath, + log_name=log_name, + logging_mode=logging_mode, + repeat_id=repeat_id, + ) + + def _log_experiment_info( + self, cfg: DictConfig, repeats: int, logging_mode: str = "once" + ) -> None: + """Log experiment-level information to experiment log file. + + Args: + cfg: Configuration dictionary. + repeats: Number of repeats. + logging_mode: The logging mode being used. 
+ """ + try: + from abses import __version__ + except ImportError: + __version__ = "unknown" + + # Get model class name + model_name = self.model_cls.__name__ + + # Log experiment information + logger.info("=" * 60) + logger.info("Experiment Information".center(60)) + logger.info("=" * 60) + logger.info(f"Model: {model_name}") + logger.info(f"ABSESpy version: {__version__}") + logger.info(f"Total repeats: {repeats}") + logger.info(f"Output directory: {self.outpath}") + logger.info(f"Logging mode: {logging_mode}") + logger.info("=" * 60) + logger.info("") + def _batch_run_repeats( self, cfg: DictConfig, @@ -378,6 +458,32 @@ def _batch_run_repeats( self, cfg: DictConfig, display_progress: bool = True, ) -> None: """Run repeated experiments.""" + logging_mode = self._get_logging_mode() + run_file_cfg = get_file_config(cfg, "run") + log_name = str(run_file_cfg.get("name", "model")).replace(".log", "") + + # Log experiment-level information to experiment log file + # Check if exp.file is enabled - if so, log experiment info for all modes + exp_file_cfg = get_file_config(cfg, "exp") + if exp_file_cfg: + # exp.file is enabled, log experiment info + self._log_experiment_info(cfg, repeats, logging_mode) + # Also log framework banner to experiment log + from abses.utils.logging import setup_logger_info + + setup_logger_info(self) + elif logging_mode == "separate": + # In separate mode, even if exp.file is disabled, log to experiment log + self._log_experiment_info(cfg, repeats, logging_mode) + from abses.utils.logging import setup_logger_info + + setup_logger_info(self) + + # For merge mode, log separator before first repeat + if logging_mode == "merge" and repeats > 1: + # Note: This will be logged in the model's logger setup + pass + if self._is_hydra_parallel() or number_process == 1: # Run sequentially when Hydra handles parallelism or a single process is specified disable = repeats == 1 or not display_progress @@ -386,6 +492,24 @@ def _batch_run_repeats( disable=disable, desc=f"Job {self.job_id} repeats {repeats} times.", ): + # Log separator for merge mode + if logging_mode == "merge" and repeat_id > 1: + # Note: Separator will be logged in model setup + pass + + # Get log file path for this repeat + log_path = self._get_log_file_path(log_name, repeat_id, logging_mode) + + # Display log file location for separate mode + # This should only go to stdout, not to model run log files + if ( + display_progress + and logging_mode == "separate" + and log_path is not None + ): + # Use print instead of logger to avoid writing to model run log files + print(f"Repeat {repeat_id}: Logging to {log_path}") + run_single( model_cls=self.model_cls, cfg=cfg, diff --git a/abses/core/model.py b/abses/core/model.py index 4ca164fe..de68bfb0 100644 --- a/abses/core/model.py +++ b/abses/core/model.py @@ -43,7 +43,14 @@ from abses.utils.args import merge_parameters from abses.utils.config import apply_validation, normalize_config from abses.utils.datacollector import ABSESpyDataCollector +from abses.utils.log_parser import ( + get_file_config, + get_log_mode, + get_mesa_config, + get_stdout_config, +) from abses.utils.logging import ( + log_repeat_separator, log_session, logger, setup_logger_info, @@ -143,6 +150,7 @@ def __init__( self.set_state(State.INIT) # Setup logging if configured + # Check if new log structure exists log_cfg = self.settings.get("log", {}) if log_cfg: self._setup_logger(log_cfg) @@ -286,38 +294,89 @@ def _setup_logger(self, log_cfg: Dict[str, Any]) -> None: """Setup logging for the model. Args: - log_cfg: Logging configuration dictionary.
+ log_cfg: Logging configuration dictionary (legacy, kept for compatibility). """ - if not log_cfg: - return + # Get logging configuration from new structure + run_stdout = get_stdout_config(self.settings, "run") + run_file = get_file_config(self.settings, "run") + mesa_cfg = get_mesa_config(self.settings, "run") + logging_mode = get_log_mode(self.settings) - # Parse logging configuration - # Only setup logging if console or file logging is explicitly enabled - name = str(log_cfg.get("name", "model")).replace(".log", "") - level = log_cfg.get("level", "INFO") - rotation = log_cfg.get("rotation", None) # e.g., "1 day" - retention = log_cfg.get("retention", None) # e.g., "10 days" + # Check if logging is enabled + stdout_enabled = bool(run_stdout) + file_enabled = bool(run_file) and self.outpath is not None - # Default: no output unless explicitly configured - console = log_cfg.get("console", False) - file_logging = self.outpath is not None - - # Only setup logger if at least one output is enabled - if not (console or file_logging): + if not (stdout_enabled or file_enabled): return + # Extract configuration values + name = ( + str(run_file.get("name", "model")).replace(".log", "") + if run_file + else "model" + ) + level = ( + run_file.get("level", "INFO") + if file_enabled + else run_stdout.get("level", "INFO") + ) + rotation = run_file.get("rotation", None) if run_file else None + retention = run_file.get("retention", None) if run_file else None + console = stdout_enabled + + # Extract format and datefmt from config + console_format = run_stdout.get("format", None) if run_stdout else None + console_datefmt = run_stdout.get("datefmt", None) if run_stdout else None + file_format = run_file.get("format", None) if run_file else None + file_datefmt = run_file.get("datefmt", None) if run_file else None + console_level = run_stdout.get("level", None) if run_stdout else None + file_level = run_file.get("level", None) if run_file else None + + # MESA configuration + mesa_format = mesa_cfg.get("format", None) if mesa_cfg else None + mesa_level = mesa_cfg.get("level", None) if mesa_cfg else None + # Setup integrated logging for ABSESpy and Mesa setup_model_logger( name=name, level=level, outpath=self.outpath, console=console, + console_level=console_level, + console_format=console_format, + console_datefmt=console_datefmt, rotation=rotation, retention=retention, + logging_mode=logging_mode, + repeat_id=self.run_id, + file_level=file_level, + file_format=file_format, + file_datefmt=file_datefmt, + mesa_format=mesa_format, + mesa_level=mesa_level, ) + # For merge mode, add separator for repeats after the first one + if logging_mode == "merge" and self.run_id is not None and self.run_id > 1: + # Get total repeats from exp config if available + # Both dict and DictConfig support .get() method + total_repeats = 1 + exp_cfg = self.settings.get("exp", {}) + if isinstance(exp_cfg, (dict, DictConfig)): + total_repeats = exp_cfg.get("repeats", 1) + log_repeat_separator(self.run_id, total_repeats) + # Display startup info - setup_logger_info(self.exp) + # In separate mode, setup_logger_info should only go to experiment log + # For model run logs, only log model-specific info + if logging_mode == "separate": + # In separate mode, don't log framework banner to model run log + # It will be logged to experiment log file instead + pass + else: + # In once/merge mode, log to model run log + setup_logger_info(self.exp) + # Always log model-specific info to model run log self._logging_begin() def add_name(self, name: 
str, check: Optional[HowCheckName] = None) -> None: diff --git a/abses/human/links.py b/abses/human/links.py index 1135d000..b732fe91 100644 --- a/abses/human/links.py +++ b/abses/human/links.py @@ -10,6 +10,7 @@ from __future__ import annotations import contextlib +import logging from abc import abstractmethod from functools import cached_property from typing import ( @@ -27,7 +28,6 @@ import numpy as np import pandas as pd -from loguru import logger with contextlib.suppress(ImportError): import networkx as nx @@ -42,6 +42,8 @@ from abses.core.protocols import LinkContainerProtocol from abses.core.types import Direction, LinkingNode, TargetName, UniqueID +logger = logging.getLogger(__name__) + def get_node_unique_id(node: Any) -> UniqueID: """Gets a unique ID for a node when importing actors from graph. diff --git a/abses/space/patch.py b/abses/space/patch.py index 6d877f40..cd2cbc99 100644 --- a/abses/space/patch.py +++ b/abses/space/patch.py @@ -10,6 +10,7 @@ from __future__ import annotations import functools +import logging from typing import ( TYPE_CHECKING, Any, @@ -30,7 +31,6 @@ import rioxarray import xarray as xr from geocube.api.core import make_geocube -from loguru import logger from mesa.space import Coordinate from mesa_geo.raster_layers import RasterLayer from numpy.typing import NDArray @@ -55,6 +55,8 @@ Raster, ) +logger = logging.getLogger(__name__) + class PatchModule(BaseModule, RasterLayer): """Base class for managing raster-based spatial modules in ABSESpy. diff --git a/abses/utils/__init__.py b/abses/utils/__init__.py index 1a6d9e9e..e537932d 100644 --- a/abses/utils/__init__.py +++ b/abses/utils/__init__.py @@ -2,6 +2,7 @@ # -*-coding:utf-8 -*- """Utils module for ABSESpy.""" +from .analysis import ExpAnalyzer, ResultAnalyzer from .data import load_data from .errors import ABSESpyError from .func import with_axes @@ -12,4 +13,6 @@ "ABSESpyError", "with_axes", "ListRandom", + "ResultAnalyzer", + "ExpAnalyzer", ] diff --git a/abses/utils/analysis.py b/abses/utils/analysis.py new file mode 100644 index 00000000..5d2789c0 --- /dev/null +++ b/abses/utils/analysis.py @@ -0,0 +1,542 @@ +#!/usr/bin/env python3 +# -*-coding:utf-8 -*- +# @Author : Shuang (Twist) Song +# @Contact : SongshGeo@gmail.com +# GitHub : https://github.com/SongshGeo +# Website: https://cv.songshgeo.com/ + +"""Analysis utilities for Hydra multirun experiment results. + +This module provides tools for analyzing and aggregating results from +Hydra multirun experiments, including reading configurations, loading +data files, and performing data aggregation. +""" + +from __future__ import annotations + +import logging +from functools import cached_property, lru_cache +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List + +import pandas as pd +import yaml # type: ignore[import-untyped] +from omegaconf import DictConfig, ListConfig, OmegaConf + +try: + from typing import TypeAlias +except ImportError: + from typing_extensions import TypeAlias + +if TYPE_CHECKING: + pass + +PathLike: TypeAlias = str | Path + +logger = logging.getLogger(__name__) + + +class _BaseAnalyzer: + """Base analyzer class for experiment results. + + Provides basic functionality for path management, configuration reading, + and value selection from Hydra configuration files. + + Attributes: + path: Path to the experiment directory. + config: Loaded configuration from YAML file. + """ + + def __init__(self, path: PathLike) -> None: + """Initialize the base analyzer. 
+ + Args: + path: Path to the experiment directory or configuration file. + """ + self.path = path + + @property + def path(self) -> Path: + """Path to the experiment directory. + + Returns: + Path object pointing to the experiment directory. + """ + return self._path + + @path.setter + def path(self, path: PathLike) -> None: + """Set the path property. + + Args: + path: Path string or Path object to set. + """ + path = Path(path) + self._path = path + + @property + def config(self) -> DictConfig | ListConfig: + """Configuration loaded from YAML file. + + Returns: + DictConfig or ListConfig object containing the configuration. + + Raises: + FileNotFoundError: If the configuration file does not exist. + """ + if not hasattr(self, "_config"): + raise AttributeError("Configuration has not been loaded yet.") + return self._config + + @config.setter + def config(self, yaml_file_path: PathLike) -> None: + """Load configuration from a YAML file. + + Args: + yaml_file_path: Path to the YAML configuration file. + + Raises: + FileNotFoundError: If the configuration file does not exist. + """ + yaml_file_path = Path(yaml_file_path) + if not yaml_file_path.is_file(): + raise FileNotFoundError(f"Configuration file not found: {yaml_file_path}") + self._config = OmegaConf.load(yaml_file_path) + + @cached_property + def subdir(self) -> List[Path]: + """List of subdirectories in the experiment path. + + Returns: + List of Path objects for subdirectories. + """ + if not self.path.is_dir(): + return [] + return [dir_path for dir_path in self.path.iterdir() if dir_path.is_dir()] + + def select(self, key: str) -> Any: + """Select a value from the configuration using OmegaConf select. + + Args: + key: Configuration key path (e.g., "model.density"). + + Returns: + The value at the specified key path. + + Raises: + AttributeError: If the key is not found in the configuration. + """ + return OmegaConf.select(self.config, key=key) + + +class ResultAnalyzer(_BaseAnalyzer): + """Analyzer for a single Hydra run result. + + This class analyzes the output of a single Hydra experiment run, + including reading configuration, loading data files, and extracting + reporter information. + + Attributes: + data: Raw data loaded from CSV or datacollector output. + configs: Full configuration dictionary. + model_reporter: Model-level reporter configuration. + agent_reporter: Agent-level reporter configuration. + final_reporter: Final reporter configuration. + """ + + def __init__(self, path: PathLike) -> None: + """Initialize the result analyzer. + + Args: + path: Path to the single run output directory. + + Raises: + FileNotFoundError: If the path is not a valid directory. + """ + # Initialize attributes + self.configs: Dict[str, Any] = {} + self.model_reporter: Dict[str, Any] = {} + self.agent_reporter: Dict[str, Dict[str, Any]] = {} + self.final_reporter: Dict[str, Any] = {} + + super().__init__(path=path) + if not self.path.is_dir(): + raise FileNotFoundError(f"{path} is not a directory.") + self._hydra = self.path / ".hydra" + if self._hydra.is_dir(): + self.config = self._hydra / "config.yaml" + self._load_hydra_cfg(self._hydra) + else: + # If no .hydra directory, try to find config.yaml in the path + config_path = self.path / "config.yaml" + if config_path.is_file(): + self.config = config_path + self._load_hydra_cfg(self.path) + else: + raise FileNotFoundError( + f"No configuration found in {path}. 
Expected .hydra/config.yaml or config.yaml" + ) + self.read_data() + + @property + def data(self) -> pd.DataFrame: + """Raw data loaded from CSV or datacollector output. + + Returns: + DataFrame containing the raw data. + + Raises: + AttributeError: If data has not been loaded yet. + """ + return self._data + + @data.setter + def data(self, data: pd.DataFrame) -> None: + """Set the data property. + + Args: + data: DataFrame to set as the data. + + Raises: + TypeError: If data is not a DataFrame. + """ + self._check_data(data) + self._data = data + + @staticmethod + def _check_data(data: pd.DataFrame) -> None: + """Check if data is a valid DataFrame. + + Args: + data: Data to check. + + Raises: + TypeError: If data is not a DataFrame. + """ + if not isinstance(data, pd.DataFrame): + raise TypeError(f"{type(data)} is not a DataFrame.") + + def _load_hydra_cfg(self, path: PathLike) -> None: + """Load Hydra configuration and extract reporter information. + + Args: + path: Path to the directory containing config.yaml. + """ + config_path = Path(path) / "config.yaml" + if not config_path.is_file(): + # Try alternative locations + config_path = Path(path).parent / "config.yaml" + if not config_path.is_file(): + logger.warning(f"Could not find config.yaml in {path}") + self.configs = {} + self.model_reporter = {} + self.agent_reporter = {} + self.final_reporter = {} + return + + with open(config_path, "r", encoding="utf-8") as f: + loaded_configs = yaml.safe_load(f) + self.configs = loaded_configs if loaded_configs is not None else {} + + # Extract reporter information (support both 'reports' and 'tracker') + reporters: Dict[str, Any] = self.configs.get("reports", {}) or self.configs.get( + "tracker", {} + ) + self.model_reporter = reporters.get("model", {}) + self.agent_reporter = reporters.get("agents", {}) + self.final_reporter = reporters.get("final", {}) + + def read_data(self) -> None: + """Read data from CSV files or datacollector output. + + This method attempts to find and load data files in the following order: + 1. Common CSV filenames (cities.csv, 1_cities.csv, etc.) + 2. Datacollector output files if available + 3. User-specified files + + Raises: + FileNotFoundError: If no data file is found. + """ + # Try common CSV filenames + common_names = ["cities.csv", "1_cities.csv", "data.csv", "results.csv"] + for name in common_names: + csv_path = self.path / name + if csv_path.is_file(): + self.data = self.read_csv(csv_path) + logger.info(f"Loaded data from {csv_path}.") + return + + # Try to find any CSV file in the directory + csv_files = list(self.path.glob("*.csv")) + if csv_files: + # Use the first CSV file found + self.data = self.read_csv(csv_files[0]) + logger.info(f"Loaded data from {csv_files[0]}.") + return + + # If no CSV found, try to load from datacollector output + # This would require the datacollector to have saved its output + # For now, we'll raise an error + raise FileNotFoundError( + f"No data file found in {self.path}. " + f"Expected CSV files or datacollector output." + ) + + def read_csv(self, path: PathLike) -> pd.DataFrame: + """Read a CSV file into a DataFrame. + + Args: + path: Path to the CSV file. + + Returns: + DataFrame containing the CSV data. + + Raises: + FileNotFoundError: If the file does not exist or is invalid. 
+ """ + if isinstance(path, str): + path = Path(path) + if not path.is_file(): + raise FileNotFoundError(f"CSV file not found: {path}") + if path.suffix != ".csv": + raise FileNotFoundError(f"File is not a CSV: {path}") + + # Try reading with index_col=0, fallback to no index + try: + return pd.read_csv(path, index_col=0) + except (ValueError, IndexError): + return pd.read_csv(path) + + @lru_cache(maxsize=1) + def get_data(self, **kwargs: Any) -> pd.DataFrame: + """Get processed data with optional transformations. + + This method can be overridden or extended to support different + aggregation levels or data transformations. + + Args: + **kwargs: Additional arguments for data processing. + + Returns: + Processed DataFrame. + """ + return self.data.copy() + + def select(self, key: str) -> Any: + """Select a value from the configuration. + + Args: + key: Configuration key path. + + Returns: + The value at the specified key path. + """ + return OmegaConf.select(self.config, key=key) + + +class ExpAnalyzer(_BaseAnalyzer): + """Analyzer for a group of Hydra multirun experiment results. + + This class analyzes multiple experiment runs from a Hydra multirun, + including parsing configuration overrides, aggregating data, and + comparing differences between runs. + + Attributes: + overrides: Dictionary of configuration overrides from multirun.yaml. + results: Generator yielding ResultAnalyzer for each run. + """ + + def __init__(self, path: PathLike, enable_logger: bool = True) -> None: + """Initialize the experiment analyzer. + + Args: + path: Path to the multirun output directory. + enable_logger: Whether to enable logging (default: True). + """ + super().__init__(path=path) + multirun_config = self.path / "multirun.yaml" + if multirun_config.is_file(): + self.config = multirun_config + else: + # Try alternative location + multirun_config = self.path.parent / "multirun.yaml" + if multirun_config.is_file(): + self.config = multirun_config + else: + if enable_logger: + logger.warning( + f"multirun.yaml not found in {self.path}. " + f"Some features may not work correctly." + ) + # Create an empty config + self._config = OmegaConf.create({}) + + @property + def overrides(self) -> Dict[str, List[str]]: + """Configuration overrides from multirun.yaml. + + Parses the hydra.overrides.task section to extract parameter + overrides and their values. + + Returns: + Dictionary mapping parameter names to lists of values. 
+ """ + try: + overrides_lst = OmegaConf.select(self.config, "hydra.overrides.task") + if overrides_lst is None: + return {} + + # Convert OmegaConf object to native Python types + if hasattr(OmegaConf, "to_container"): + overrides_lst = OmegaConf.to_container(overrides_lst, resolve=True) + + if not isinstance(overrides_lst, (list, tuple)): + overrides_lst = [overrides_lst] + + result: Dict[str, List[str]] = {} + for override in overrides_lst: + # Convert to string and handle different formats + override_str = str(override).strip() + + # Remove list brackets if present + if override_str.startswith("[") and override_str.endswith("]"): + override_str = override_str[1:-1] + if "=" not in override_str: + continue + + key, value = override_str.split("=", 1) + # Strip key and remove quotes + key = key.strip().strip("'\"") + # Handle comma-separated values + values = [v.strip().strip("'\"") for v in value.split(",")] + if key: # Only add if key is not empty + result[key] = values + return result + except Exception as e: + logger.warning(f"Failed to parse overrides: {e}") + return {} + + @property + def results(self) -> Generator[ResultAnalyzer, None, None]: + """Generator yielding ResultAnalyzer for each run. + + Yields: + ResultAnalyzer instance for each subdirectory in the multirun output. + """ + for subdir in self.subdir: + try: + yield ResultAnalyzer(subdir) + except (FileNotFoundError, ValueError) as e: + logger.warning(f"Skipping {subdir}: {e}") + continue + + @cached_property + def _results_list(self) -> List[ResultAnalyzer]: + """Cached list of ResultAnalyzer instances. + + This property caches the results to avoid generator exhaustion + when results are accessed multiple times. + """ + return list(self.results) + + @cached_property + def diff_runs(self) -> pd.DataFrame: + """DataFrame showing configuration differences between runs. + + Returns: + DataFrame with columns for each override parameter and rows + for each run, showing the actual values used. + + Raises: + NotImplementedError: If unexpected configuration values are found. + """ + runs: Dict[str, List[str]] = {} + for key, expected_values in self.overrides.items(): + if len(expected_values) <= 1: + continue + values = [] + for res in self._results_list: + try: + value = res.select(key) + if value is None: + values.append("") + else: + values.append(str(value)) + except Exception as e: + logger.warning(f"Failed to select {key} from {res.path}: {e}") + values.append("") + + # Only validate if we have non-empty values + non_empty_values = [v for v in values if v] + if non_empty_values: + # Convert expected values to strings for comparison + expected_str = [str(v) for v in expected_values] + unexpected_values = set(non_empty_values) - set(expected_str) + if unexpected_values: + raise NotImplementedError( + f"Some unexpected config values {unexpected_values} in '{key}'." + ) + # Always add the key, even if all values are empty + runs[key] = values + + if not runs: + return pd.DataFrame() + return pd.DataFrame(runs) + + @cached_property + def agg_data(self) -> pd.DataFrame: + """Aggregated data from all runs. + + This property aggregates data from all runs and adds configuration + override columns to identify each run. + + Returns: + DataFrame containing aggregated data from all runs. + + Note: + This is a cached property. To refresh, delete the attribute + or use a new instance. 
+ """ + datasets: List[pd.DataFrame] = [] + for res in self._results_list: + try: + data = res.get_data() + # Add override columns + for key in self.overrides: + try: + value = res.select(key) + if value is not None: + data[key] = value + except Exception: + # If key not found, skip + pass + datasets.append(data) + except Exception as e: + logger.warning(f"Failed to get data from {res.path}: {e}") + continue + + if not datasets: + return pd.DataFrame() + return pd.concat(datasets, ignore_index=True) + + def apply(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> pd.Series: + """Apply a function to each run's ResultAnalyzer. + + Args: + func: Function to apply. Should accept ResultAnalyzer as first argument. + *args: Additional positional arguments for the function. + **kwargs: Additional keyword arguments for the function. + + Returns: + Series with results from applying the function to each run. + """ + results = [] + for run in self._results_list: + try: + result = func(run, *args, **kwargs) + results.append(result) + except Exception as e: + logger.warning(f"Failed to apply {func.__name__} to {run.path}: {e}") + results.append(None) + + return pd.Series(results, name=func.__name__) diff --git a/abses/utils/datacollector.py b/abses/utils/datacollector.py index 60d85a41..8acfbbbd 100644 --- a/abses/utils/datacollector.py +++ b/abses/utils/datacollector.py @@ -9,6 +9,7 @@ from __future__ import annotations +import logging from typing import ( TYPE_CHECKING, Any, @@ -23,7 +24,6 @@ import numpy as np import pandas as pd -from loguru import logger if TYPE_CHECKING: from abses.agents.actor import Actor @@ -42,6 +42,8 @@ ReporterDict: TypeAlias = Dict[str, Reporter] ReportType: TypeAlias = Literal["model", "agents", "final"] | str +logger = logging.getLogger(__name__) + def _getattr_to_reporter( attribute_name: str, diff --git a/abses/utils/exp_logging.py b/abses/utils/exp_logging.py new file mode 100644 index 00000000..00dd4671 --- /dev/null +++ b/abses/utils/exp_logging.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# -*-coding:utf-8 -*- +# @Author : Shuang (Twist) Song +# @Contact : SongshGeo@gmail.com +# GitHub : https://github.com/SongshGeo +# Website: https://cv.songshgeo.com/ + +""" +Experiment-level logging configuration. + +Separates experiment-level logging from model run-level logging. +""" + +from __future__ import annotations + +import logging +from pathlib import Path +from typing import TYPE_CHECKING, Optional + +from omegaconf import DictConfig, OmegaConf + +from abses.utils.log_config import ( + DEFAULT_DATEFMT, + DEFAULT_FORMAT, + DEFAULT_LEVEL, + create_console_handler, + create_file_handler, +) +from abses.utils.log_parser import ( + get_file_config, + get_log_mode, + get_stdout_config, +) + +if TYPE_CHECKING: + pass + +# Experiment-level logger name +EXP_LOGGER_NAME = "abses.core.experiment" + + +def setup_exp_logger( + cfg: DictConfig | dict, logging_mode: Optional[str] = None +) -> logging.Logger: + """Setup experiment-level logger. + + This logger is separate from model run loggers and should only + log experiment-level information (progress, summaries, etc.). + + Args: + cfg: Configuration dictionary. + logging_mode: Logging mode - 'once', 'separate', or 'merge'. + If None, reads from config. + + Returns: + Configured experiment logger. 
+ """ + logger = logging.getLogger(EXP_LOGGER_NAME) + logger.setLevel(logging.INFO) + logger.propagate = False # Don't propagate to avoid mixing with model loggers + + # Clear existing handlers to ensure clean state + logger.handlers.clear() + + # Ensure parent loggers don't add handlers + parent_logger = logging.getLogger("abses.core") + parent_logger.propagate = False + parent_logger.handlers.clear() + + # Get logging mode + if logging_mode is None: + logging_mode = get_log_mode(cfg) + + # Get experiment-level logging configuration + exp_stdout = get_stdout_config(cfg, "exp") + exp_file = get_file_config(cfg, "exp") + + # Setup stdout handler + if exp_stdout: + stdout_handler = create_console_handler( + level=exp_stdout.get("level", DEFAULT_LEVEL), + fmt=exp_stdout.get("format", DEFAULT_FORMAT), + datefmt=exp_stdout.get("datefmt", DEFAULT_DATEFMT), + ) + logger.addHandler(stdout_handler) + # Note: We don't add a default stdout handler if exp_stdout is disabled + # This allows users to have file-only logging for experiments + + # Setup file handler + if exp_file: + # Determine log file name + # Check if name was explicitly set in the config + if isinstance(cfg, dict): + exp_file_cfg_raw = cfg.get("log", {}).get("exp", {}).get("file", {}) + else: + try: + exp_file_cfg_raw = OmegaConf.select(cfg, "log.exp.file", default={}) + except Exception: + exp_file_cfg_raw = {} + + name_explicitly_set = ( + isinstance(exp_file_cfg_raw, dict) and "name" in exp_file_cfg_raw + ) or (isinstance(exp_file_cfg_raw, DictConfig) and "name" in exp_file_cfg_raw) + + exp_file_name = exp_file.get("name", "experiment.log") + if ( + logging_mode == "separate" + and not name_explicitly_set + and exp_file_name == "experiment.log" + ): + # In separate mode, if name not explicitly set, use run.file.name + run_file_cfg = get_file_config(cfg, "run") + if run_file_cfg: + log_name = str(run_file_cfg.get("name", "model")).replace(".log", "") + exp_file_name = f"{log_name}.log" + + # Get output path + if isinstance(cfg, dict): + outpath = cfg.get("outpath") + else: + # For DictConfig, use OmegaConf.select + try: + outpath = OmegaConf.select(cfg, "outpath", default=None) + except Exception: + outpath = None + + if outpath is None: + outpath = Path.cwd() + elif isinstance(outpath, str): + outpath = Path(outpath) + elif not isinstance(outpath, Path): + outpath = Path(str(outpath)) + + file_path = outpath / exp_file_name + + file_handler = create_file_handler( + filepath=file_path, + level=exp_file.get("level", DEFAULT_LEVEL), + fmt=exp_file.get("format", DEFAULT_FORMAT), + datefmt=exp_file.get("datefmt", DEFAULT_DATEFMT), + rotation=exp_file.get("rotation", None), + retention=exp_file.get("retention", None), + ) + logger.addHandler(file_handler) + elif logging_mode == "separate": + # In separate mode, if exp.file is not enabled, create experiment log file using run.file.name + run_file_cfg = get_file_config(cfg, "run") + log_name = ( + str(run_file_cfg.get("name", "model")).replace(".log", "") + if run_file_cfg + else "model" + ) + + # Get output path + if isinstance(cfg, dict): + outpath = cfg.get("outpath") + else: + # For DictConfig, use OmegaConf.select + try: + outpath = OmegaConf.select(cfg, "outpath", default=None) + except Exception: + outpath = None + + if outpath is None: + outpath = Path.cwd() + elif isinstance(outpath, str): + outpath = Path(outpath) + elif not isinstance(outpath, Path): + outpath = Path(str(outpath)) + + file_path = outpath / f"{log_name}.log" + + file_handler = create_file_handler( + 
filepath=file_path, + level=DEFAULT_LEVEL, + fmt=DEFAULT_FORMAT, + datefmt=DEFAULT_DATEFMT, + rotation=None, + retention=None, + ) + logger.addHandler(file_handler) + + return logger diff --git a/abses/utils/hydra_logging.py b/abses/utils/hydra_logging.py new file mode 100644 index 00000000..dc2feda8 --- /dev/null +++ b/abses/utils/hydra_logging.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# -*-coding:utf-8 -*- +# @Author : Shuang (Twist) Song +# @Contact : SongshGeo@gmail.com +# GitHub : https://github.com/SongshGeo +# Website: https://cv.songshgeo.com/ + +""" +Hydra logging configuration utilities. + +Generates Hydra job_logging configuration from log.hydra settings. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Dict + +from omegaconf import DictConfig + +from abses.utils.log_parser import get_stdout_config + +if TYPE_CHECKING: + pass + + +def generate_hydra_job_logging(cfg: DictConfig | Dict[str, Any]) -> Dict[str, Any]: + """Generate Hydra job_logging configuration from log.hydra settings. + + Args: + cfg: Configuration dictionary. + + Returns: + Dictionary with Hydra job_logging configuration. + """ + hydra_stdout = get_stdout_config(cfg, "hydra") + + # Default configuration + default_format = "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s" + default_datefmt = "%H:%M:%S" + default_level = "WARNING" + + if hydra_stdout: + format_str = hydra_stdout.get("format", default_format) + datefmt = hydra_stdout.get("datefmt", default_datefmt) + level = hydra_stdout.get("level", default_level) + else: + format_str = default_format + datefmt = default_datefmt + level = default_level + + return { + "version": 1, + "formatters": { + "simple": { + "format": format_str, + "datefmt": datefmt, + } + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "level": level, + "formatter": "simple", + "stream": "ext://sys.stderr", + } + }, + "root": { + "level": "INFO", + "handlers": ["console"], + }, + "disable_existing_loggers": False, + } diff --git a/abses/utils/log_config.py b/abses/utils/log_config.py index e27ed448..e749af9c 100644 --- a/abses/utils/log_config.py +++ b/abses/utils/log_config.py @@ -28,11 +28,19 @@ SIMPLE_FORMAT = "%(message)s" DATE_FORMAT = "%H:%M:%S" +# Default values (matching log_parser.py) +DEFAULT_FORMAT = ABSES_FORMAT +DEFAULT_DATEFMT = DATE_FORMAT +DEFAULT_LEVEL = "INFO" + # Logger names ABSES_LOGGER_NAME = "abses" MESA_LOGGER_NAME = "mesa" MESA_FULL_LOGGER_NAME = "MESA" # Mesa 3.3.0 uses uppercase MESA prefix +# Sentinel for log_file_path to indicate "use default" +_LOG_FILE_PATH_DEFAULT = object() + def get_abses_logger(name: str = ABSES_LOGGER_NAME) -> logging.Logger: """Get ABSESpy logger instance. @@ -69,29 +77,34 @@ def configure_root_logger(level: str = "INFO") -> None: def create_console_handler( - level: str = "WARNING", - fmt: str = ABSES_FORMAT, + level: str = DEFAULT_LEVEL, + fmt: Optional[str] = None, + datefmt: Optional[str] = None, ) -> logging.StreamHandler: """Create console handler for logging. Args: level: Logging level. - fmt: Format string. + fmt: Format string (defaults to DEFAULT_FORMAT). + datefmt: Date format string (defaults to DEFAULT_DATEFMT). Returns: Configured console handler. 
""" handler = logging.StreamHandler(sys.stderr) handler.setLevel(level) - formatter = logging.Formatter(fmt, datefmt=DATE_FORMAT) + formatter = logging.Formatter( + fmt or DEFAULT_FORMAT, datefmt=datefmt or DEFAULT_DATEFMT + ) handler.setFormatter(formatter) return handler def create_file_handler( filepath: Path, - level: str = "INFO", - fmt: str = ABSES_FORMAT, + level: str = DEFAULT_LEVEL, + fmt: Optional[str] = None, + datefmt: Optional[str] = None, rotation: Optional[str] = None, retention: Optional[str] = None, ) -> logging.Handler: @@ -100,7 +113,8 @@ def create_file_handler( Args: filepath: Path to log file. level: Logging level. - fmt: Format string. + fmt: Format string (defaults to DEFAULT_FORMAT). + datefmt: Date format string (defaults to DEFAULT_DATEFMT). rotation: Rotation interval (e.g., "1 day", "100 MB"). retention: Retention period (e.g., "10 days"). @@ -141,18 +155,24 @@ def create_file_handler( handler = logging.FileHandler(filepath) handler.setLevel(level) - formatter = logging.Formatter(fmt, datefmt=DATE_FORMAT) + formatter = logging.Formatter( + fmt or DEFAULT_FORMAT, datefmt=datefmt or DEFAULT_DATEFMT + ) handler.setFormatter(formatter) return handler def setup_abses_logger( name: str = ABSES_LOGGER_NAME, - level: str = "INFO", + level: str = DEFAULT_LEVEL, console: bool = True, - console_level: str = "WARNING", + console_level: Optional[str] = None, + console_format: Optional[str] = None, + console_datefmt: Optional[str] = None, file_path: Optional[Path] = None, - file_level: str = "INFO", + file_level: Optional[str] = None, + file_format: Optional[str] = None, + file_datefmt: Optional[str] = None, rotation: Optional[str] = None, retention: Optional[str] = None, ) -> logging.Logger: @@ -160,11 +180,15 @@ def setup_abses_logger( Args: name: Logger name. - level: Logger level. + level: Logger level (used if console_level/file_level not specified). console: Whether to add console handler. - console_level: Console handler level. + console_level: Console handler level (defaults to level). + console_format: Console format string (defaults to DEFAULT_FORMAT). + console_datefmt: Console date format (defaults to DEFAULT_DATEFMT). file_path: Path to log file (if None, no file handler). - file_level: File handler level. + file_level: File handler level (defaults to level). + file_format: File format string (defaults to DEFAULT_FORMAT). + file_datefmt: File date format (defaults to DEFAULT_DATEFMT). rotation: Rotation interval for file handler. retention: Retention period for file handler. @@ -180,14 +204,19 @@ def setup_abses_logger( # Add console handler if console: - handler = create_console_handler(console_level) + handler = create_console_handler( + level=console_level or level, + fmt=console_format or DEFAULT_FORMAT, + datefmt=console_datefmt or DEFAULT_DATEFMT, + ) logger.addHandler(handler) # Add file handler if file_path: handler = create_file_handler( - file_path, - level=file_level, + filepath=file_path, + level=file_level or level, + fmt=file_format or DEFAULT_FORMAT, rotation=rotation, retention=retention, ) @@ -196,17 +225,61 @@ def setup_abses_logger( return logger -def setup_mesa_logger( +def determine_log_file_path( + outpath: Optional[Path], + log_name: str, + logging_mode: str = "once", + repeat_id: Optional[int] = None, +) -> Optional[Path]: + """Determine log file path based on logging mode. + + Args: + outpath: Output directory for log files. + log_name: Base log file name (without extension). + logging_mode: Logging mode - 'once', 'separate', or 'merge'. 
+ repeat_id: Repeat ID for the current run (1-indexed). + + Returns: + Path to log file, or None if logging should be disabled. + """ + if not outpath: + return None + + # Clean log name (remove .log extension if present) + log_name = str(log_name).replace(".log", "") + + if logging_mode == "once": + # Only log the first repeat + if repeat_id is None or repeat_id == 1: + return outpath / f"{log_name}.log" + return None + elif logging_mode == "separate": + # Each repeat gets its own file + # In separate mode, repeat_id must be provided + if repeat_id is None: + return None # Don't create default file in separate mode + return outpath / f"{log_name}_{repeat_id}.log" + elif logging_mode == "merge": + # All repeats go to the same file + return outpath / f"{log_name}.log" + else: + # Unknown mode, default to once behavior + if repeat_id is None or repeat_id == 1: + return outpath / f"{log_name}.log" + return None + + +def configure_mesa_logger_with_format( level: str = "INFO", handlers: Optional[list[logging.Handler]] = None, + mesa_format: Optional[str] = None, ) -> tuple[logging.Logger, logging.Logger]: - """Setup Mesa loggers to integrate with ABSESpy logging. - - Mesa 3.3.0 uses both 'mesa' and 'MESA' logger names. + """Configure Mesa loggers with custom format. Args: level: Logging level for Mesa. - handlers: Handlers to attach (if None, inherits from parent). + handlers: Handlers to attach (if None, creates new handlers with format). + mesa_format: Custom format string for Mesa loggers. If None, uses ABSES_FORMAT. Returns: Tuple of (mesa_logger, MESA_logger). @@ -219,7 +292,15 @@ def setup_mesa_logger( mesa_upper_logger = logging.getLogger(MESA_FULL_LOGGER_NAME) mesa_upper_logger.setLevel(level) + # Use custom format if provided, otherwise use ABSES format + format_str = mesa_format if mesa_format is not None else ABSES_FORMAT + if handlers: + # Apply format to existing handlers + formatter = logging.Formatter(format_str, datefmt=DATE_FORMAT) + for handler in handlers: + handler.setFormatter(formatter) + # Configure both loggers for logger_obj in [mesa_logger, mesa_upper_logger]: logger_obj.propagate = False @@ -234,48 +315,113 @@ def setup_mesa_logger( return mesa_logger, mesa_upper_logger +def setup_mesa_logger( + level: str = "INFO", + handlers: Optional[list[logging.Handler]] = None, + mesa_format: Optional[str] = None, +) -> tuple[logging.Logger, logging.Logger]: + """Setup Mesa loggers to integrate with ABSESpy logging. + + Mesa 3.3.0 uses both 'mesa' and 'MESA' logger names. + + Args: + level: Logging level for Mesa. + handlers: Handlers to attach (if None, inherits from parent). + mesa_format: Custom format string for Mesa loggers. If None, uses ABSES_FORMAT. + + Returns: + Tuple of (mesa_logger, MESA_logger). 
+ """ + return configure_mesa_logger_with_format( + level=level, handlers=handlers, mesa_format=mesa_format + ) + + def setup_integrated_logging( abses_logger_name: str = ABSES_LOGGER_NAME, - level: str = "INFO", + level: str = DEFAULT_LEVEL, outpath: Optional[Path] = None, log_name: str = "abses", console: bool = True, + console_level: Optional[str] = None, + console_format: Optional[str] = None, + console_datefmt: Optional[str] = None, rotation: Optional[str] = None, retention: Optional[str] = None, + log_file_path: Optional[Path] = _LOG_FILE_PATH_DEFAULT, + file_level: Optional[str] = None, + file_format: Optional[str] = None, + file_datefmt: Optional[str] = None, + mesa_format: Optional[str] = None, + mesa_level: Optional[str] = None, ) -> tuple[logging.Logger, logging.Logger, logging.Logger]: """Setup integrated logging for ABSESpy and Mesa. Args: abses_logger_name: ABSESpy logger name. - level: Logging level. + level: Logging level (used if console_level/file_level not specified). outpath: Output directory for log files. log_name: Log file name (without extension). console: Whether to log to console. + console_level: Console handler level (defaults to level). + console_format: Console format string. + console_datefmt: Console date format string. rotation: Rotation interval. retention: Retention period. + log_file_path: Explicit log file path. If _LOG_FILE_PATH_DEFAULT, uses default from outpath/log_name. + If None, disables file logging. + file_level: File handler level (defaults to level). + file_format: File format string. + file_datefmt: File date format string. + mesa_format: Custom format string for Mesa loggers. If None, uses DEFAULT_FORMAT. + mesa_level: Logging level for Mesa loggers. If None, uses the main level. Returns: Tuple of (abses_logger, mesa_logger, mesa_upper_logger). 
""" # Determine file path - file_path = outpath / f"{log_name}.log" if outpath else None + if log_file_path is _LOG_FILE_PATH_DEFAULT: + # log_file_path was not provided, use default + file_path = outpath / f"{log_name}.log" if outpath else None + elif log_file_path is None: + # log_file_path was explicitly set to None, don't create file + file_path = None + else: + # log_file_path was explicitly provided + file_path = log_file_path # Setup ABSESpy logger + # Clear any existing handlers from parent loggers to prevent mixing + # This ensures experiment-level loggers don't inherit model run log handlers abses_logger = setup_abses_logger( name=abses_logger_name, level=level, console=console, - console_level="WARNING", + console_level=console_level, + console_format=console_format, + console_datefmt=console_datefmt, file_path=file_path, - file_level=level, + file_level=file_level, + file_format=file_format, + file_datefmt=file_datefmt, rotation=rotation, retention=retention, ) - # Setup Mesa loggers (both 'mesa' and 'MESA') to use same handlers + # Ensure child loggers (like abses.core.experiment) don't inherit handlers + # by setting propagate=False on parent loggers + for parent_name in ["abses.core", "abses.core.experiment"]: + parent_logger = logging.getLogger(parent_name) + parent_logger.propagate = False + # Don't clear handlers here, as they may be configured separately + + # Setup Mesa loggers (both 'mesa' and 'MESA') to use same handlers with format + # Use mesa_level if provided, otherwise use main level + mesa_log_level = mesa_level if mesa_level is not None else level mesa_logger, mesa_upper_logger = setup_mesa_logger( - level=level, + level=mesa_log_level, handlers=list(abses_logger.handlers) if abses_logger.handlers else None, + mesa_format=mesa_format, ) return abses_logger, mesa_logger, mesa_upper_logger diff --git a/abses/utils/log_parser.py b/abses/utils/log_parser.py new file mode 100644 index 00000000..b96b99ab --- /dev/null +++ b/abses/utils/log_parser.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python3 +# -*-coding:utf-8 -*- +# @Author : Shuang (Twist) Song +# @Contact : SongshGeo@gmail.com +# GitHub : https://github.com/SongshGeo +# Website: https://cv.songshgeo.com/ + +""" +Logging configuration parser for ABSESpy. + +Parses the unified logging configuration structure and provides +access to different logging levels (hydra, exp, run). +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Dict + +from omegaconf import DictConfig, OmegaConf + +if TYPE_CHECKING: + pass + +# Default values +DEFAULT_FORMAT = "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s" +DEFAULT_DATEFMT = "%H:%M:%S" +DEFAULT_LEVEL = "INFO" + + +def get_log_mode(cfg: DictConfig | Dict[str, Any]) -> str: + """Get logging mode from configuration. + + Args: + cfg: Configuration dictionary. + + Returns: + Logging mode: 'once', 'separate', or 'merge'. Defaults to 'once'. 
+ """ + # Handle both DictConfig and plain dict + if isinstance(cfg, DictConfig): + # Check if log.mode was explicitly set (not defaulted) + mode = OmegaConf.select(cfg, "log.mode") + if mode is None: + # Key doesn't exist, use default and check backward compat + mode = "once" + old_mode = OmegaConf.select(cfg, "exp.logging.mode", default=None) + if old_mode is not None: + if isinstance(old_mode, str): + return old_mode + elif isinstance(old_mode, dict): + return old_mode.get("mode", "once") + # If mode was explicitly set, return it without checking old config + return mode if mode else "once" + else: + # Plain dict: use dict.get() with nested access + log_section = cfg.get("log", {}) + if isinstance(log_section, dict): + # Check if "mode" key exists explicitly + if "mode" in log_section: + mode = log_section.get("mode", "once") + # Explicitly set, return it without checking old config + return mode if mode else "once" + else: + # Key doesn't exist, use default and check backward compat + mode = "once" + else: + mode = "once" + # Backward compatibility: check exp.logging.mode only if mode wasn't explicitly set + exp_section = cfg.get("exp", {}) + if isinstance(exp_section, dict): + logging_section = exp_section.get("logging", {}) + if isinstance(logging_section, dict): + old_mode = logging_section.get("mode") + elif isinstance(logging_section, str): + old_mode = logging_section + else: + old_mode = None + if old_mode is not None: + if isinstance(old_mode, str): + return old_mode + elif isinstance(old_mode, dict): + return old_mode.get("mode", "once") + + return mode if mode else "once" + + +def get_log_config( + cfg: DictConfig | Dict[str, Any], level: str = "run" +) -> Dict[str, Any]: + """Get logging configuration for a specific level. + + Args: + cfg: Configuration dictionary. + level: Logging level - 'hydra', 'exp', or 'run'. + + Returns: + Dictionary with logging configuration for the specified level. 
+ """ + # Handle both DictConfig and plain dict + if isinstance(cfg, DictConfig): + log_cfg = OmegaConf.select(cfg, f"log.{level}", default={}) + else: + # Plain dict: use dict.get() with nested access + log_section = cfg.get("log", {}) + log_cfg = log_section.get(level, {}) if isinstance(log_section, dict) else {} + + # Handle backward compatibility for old log structure + if not log_cfg and level == "run": + # Try to read from old log structure + if isinstance(cfg, DictConfig): + old_log = OmegaConf.select(cfg, "log", default={}) + else: + old_log = cfg.get("log", {}) + + if not old_log or not isinstance(old_log, dict): + return {} + + # Map old structure to new structure + result = { + "stdout": { + "enabled": old_log.get("console", False), + "level": old_log.get("level", DEFAULT_LEVEL), + "format": DEFAULT_FORMAT, + "datefmt": DEFAULT_DATEFMT, + }, + "file": { + "enabled": True, # Assume enabled if log section exists + "name": old_log.get("name", "model"), + "level": old_log.get("level", DEFAULT_LEVEL), + "format": DEFAULT_FORMAT, + "datefmt": DEFAULT_DATEFMT, + "rotation": old_log.get("rotation", None), + "retention": old_log.get("retention", None), + }, + } + + # Handle MESA config + mesa_cfg = old_log.get("mesa", {}) + if isinstance(mesa_cfg, dict): + result["mesa"] = { + "level": mesa_cfg.get("level", None), + "format": mesa_cfg.get("format", None), + } + else: + result["mesa"] = {"level": None, "format": None} + + return result + + # Convert DictConfig to dict if needed + if isinstance(log_cfg, DictConfig): + log_cfg = OmegaConf.to_container(log_cfg, resolve=True) + + if not isinstance(log_cfg, dict): + return {} + + return log_cfg + + +def get_stdout_config( + cfg: DictConfig | Dict[str, Any], level: str = "run" +) -> Dict[str, Any]: + """Get stdout logging configuration for a specific level. + + Args: + cfg: Configuration dictionary. + level: Logging level - 'hydra', 'exp', or 'run'. + + Returns: + Dictionary with stdout configuration, or empty dict if disabled. + """ + log_cfg = get_log_config(cfg, level) + stdout_cfg = log_cfg.get("stdout", {}) + + if isinstance(stdout_cfg, dict): + enabled = stdout_cfg.get("enabled", False) + if not enabled: + return {} + return { + "enabled": True, + "level": stdout_cfg.get("level", DEFAULT_LEVEL), + "format": stdout_cfg.get("format", DEFAULT_FORMAT), + "datefmt": stdout_cfg.get("datefmt", DEFAULT_DATEFMT), + } + return {} + + +def get_file_config( + cfg: DictConfig | Dict[str, Any], level: str = "run" +) -> Dict[str, Any]: + """Get file logging configuration for a specific level. + + Args: + cfg: Configuration dictionary. + level: Logging level - 'hydra', 'exp', or 'run'. + + Returns: + Dictionary with file configuration, or empty dict if disabled. 
+ """ + log_cfg = get_log_config(cfg, level) + file_cfg = log_cfg.get("file", {}) + + if isinstance(file_cfg, dict): + enabled = file_cfg.get("enabled", True) # Default to enabled for file + if not enabled: + return {} + # Default name depends on level: "experiment.log" for exp, "model" for run + default_name = "experiment.log" if level == "exp" else "model" + return { + "enabled": True, + "name": file_cfg.get("name", default_name), + "level": file_cfg.get("level", DEFAULT_LEVEL), + "format": file_cfg.get("format", DEFAULT_FORMAT), + "datefmt": file_cfg.get("datefmt", DEFAULT_DATEFMT), + "rotation": file_cfg.get("rotation", None), + "retention": file_cfg.get("retention", None), + } + return {} + + +def get_mesa_config( + cfg: DictConfig | Dict[str, Any], level: str = "run" +) -> Dict[str, Any]: + """Get MESA logging configuration. + + Args: + cfg: Configuration dictionary. + level: Logging level (usually 'run' for MESA). + + Returns: + Dictionary with MESA configuration. + """ + log_cfg = get_log_config(cfg, level) + mesa_cfg = log_cfg.get("mesa", {}) + + if isinstance(mesa_cfg, dict): + return { + "level": mesa_cfg.get("level", None), + "format": mesa_cfg.get("format", None), + } + return {"level": None, "format": None} diff --git a/abses/utils/logging.py b/abses/utils/logging.py index ebee7765..3cc52829 100644 --- a/abses/utils/logging.py +++ b/abses/utils/logging.py @@ -21,6 +21,7 @@ from abses.utils.log_config import ( ABSES_LOGGER_NAME, LoggerAdapter, + determine_log_file_path, get_abses_logger, setup_integrated_logging, ) @@ -85,13 +86,37 @@ def setup_logger_info( logger.bind(no_format=True).info(f"Exp environment: {is_exp_env}\n") +def log_repeat_separator(repeat_id: int, total_repeats: int) -> None: + """Log a separator for a new repeat run in merge mode. + + Args: + repeat_id: Current repeat ID (1-indexed). + total_repeats: Total number of repeats. + """ + separator = "\n" + "=" * 60 + "\n" + header = f"Repeat {repeat_id}/{total_repeats}".center(60) + "\n" + footer = "=" * 60 + "\n" + logger.bind(no_format=True).info(separator + header + footer) + + def setup_model_logger( name: str = "model", level: str = "INFO", outpath: Optional[Path] = None, console: bool = True, + console_level: Optional[str] = None, + console_format: Optional[str] = None, + console_datefmt: Optional[str] = None, rotation: Optional[str] = None, retention: Optional[str] = None, + log_file_path: Optional[Path] = None, + logging_mode: str = "once", + repeat_id: Optional[int] = None, + file_level: Optional[str] = None, + file_format: Optional[str] = None, + file_datefmt: Optional[str] = None, + mesa_format: Optional[str] = None, + mesa_level: Optional[str] = None, ) -> tuple[logging.Logger, logging.Logger, logging.Logger]: """Setup logging for a model run. @@ -99,11 +124,22 @@ def setup_model_logger( Args: name: Log file name. - level: Logging level. + level: Logging level (used if console_level/file_level not specified). outpath: Output directory for log files. console: Whether to log to console. + console_level: Console handler level (defaults to level). + console_format: Console format string. + console_datefmt: Console date format string. rotation: Rotation interval (e.g., "1 day", "100 MB"). retention: Retention period (e.g., "10 days"). + log_file_path: Explicit log file path (overrides automatic path determination). + logging_mode: Logging mode - 'once', 'separate', or 'merge'. + repeat_id: Repeat ID for the current run (1-indexed). + file_level: File handler level (defaults to level). 
+ file_format: File format string. + file_datefmt: File date format string. + mesa_format: Custom format string for Mesa loggers. If None, uses ABSESpy format. + mesa_level: Logging level for Mesa loggers. If None, uses the main level. Returns: Tuple of (abses_logger, mesa_logger, mesa_upper_logger). @@ -112,14 +148,32 @@ def setup_model_logger( if outpath and not isinstance(outpath, Path): outpath = Path(outpath) + # Determine log file path if not explicitly provided + if log_file_path is None: + log_file_path = determine_log_file_path( + outpath=outpath, + log_name=name, + logging_mode=logging_mode, + repeat_id=repeat_id, + ) + # Setup integrated logging abses_logger, mesa_logger, mesa_upper_logger = setup_integrated_logging( level=level, outpath=outpath, log_name=name, console=console, + console_level=console_level, + console_format=console_format, + console_datefmt=console_datefmt, rotation=rotation, retention=retention, + log_file_path=log_file_path, + file_level=file_level, + file_format=file_format, + file_datefmt=file_datefmt, + mesa_format=mesa_format, + mesa_level=mesa_level, ) return abses_logger, mesa_logger, mesa_upper_logger @@ -130,6 +184,7 @@ def setup_model_logger( "logger", "formatter", "log_session", + "log_repeat_separator", "setup_logger_info", "setup_model_logger", "FORMAT", diff --git a/abses/utils/tracker/factory.py b/abses/utils/tracker/factory.py index 9c7c1861..6ce49b9c 100644 --- a/abses/utils/tracker/factory.py +++ b/abses/utils/tracker/factory.py @@ -225,7 +225,7 @@ def create_tracker( model_name=model.name, version=model.version, run_id=model._run_id, - model_params=model.params, + model_params=model.settings, ) return tracker diff --git a/docs/api/analysis.md b/docs/api/analysis.md new file mode 100644 index 00000000..5abf2286 --- /dev/null +++ b/docs/api/analysis.md @@ -0,0 +1,10 @@ +--- +title: Analysis +authors: SongshGeo +date: 2024-12-20 +--- + +:::abses.utils.analysis.ResultAnalyzer + +:::abses.utils.analysis.ExpAnalyzer + diff --git a/makefile b/makefile index c4ffe256..53fc37a3 100644 --- a/makefile +++ b/makefile @@ -188,16 +188,20 @@ test-all: @echo "🧪 Running Complete Test Suite (Including Notebooks and Multi-version)..." @echo "Running standard tests..." uv run pytest tests/ -vs --clean-alluredir --alluredir tmp/allure_results --cov=abses --no-cov-on-fail + @echo "Installing docs dependencies for notebook tests..." + @uv sync --group docs || echo "⚠️ Failed to install docs dependencies" @echo "Running notebook tests..." uv run pytest --nbmake docs/tutorial/**/*.ipynb -v --tb=short || echo "⚠️ Some notebook tests may have failed (this is acceptable for documentation notebooks)" @echo "Running multi-version tests with tox..." - uv run --with tox tox -p auto || echo "⚠️ Multi-version tests completed with warnings" + @echo "⚠️ Note: tox may have issues with uv-managed Python environments. If it fails, consider using system Python for tox." + tox -p auto || echo "⚠️ Multi-version tests completed with warnings" @echo "✅ All tests completed!" # 仅运行 tox 多版本测试 test-tox: @echo "🔄 Running Multi-version Tests with Tox..." - uv run --with tox tox -p auto + @echo "⚠️ Note: tox uses system Python interpreters. Make sure python3.11, python3.12, python3.13 are available in PATH." 
+ tox -p auto # 仅运行 notebook 测试(包括所有 ipynb 文件) test-all-notebooks: diff --git a/mkdocs.yml b/mkdocs.yml index 340afad7..1759eb17 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -64,6 +64,7 @@ nav: - Time Control: api/time.md - Random: api/random.md - Experiment: api/experiment.md + - Analysis: api/analysis.md theme: name: "material" diff --git a/pyproject.toml b/pyproject.toml index 15c7b8f3..88989737 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ line_length = 79 [project] name = "abses" -version = "0.9.0" +version = "0.10.0" description = "ABSESpy makes it easier to build artificial Social-ecological systems with real GeoSpatial datasets and fully incorporate human behaviour." authors = [{name = "Shuang Song", email = "songshgeo@gmail.com"}] license = {text = "Apache 2.0 License"} @@ -30,7 +30,6 @@ dependencies = [ "mesa-geo>=0.9.1", "xarray>=2023", "fiona>1.8", - "loguru>=0.7", "rioxarray>=0.13", "pendulum>=3.0.0", "geopandas>=0,<1", diff --git a/test_colormap.py b/test_colormap.py deleted file mode 100644 index 803d6a96..00000000 --- a/test_colormap.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -"""Test colormap issue""" - -from enum import IntEnum - -import numpy as np - - -# Test Enum to int conversion -class State(IntEnum): - EMPTY = 0 - INTACT = 1 - BURNING = 2 - SCORCHED = 3 - - -# Test with Enum keys -cmap_dict = { - State.EMPTY: "black", - State.INTACT: "green", - State.BURNING: "orange", - State.SCORCHED: "red", -} - -print("Original dict keys:", list(cmap_dict.keys())) -print("Enum to int:", [int(k) for k in cmap_dict.keys()]) - -# Convert to int keys -int_cmap = {int(k): v for k, v in cmap_dict.items()} -print("Int dict:", int_cmap) - -# Test sorted categories -categories = list(int_cmap.keys()) -color_list = [int_cmap[c] for c in sorted(categories)] -print("Sorted categories:", sorted(categories)) -print("Color list:", color_list) - -# Simulate data -data = np.array([[1, 1, 1], [3, 3, 3]]) -print("\nData values:", np.unique(data)) -print("Data should map 1->green (index 1) and 3->red (index 3)") - -# The problem: ListedColormap(color_list) with 4 colors -# But data has values 1 and 3 -# Value 1 would map to index 1 (green) ✓ -# Value 3 would map to index 3 (red) ✓ -# This should work! diff --git a/tests/utils/test_analysis.py b/tests/utils/test_analysis.py new file mode 100644 index 00000000..d644e1ad --- /dev/null +++ b/tests/utils/test_analysis.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python3 +# -*-coding:utf-8 -*- +"""Tests for analysis utilities module. + +This module tests the functionality of ResultAnalyzer and ExpAnalyzer +for analyzing Hydra multirun experiment results. +""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +import pandas as pd +import pytest +import yaml +from omegaconf import DictConfig + +from abses.utils.analysis import ExpAnalyzer, ResultAnalyzer + +if TYPE_CHECKING: + pass + + +@pytest.fixture +def temp_multirun_dir(tmp_path: Path) -> Path: + """Create a temporary directory structure simulating Hydra multirun output. + + Args: + tmp_path: Temporary directory provided by pytest. + + Returns: + Path to the multirun directory. 
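+ + Layout created below (summary): a top-level ``multirun.yaml`` plus six run + subdirectories, each holding ``.hydra/config.yaml`` and a ``cities.csv`` data file.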
+ """ + multirun_dir = tmp_path / "multirun_output" + multirun_dir.mkdir() + + # Create multirun.yaml + multirun_config = { + "hydra": { + "overrides": { + "task": [ + "model.density=0.5,0.7,0.9", + "model.n_agents=10,20", + ] + } + } + } + with open(multirun_dir / "multirun.yaml", "w", encoding="utf-8") as f: + yaml.dump(multirun_config, f) + + # Create subdirectories for each run + runs = [ + ("0_model.density=0.5_model.n_agents=10", {"density": 0.5, "n_agents": 10}), + ("1_model.density=0.7_model.n_agents=10", {"density": 0.7, "n_agents": 10}), + ("2_model.density=0.9_model.n_agents=10", {"density": 0.9, "n_agents": 10}), + ("3_model.density=0.5_model.n_agents=20", {"density": 0.5, "n_agents": 20}), + ("4_model.density=0.7_model.n_agents=20", {"density": 0.7, "n_agents": 20}), + ("5_model.density=0.9_model.n_agents=20", {"density": 0.9, "n_agents": 20}), + ] + + for run_name, config_values in runs: + run_dir = multirun_dir / run_name + run_dir.mkdir() + + # Create .hydra directory + hydra_dir = run_dir / ".hydra" + hydra_dir.mkdir() + + # Create config.yaml + run_config = { + "model": { + "density": config_values["density"], + "n_agents": config_values["n_agents"], + }, + "reports": { + "model": {"population": "n_agents"}, + "agents": {"City": {"wealth": "wealth"}}, + "final": {"final_population": "n_agents"}, + }, + } + with open(hydra_dir / "config.yaml", "w", encoding="utf-8") as f: + yaml.dump(run_config, f) + + # Create CSV data file + data = pd.DataFrame( + { + "Time": ["2020-01-01", "2020-01-02", "2020-01-03"], + "province": ["A", "A", "A"], + "surface": [100.0, 110.0, 120.0], + "ground": [50.0, 55.0, 60.0], + "quota": [80.0, 85.0, 90.0], + } + ) + data.to_csv(run_dir / "cities.csv", index=True) + + return multirun_dir + + +@pytest.fixture +def temp_single_run_dir(tmp_path: Path) -> Path: + """Create a temporary directory structure for a single run. + + Args: + tmp_path: Temporary directory provided by pytest. + + Returns: + Path to the single run directory. 
+ """ + run_dir = tmp_path / "single_run" + run_dir.mkdir() + + # Create .hydra directory + hydra_dir = run_dir / ".hydra" + hydra_dir.mkdir() + + # Create config.yaml + run_config = { + "model": {"density": 0.7, "n_agents": 50}, + "reports": { + "model": {"population": "n_agents"}, + "final": {"final_population": "n_agents"}, + }, + } + with open(hydra_dir / "config.yaml", "w", encoding="utf-8") as f: + yaml.dump(run_config, f) + + # Create CSV data file + data = pd.DataFrame( + { + "Time": ["2020-01-01", "2020-01-02"], + "province": ["A", "B"], + "surface": [100.0, 200.0], + "ground": [50.0, 100.0], + } + ) + data.to_csv(run_dir / "cities.csv", index=True) + + return run_dir + + +class TestBaseAnalyzer: + """Tests for _BaseAnalyzer base class.""" + + def test_path_property(self, temp_single_run_dir: Path) -> None: + """Test path property getter and setter.""" + from abses.utils.analysis import _BaseAnalyzer + + analyzer = _BaseAnalyzer(temp_single_run_dir) + assert analyzer.path == temp_single_run_dir + assert isinstance(analyzer.path, Path) + + # Test setter with string + analyzer.path = str(temp_single_run_dir) + assert analyzer.path == temp_single_run_dir + + def test_config_property(self, temp_single_run_dir: Path) -> None: + """Test config property loading.""" + from abses.utils.analysis import _BaseAnalyzer + + analyzer = _BaseAnalyzer(temp_single_run_dir) + config_path = temp_single_run_dir / ".hydra" / "config.yaml" + analyzer.config = config_path + + assert analyzer.config is not None + assert isinstance(analyzer.config, DictConfig) + + def test_select_method(self, temp_single_run_dir: Path) -> None: + """Test select method for configuration values.""" + from abses.utils.analysis import _BaseAnalyzer + + analyzer = _BaseAnalyzer(temp_single_run_dir) + config_path = temp_single_run_dir / ".hydra" / "config.yaml" + analyzer.config = config_path + + density = analyzer.select("model.density") + assert density == 0.7 + + def test_subdir_property(self, temp_multirun_dir: Path) -> None: + """Test subdir property.""" + from abses.utils.analysis import _BaseAnalyzer + + analyzer = _BaseAnalyzer(temp_multirun_dir) + subdirs = analyzer.subdir + + assert len(subdirs) == 6 + assert all(isinstance(d, Path) for d in subdirs) + + +class TestResultAnalyzer: + """Tests for ResultAnalyzer class.""" + + def test_initialization(self, temp_single_run_dir: Path) -> None: + """Test ResultAnalyzer initialization.""" + analyzer = ResultAnalyzer(temp_single_run_dir) + + assert analyzer.path == temp_single_run_dir + assert analyzer.config is not None + assert isinstance(analyzer.data, pd.DataFrame) + + def test_initialization_invalid_path(self, tmp_path: Path) -> None: + """Test ResultAnalyzer initialization with invalid path.""" + invalid_path = tmp_path / "nonexistent" + with pytest.raises(FileNotFoundError): + ResultAnalyzer(invalid_path) + + def test_read_data(self, temp_single_run_dir: Path) -> None: + """Test data reading from CSV.""" + analyzer = ResultAnalyzer(temp_single_run_dir) + + assert not analyzer.data.empty + assert "Time" in analyzer.data.columns + assert "province" in analyzer.data.columns + + def test_read_csv(self, temp_single_run_dir: Path) -> None: + """Test CSV reading method.""" + analyzer = ResultAnalyzer(temp_single_run_dir) + csv_path = temp_single_run_dir / "cities.csv" + + data = analyzer.read_csv(csv_path) + assert isinstance(data, pd.DataFrame) + assert len(data) > 0 + + def test_get_data(self, temp_single_run_dir: Path) -> None: + """Test get_data method.""" + analyzer = 
ResultAnalyzer(temp_single_run_dir) + + data = analyzer.get_data() + assert isinstance(data, pd.DataFrame) + assert not data.empty + + def test_select_config(self, temp_single_run_dir: Path) -> None: + """Test configuration selection.""" + analyzer = ResultAnalyzer(temp_single_run_dir) + + density = analyzer.select("model.density") + assert density == 0.7 + + n_agents = analyzer.select("model.n_agents") + assert n_agents == 50 + + def test_reporter_extraction(self, temp_single_run_dir: Path) -> None: + """Test reporter information extraction.""" + analyzer = ResultAnalyzer(temp_single_run_dir) + + assert hasattr(analyzer, "model_reporter") + assert hasattr(analyzer, "agent_reporter") + assert hasattr(analyzer, "final_reporter") + + +class TestExpAnalyzer: + """Tests for ExpAnalyzer class.""" + + def test_initialization(self, temp_multirun_dir: Path) -> None: + """Test ExpAnalyzer initialization.""" + analyzer = ExpAnalyzer(temp_multirun_dir) + + assert analyzer.path == temp_multirun_dir + assert analyzer.config is not None + + def test_overrides_property(self, temp_multirun_dir: Path) -> None: + """Test overrides property parsing.""" + analyzer = ExpAnalyzer(temp_multirun_dir) + + overrides = analyzer.overrides + assert "model.density" in overrides + assert "model.n_agents" in overrides + assert len(overrides["model.density"]) == 3 + assert len(overrides["model.n_agents"]) == 2 + + def test_results_generator(self, temp_multirun_dir: Path) -> None: + """Test results generator.""" + analyzer = ExpAnalyzer(temp_multirun_dir) + + results = list(analyzer.results) + assert len(results) == 6 + assert all(isinstance(r, ResultAnalyzer) for r in results) + + def test_diff_runs(self, temp_multirun_dir: Path) -> None: + """Test diff_runs property.""" + analyzer = ExpAnalyzer(temp_multirun_dir) + + diff = analyzer.diff_runs + assert isinstance(diff, pd.DataFrame) + # Should have columns for each override parameter + assert "model.density" in diff.columns or len(diff) == 0 + + def test_agg_data(self, temp_multirun_dir: Path) -> None: + """Test aggregated data property.""" + analyzer = ExpAnalyzer(temp_multirun_dir) + + agg = analyzer.agg_data + assert isinstance(agg, pd.DataFrame) + # Should have data from all runs + assert len(agg) > 0 + # Should have override columns + assert "model.density" in agg.columns or len(agg) == 0 + + def test_apply_method(self, temp_multirun_dir: Path) -> None: + """Test apply method for custom functions.""" + analyzer = ExpAnalyzer(temp_multirun_dir) + + def get_data_length(result: ResultAnalyzer) -> int: + """Get the length of data in a result.""" + return len(result.data) + + results = analyzer.apply(get_data_length) + assert isinstance(results, pd.Series) + assert len(results) == 6 + assert all(r > 0 for r in results if r is not None) + + def test_empty_multirun_dir(self, tmp_path: Path) -> None: + """Test ExpAnalyzer with empty multirun directory.""" + empty_dir = tmp_path / "empty_multirun" + empty_dir.mkdir() + + analyzer = ExpAnalyzer(empty_dir) + + # Should not raise error, but overrides should be empty + assert analyzer.overrides == {} + assert len(list(analyzer.results)) == 0 diff --git a/tests/utils/test_logging.py b/tests/utils/test_logging.py new file mode 100644 index 00000000..f690e967 --- /dev/null +++ b/tests/utils/test_logging.py @@ -0,0 +1,576 @@ +#!/usr/bin/env python3 +# -*-coding:utf-8 -*- +# @Author : Shuang (Twist) Song +# @Contact : SongshGeo@gmail.com +# GitHub : https://github.com/SongshGeo +# Website: https://cv.songshgeo.com/ + +""" +Tests for the logging-related
functionality. + +Covered: +1. Configuration parsing (log_parser.py) +2. Logger setup (log_config.py) +3. Experiment-level logging setup (exp_logging.py) +4. Model-run logging setup (logging.py) +5. Integration tests: the complete logging workflow +""" + +from __future__ import annotations + +import logging +import tempfile +from pathlib import Path + +import pytest +from omegaconf import OmegaConf + +from abses import MainModel +from abses.core.experiment import Experiment +from abses.utils.exp_logging import EXP_LOGGER_NAME, setup_exp_logger +from abses.utils.log_config import ( + DEFAULT_DATEFMT, + DEFAULT_FORMAT, + DEFAULT_LEVEL, + create_console_handler, + create_file_handler, + determine_log_file_path, + setup_abses_logger, +) +from abses.utils.log_parser import ( + get_file_config, + get_log_mode, + get_mesa_config, + get_stdout_config, +) +from abses.utils.logging import setup_model_logger + + +class TestLogParser: + """Tests for configuration parsing.""" + + def test_get_log_mode_default(self): + """Test getting the default log mode.""" + cfg = {} + assert get_log_mode(cfg) == "once" + + def test_get_log_mode_from_config(self): + """Test reading the log mode from the configuration.""" + cfg = {"log": {"mode": "separate"}} + assert get_log_mode(cfg) == "separate" + + cfg = {"log": {"mode": "merge"}} + assert get_log_mode(cfg) == "merge" + + def test_get_stdout_config_enabled(self): + """Test getting an enabled stdout configuration.""" + cfg = { + "log": { + "run": { + "stdout": { + "enabled": True, + "level": "DEBUG", + "format": "[%(levelname)s] %(message)s", + "datefmt": "%Y-%m-%d", + } + } + } + } + stdout_cfg = get_stdout_config(cfg, "run") + assert stdout_cfg["enabled"] is True + assert stdout_cfg["level"] == "DEBUG" + assert stdout_cfg["format"] == "[%(levelname)s] %(message)s" + assert stdout_cfg["datefmt"] == "%Y-%m-%d" + + def test_get_stdout_config_disabled(self): + """Test getting a disabled stdout configuration.""" + cfg = {"log": {"run": {"stdout": {"enabled": False}}}} + stdout_cfg = get_stdout_config(cfg, "run") + assert stdout_cfg == {} + + def test_get_file_config_enabled(self): + """Test getting an enabled file configuration.""" + cfg = { + "log": { + "run": { + "file": { + "enabled": True, + "name": "test_model", + "level": "WARNING", + "format": "[%(levelname)s] %(message)s", + "rotation": "1 day", + "retention": "10 days", + } + } + } + } + file_cfg = get_file_config(cfg, "run") + assert file_cfg["enabled"] is True + assert file_cfg["name"] == "test_model" + assert file_cfg["level"] == "WARNING" + assert file_cfg["rotation"] == "1 day" + assert file_cfg["retention"] == "10 days" + + def test_get_file_config_defaults(self): + """Test the default values of the file configuration.""" + cfg = {"log": {"run": {"file": {"enabled": True}}}} + file_cfg = get_file_config(cfg, "run") + assert file_cfg["name"] == "model" # Default name for run + assert file_cfg["level"] == DEFAULT_LEVEL + assert file_cfg["format"] == DEFAULT_FORMAT + assert file_cfg["datefmt"] == DEFAULT_DATEFMT + + def test_get_mesa_config(self): + """Test getting the Mesa configuration.""" + cfg = {"log": {"run": {"mesa": {"level": "DEBUG", "format": "%(message)s"}}}} + mesa_cfg = get_mesa_config(cfg, "run") + assert mesa_cfg["level"] == "DEBUG" + assert mesa_cfg["format"] == "%(message)s" + + def test_get_mesa_config_defaults(self): + """Test the default values of the Mesa configuration.""" + cfg = {"log": {"run": {}}} + mesa_cfg = get_mesa_config(cfg, "run") + assert mesa_cfg["level"] is None + assert mesa_cfg["format"] is None + + +class TestLogConfig: + """Tests for logger configuration.""" + + def test_create_console_handler_defaults(self): + """Test creating a console handler with default values.""" + handler = create_console_handler() + assert handler.level == logging.getLevelName(DEFAULT_LEVEL) + assert isinstance(handler, logging.StreamHandler) + + def test_create_console_handler_custom(self): + """Test creating a customized console handler.""" + handler = 
create_console_handler( + level="DEBUG", fmt="%(message)s", datefmt="%Y-%m-%d" + ) + assert handler.level == logging.DEBUG + formatter = handler.formatter + assert formatter._fmt == "%(message)s" + assert formatter.datefmt == "%Y-%m-%d" + + def test_create_file_handler_defaults(self, tmp_path): + """Test creating a file handler with default values.""" + log_file = tmp_path / "test.log" + handler = create_file_handler(log_file) + assert handler.level == logging.getLevelName(DEFAULT_LEVEL) + assert isinstance(handler, logging.FileHandler) + + def test_create_file_handler_custom(self, tmp_path): + """Test creating a customized file handler.""" + log_file = tmp_path / "test.log" + handler = create_file_handler( + log_file, level="WARNING", fmt="%(message)s", datefmt="%Y-%m-%d" + ) + assert handler.level == logging.WARNING + formatter = handler.formatter + assert formatter._fmt == "%(message)s" + assert formatter.datefmt == "%Y-%m-%d" + + def test_determine_log_file_path_once_mode(self, tmp_path): + """Test determining the log file path in 'once' mode.""" + # First repeat should create file + path = determine_log_file_path( + outpath=tmp_path, log_name="test", logging_mode="once", repeat_id=1 + ) + assert path == tmp_path / "test.log" + + # Subsequent repeats should return None + path = determine_log_file_path( + outpath=tmp_path, log_name="test", logging_mode="once", repeat_id=2 + ) + assert path is None + + def test_determine_log_file_path_separate_mode(self, tmp_path): + """Test determining the log file path in 'separate' mode.""" + path1 = determine_log_file_path( + outpath=tmp_path, log_name="test", logging_mode="separate", repeat_id=1 + ) + assert path1 == tmp_path / "test_1.log" + + path2 = determine_log_file_path( + outpath=tmp_path, log_name="test", logging_mode="separate", repeat_id=2 + ) + assert path2 == tmp_path / "test_2.log" + + def test_determine_log_file_path_merge_mode(self, tmp_path): + """Test determining the log file path in 'merge' mode.""" + path1 = determine_log_file_path( + outpath=tmp_path, log_name="test", logging_mode="merge", repeat_id=1 + ) + assert path1 == tmp_path / "test.log" + + path2 = determine_log_file_path( + outpath=tmp_path, log_name="test", logging_mode="merge", repeat_id=2 + ) + assert path2 == tmp_path / "test.log" # Same file + + def test_setup_abses_logger_console_only(self): + """Test setting up the ABSESpy logger (console only).""" + logger = setup_abses_logger( + name="test_logger", console=True, console_level="DEBUG" + ) + assert logger.name == "test_logger" + assert len(logger.handlers) == 1 + assert isinstance(logger.handlers[0], logging.StreamHandler) + + def test_setup_abses_logger_file_only(self, tmp_path): + """Test setting up the ABSESpy logger (file only).""" + log_file = tmp_path / "test.log" + logger = setup_abses_logger( + name="test_logger", console=False, file_path=log_file, file_level="WARNING" + ) + assert logger.name == "test_logger" + assert len(logger.handlers) == 1 + assert isinstance(logger.handlers[0], logging.FileHandler) + assert log_file.exists() + + +class TestExpLogging: + """Tests for experiment-level logging.""" + + def test_setup_exp_logger_stdout_only(self, tmp_path): + """Test setting up the experiment logger (console only).""" + cfg = OmegaConf.create( + { + "outpath": str(tmp_path), + "log": { + "exp": { + "stdout": {"enabled": True, "level": "INFO"}, + "file": {"enabled": False}, + } + }, + } + ) + logger = setup_exp_logger(cfg) + assert logger.name == EXP_LOGGER_NAME + assert len(logger.handlers) == 1 + assert isinstance(logger.handlers[0], logging.StreamHandler) + + def test_setup_exp_logger_file_only(self, tmp_path): + """Test setting up the experiment logger (file only).""" + cfg = OmegaConf.create( + { + "outpath": str(tmp_path), + "log": { + "exp": { + "stdout": {"enabled": False}, + "file": { + "enabled": 
True, + "name": "experiment.log", + "level": "INFO", + }, + } + }, + } + ) + logger = setup_exp_logger(cfg) + assert logger.name == EXP_LOGGER_NAME + # When stdout is disabled, only file handler should be added + assert len(logger.handlers) == 1 + # Check that file handler exists + file_handlers = [ + h for h in logger.handlers if isinstance(h, logging.FileHandler) + ] + assert len(file_handlers) == 1 + assert (tmp_path / "experiment.log").exists() + + def test_setup_exp_logger_both(self, tmp_path): + """Test setting up the experiment logger (console and file).""" + cfg = OmegaConf.create( + { + "outpath": str(tmp_path), + "log": { + "exp": { + "stdout": {"enabled": True, "level": "DEBUG"}, + "file": { + "enabled": True, + "name": "experiment.log", + "level": "INFO", + }, + } + }, + } + ) + logger = setup_exp_logger(cfg) + assert logger.name == EXP_LOGGER_NAME + assert len(logger.handlers) == 2 + + def test_setup_exp_logger_separate_mode(self, tmp_path): + """Test the experiment logger in 'separate' mode.""" + cfg = OmegaConf.create( + { + "outpath": str(tmp_path), + "log": { + "mode": "separate", + "run": {"file": {"name": "model"}}, + "exp": {"file": {"enabled": True}}, + }, + } + ) + _ = setup_exp_logger(cfg) # Setup logger + # In separate mode, exp log should use run.file.name if not explicitly set + assert (tmp_path / "model.log").exists() + + +class TestModelLogging: + """Tests for model-run logging.""" + + def test_setup_model_logger_console_only(self, tmp_path): + """Test setting up the model logger (console only).""" + logger, mesa_logger, mesa_upper_logger = setup_model_logger( + name="test_model", + level="INFO", + outpath=None, # No outpath to disable file logging + console=True, + console_level="DEBUG", + logging_mode="once", + ) + assert logger.name == "abses" + assert len(logger.handlers) == 1 + assert isinstance(logger.handlers[0], logging.StreamHandler) + + def test_setup_model_logger_file_only(self, tmp_path): + """Test setting up the model logger (file only).""" + logger, mesa_logger, mesa_upper_logger = setup_model_logger( + name="test_model", + level="INFO", + outpath=tmp_path, + console=False, + logging_mode="once", + repeat_id=1, + ) + assert logger.name == "abses" + assert len(logger.handlers) == 1 + assert isinstance(logger.handlers[0], logging.FileHandler) + assert (tmp_path / "test_model.log").exists() + + def test_setup_model_logger_custom_format(self, tmp_path): + """Test setting up the model logger (custom formats).""" + logger, mesa_logger, mesa_upper_logger = setup_model_logger( + name="test_model", + level="INFO", + outpath=tmp_path, + console=True, + console_format="%(levelname)s: %(message)s", + console_datefmt="%Y-%m-%d", + file_format="[%(asctime)s] %(message)s", + file_datefmt="%H:%M:%S", + logging_mode="once", + repeat_id=1, + ) + # Check that handlers have correct formatters + console_handler = next( + (h for h in logger.handlers if isinstance(h, logging.StreamHandler)), None + ) + assert console_handler is not None + # Check format (may include datefmt in the format string) + assert "%(levelname)s" in console_handler.formatter._fmt + assert "%(message)s" in console_handler.formatter._fmt + + # Check file handler if it exists + file_handler = next( + (h for h in logger.handlers if isinstance(h, logging.FileHandler)), None + ) + if file_handler is not None: + assert "%(asctime)s" in file_handler.formatter._fmt + assert "%(message)s" in file_handler.formatter._fmt + + +class TestLoggingIntegration: + """Integration tests for the logging functionality.""" + + @pytest.fixture + def temp_dir(self): + """Create a temporary directory.""" + with tempfile.TemporaryDirectory() as tmpdir: + yield Path(tmpdir) + + def test_experiment_logging_integration(self, temp_dir): + """Test the complete experiment logging workflow.""" + cfg = 
OmegaConf.create( + { + "outpath": str(temp_dir), + "log": { + "mode": "separate", + "exp": { + "stdout": {"enabled": True, "level": "INFO"}, + "file": {"enabled": True, "name": "experiment.log"}, + }, + "run": { + "stdout": {"enabled": False}, + "file": { + "enabled": True, + "name": "model", + "level": "INFO", + }, + }, + }, + } + ) + + # Setup experiment logger + exp_logger = setup_exp_logger(cfg) + + try: + # Check handlers + file_handlers = [ + h for h in exp_logger.handlers if isinstance(h, logging.FileHandler) + ] + if file_handlers: + # Get the actual file path from the handler + actual_file_path = Path(file_handlers[0].baseFilename) + exp_logger.info("Experiment started") + # Force flush to ensure file is written + file_handlers[0].flush() + # Verify the actual file exists + assert actual_file_path.exists(), ( + f"Log file not found at {actual_file_path}. Files in temp_dir: {list(temp_dir.iterdir())}" + ) + else: + # No file handler was created, which might be expected in some cases + # But in this test we expect one + assert False, f"No file handler found. Handlers: {exp_logger.handlers}" + finally: + # Close all handlers to release file handles (required on Windows) + for handler in exp_logger.handlers[:]: + handler.close() + exp_logger.removeHandler(handler) + + def test_model_logging_integration(self, temp_dir): + """Test the complete model logging workflow.""" + _ = { + "outpath": str(temp_dir), + "log": { + "mode": "once", + "run": { + "stdout": {"enabled": True, "level": "INFO"}, + "file": { + "enabled": True, + "name": "model", + "level": "INFO", + }, + }, + }, + } + + # Setup model logger + logger, mesa_logger, mesa_upper_logger = setup_model_logger( + name="model", + level="INFO", + outpath=temp_dir, + console=True, + logging_mode="once", + repeat_id=1, + ) + try: + logger.info("Model started") + + # Verify model log file exists + assert (temp_dir / "model.log").exists() + finally: + # Close all handlers to release file handles (required on Windows) + for log in [logger, mesa_logger, mesa_upper_logger]: + for handler in log.handlers[:]: + handler.close() + log.removeHandler(handler) + + def test_logging_modes(self, temp_dir): + """Test the different logging modes.""" + # Test once mode + path1 = determine_log_file_path( + outpath=temp_dir, log_name="model", logging_mode="once", repeat_id=1 + ) + path2 = determine_log_file_path( + outpath=temp_dir, log_name="model", logging_mode="once", repeat_id=2 + ) + assert path1 == temp_dir / "model.log" + assert path2 is None + + # Test separate mode + path1 = determine_log_file_path( + outpath=temp_dir, log_name="model", logging_mode="separate", repeat_id=1 + ) + path2 = determine_log_file_path( + outpath=temp_dir, log_name="model", logging_mode="separate", repeat_id=2 + ) + assert path1 == temp_dir / "model_1.log" + assert path2 == temp_dir / "model_2.log" + + # Test merge mode + path1 = determine_log_file_path( + outpath=temp_dir, log_name="model", logging_mode="merge", repeat_id=1 + ) + path2 = determine_log_file_path( + outpath=temp_dir, log_name="model", logging_mode="merge", repeat_id=2 + ) + assert path1 == temp_dir / "model.log" + assert path2 == temp_dir / "model.log" + + def test_experiment_with_logging(self, temp_dir): + """Test logging during an experiment run.""" + # Clean ExperimentManager singleton to avoid conflicts with other tests + from abses.core.job_manager import ExperimentManager + + # Save and reset the singleton instance + original_instance = getattr(ExperimentManager, "_instance", None) + ExperimentManager._instance = None + + try: + cfg = OmegaConf.create( + { + "outpath": 
str(temp_dir), + "time": {"end": 2}, # Add time config to avoid errors + "log": { + "mode": "once", + "exp": { + "stdout": {"enabled": True, "level": "INFO"}, + "file": {"enabled": True}, + }, + "run": { + "stdout": {"enabled": False}, + "file": {"enabled": True, "name": "model"}, + }, + }, + } + ) + + class TestModel(MainModel): + def setup(self): + pass + + def step(self): + pass + + # Create and run experiment + exp = Experiment.new(TestModel, cfg) + + # Get the actual outpath used by experiment + actual_outpath = exp.outpath + + exp.batch_run(repeats=2, display_progress=False) + + # Verify log files exist + # Files are created in exp.outpath, not necessarily temp_dir + exp_log = actual_outpath / "experiment.log" + model_log = actual_outpath / "model.log" + assert exp_log.exists(), ( + f"Experiment log not found at {exp_log}. Files in {actual_outpath}: {list(actual_outpath.iterdir()) if actual_outpath.exists() else 'directory does not exist'}" + ) + assert model_log.exists(), ( + f"Model log not found at {model_log}. Files in {actual_outpath}: {list(actual_outpath.iterdir()) if actual_outpath.exists() else 'directory does not exist'}" + ) + finally: + # Close all log handlers to release file handles (required on Windows) + for logger_name in ["abses.core.experiment", "abses", "mesa", "MESA"]: + log = logging.getLogger(logger_name) + for handler in log.handlers[:]: + handler.close() + log.removeHandler(handler) + # Restore the original instance + ExperimentManager._instance = original_instance diff --git a/tox.ini b/tox.ini index 2f19139f..185781e6 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,8 @@ envlist = py311, py312, py313 isolated_build = true skip_missing_interpreters = true +# Use system Python interpreters instead of uv-managed ones to avoid path issues +# tox will look for python3.11, python3.12, python3.13 in PATH [testenv] deps = @@ -10,3 +12,8 @@ deps = pytest-clarity pytest-sugar commands = python -m pytest {posargs:tests/} + +# Explicitly specify Python interpreter paths to avoid conflicts +# Prefer Homebrew Python over .local/bin Python +[testenv:py312] +basepython = /opt/homebrew/bin/python3.12 diff --git a/uv.lock b/uv.lock index ee85a02a..fd5c1e09 100644 --- a/uv.lock +++ b/uv.lock @@ -8,7 +8,7 @@ resolution-markers = [ [[package]] name = "abses" -version = "0.9.0" +version = "0.10.0" source = { editable = "." 
} dependencies = [ { name = "fiona" }, @@ -17,7 +17,6 @@ dependencies = [ { name = "geopandas" }, { name = "hydra-core" }, { name = "icons" }, - { name = "loguru" }, { name = "mesa" }, { name = "mesa-geo" }, { name = "netcdf4" }, @@ -119,7 +118,6 @@ requires-dist = [ { name = "geopandas", specifier = ">=0,<1" }, { name = "hydra-core", specifier = ">=1.3,<1.4" }, { name = "icons" }, - { name = "loguru", specifier = ">=0.7" }, { name = "mesa", specifier = ">=3.1.0" }, { name = "mesa-geo", specifier = ">=0.9.1" }, { name = "mike", marker = "extra == 'docs'", specifier = ">=2.0.0" }, @@ -763,18 +761,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3e/8d/86586c0d75110f774e46e2bd6d134e2d1cca1dedc9bb08c388fa3df76acd/cftime-1.6.5-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a3cda6fd12c7fb25eff40a6a857a2bf4d03e8cc71f80485d8ddc65ccbd80f16a", size = 1718573, upload-time = "2025-10-13T18:56:02.788Z" }, { url = "https://files.pythonhosted.org/packages/bb/fe/7956914cfc135992e89098ebbc67d683c51ace5366ba4b114fef1de89b21/cftime-1.6.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:28cda78d685397ba23d06273b9c916c3938d8d9e6872a537e76b8408a321369b", size = 1788563, upload-time = "2025-10-13T18:56:04.075Z" }, { url = "https://files.pythonhosted.org/packages/e5/c7/6669708fcfe1bb7b2a7ce693b8cc67165eac00d3ac5a5e8f6ce1be551ff9/cftime-1.6.5-cp311-cp311-win_amd64.whl", hash = "sha256:93ead088e3a216bdeb9368733a0ef89a7451dfc1d2de310c1c0366a56ad60dc8", size = 473631, upload-time = "2025-10-13T18:56:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/82/c5/d70cb1ab533ca790d7c9b69f98215fa4fead17f05547e928c8f2b8f96e54/cftime-1.6.5-cp311-cp311-win_arm64.whl", hash = "sha256:3384d69a0a7f3d45bded21a8cbcce66c8ba06c13498eac26c2de41b1b9b6e890", size = 459383, upload-time = "2026-01-02T21:16:47.317Z" }, { url = "https://files.pythonhosted.org/packages/b6/c1/e8cb7f78a3f87295450e7300ebaecf83076d96a99a76190593d4e1d2be40/cftime-1.6.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:eef25caed5ebd003a38719bd3ff8847cd52ef2ea56c3ebdb2c9345ba131fc7c5", size = 504175, upload-time = "2025-10-13T18:56:06.398Z" }, { url = "https://files.pythonhosted.org/packages/50/1a/86e1072b09b2f9049bb7378869f64b6747f96a4f3008142afed8955b52a4/cftime-1.6.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c87d2f3b949e45463e559233c69e6a9cf691b2b378c1f7556166adfabbd1c6b0", size = 485980, upload-time = "2025-10-13T18:56:08.669Z" }, { url = "https://files.pythonhosted.org/packages/35/28/d3177b60da3f308b60dee2aef2eb69997acfab1e863f0bf0d2a418396ce5/cftime-1.6.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:82cb413973cc51b55642b3a1ca5b28db5b93a294edbef7dc049c074b478b4647", size = 1591166, upload-time = "2025-10-13T19:39:14.109Z" }, { url = "https://files.pythonhosted.org/packages/d1/fd/a7266970312df65e68b5641b86e0540a739182f5e9c62eec6dbd29f18055/cftime-1.6.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85ba8e7356d239cfe56ef7707ac30feaf67964642ac760a82e507ee3c5db4ac4", size = 1642614, upload-time = "2025-10-13T18:56:09.815Z" }, { url = "https://files.pythonhosted.org/packages/c4/73/f0035a4bc2df8885bb7bd5fe63659686ea1ec7d0cc74b4e3d50e447402e5/cftime-1.6.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:456039af7907a3146689bb80bfd8edabd074c7f3b4eca61f91b9c2670addd7ad", size = 1688090, upload-time = "2025-10-13T18:56:11.442Z" }, { url = 
"https://files.pythonhosted.org/packages/88/15/8856a0ab76708553ff597dd2e617b088c734ba87dc3fd395e2b2f3efffe8/cftime-1.6.5-cp312-cp312-win_amd64.whl", hash = "sha256:da84534c43699960dc980a9a765c33433c5de1a719a4916748c2d0e97a071e44", size = 464840, upload-time = "2025-10-13T18:56:12.506Z" }, + { url = "https://files.pythonhosted.org/packages/3a/85/451009a986d9273d2208fc0898aa00262275b5773259bf3f942f6716a9e7/cftime-1.6.5-cp312-cp312-win_arm64.whl", hash = "sha256:c62cd8db9ea40131eea7d4523691c5d806d3265d31279e4a58574a42c28acd77", size = 450534, upload-time = "2026-01-02T21:16:48.784Z" }, { url = "https://files.pythonhosted.org/packages/2e/60/74ea344b3b003fada346ed98a6899085d6fd4c777df608992d90c458fda6/cftime-1.6.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4aba66fd6497711a47c656f3a732c2d1755ad15f80e323c44a8716ebde39ddd5", size = 502453, upload-time = "2025-10-13T18:56:13.545Z" }, { url = "https://files.pythonhosted.org/packages/1e/14/adb293ac6127079b49ff11c05cf3d5ce5c1f17d097f326dc02d74ddfcb6e/cftime-1.6.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:89e7cba699242366e67d6fb5aee579440e791063f92a93853610c91647167c0d", size = 484541, upload-time = "2025-10-13T18:56:14.612Z" }, { url = "https://files.pythonhosted.org/packages/4f/74/bb8a4566af8d0ef3f045d56c462a9115da4f04b07c7fbbf2b4875223eebd/cftime-1.6.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2f1eb43d7a7b919ec99aee709fb62ef87ef1cf0679829ef93d37cc1c725781e9", size = 1591014, upload-time = "2025-10-13T19:39:15.346Z" }, { url = "https://files.pythonhosted.org/packages/ba/08/52f06ff2f04d376f9cd2c211aefcf2b37f1978e43289341f362fc99f6a0e/cftime-1.6.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e02a1d80ffc33fe469c7db68aa24c4a87f01da0c0c621373e5edadc92964900b", size = 1633625, upload-time = "2025-10-13T18:56:15.745Z" }, { url = "https://files.pythonhosted.org/packages/cf/33/03e0b23d58ea8fab94ecb4f7c5b721e844a0800c13694876149d98830a73/cftime-1.6.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18ab754805233cdd889614b2b3b86a642f6d51a57a1ec327c48053f3414f87d8", size = 1684269, upload-time = "2025-10-13T18:56:17.04Z" }, { url = "https://files.pythonhosted.org/packages/a4/60/a0cfba63847b43599ef1cdbbf682e61894994c22b9a79fd9e1e8c7e9de41/cftime-1.6.5-cp313-cp313-win_amd64.whl", hash = "sha256:6c27add8f907f4a4cd400e89438f2ea33e2eb5072541a157a4d013b7dbe93f9c", size = 465364, upload-time = "2025-10-13T18:56:18.05Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e8/ec32f2aef22c15604e6fda39ff8d581a00b5469349f8fba61640d5358d2c/cftime-1.6.5-cp313-cp313-win_arm64.whl", hash = "sha256:31d1ff8f6bbd4ca209099d24459ec16dea4fb4c9ab740fbb66dd057ccbd9b1b9", size = 450468, upload-time = "2026-01-02T21:16:50.193Z" }, ] [[package]] @@ -2272,19 +2273,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/59/c0/aea9a0b0d180f51d742428f211eafe7bf72139589fb2f484839e7d39efca/libpysal-4.13.0-py3-none-any.whl", hash = "sha256:a030358f1bef920faa6cd7f5c41a67a2b4d934e3976e9848c494ca47adcedfd1", size = 2820702, upload-time = "2025-03-26T14:32:44.92Z" }, ] -[[package]] -name = "loguru" -version = "0.7.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "win32-setctime", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = 
"sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" }, -] - [[package]] name = "lxml" version = "6.0.2" @@ -5467,15 +5455,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ca/51/5447876806d1088a0f8f71e16542bf350918128d0a69437df26047c8e46f/widgetsnbextension-4.0.14-py3-none-any.whl", hash = "sha256:4875a9eaf72fbf5079dc372a51a9f268fc38d46f767cbf85c43a36da5cb9b575", size = 2196503, upload-time = "2025-04-10T13:01:23.086Z" }, ] -[[package]] -name = "win32-setctime" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" }, -] - [[package]] name = "xarray" version = "2025.10.1"