From 0ac96b4002b5a9a8bbd5961349f0087405332d7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bal=C5=A1a=20=C5=A0arenac?= Date: Thu, 5 Mar 2026 18:54:05 +0100 Subject: [PATCH 1/3] feat(logging): make cron logs outcome-focused and import-safe Server triage needed one high-signal line per run instead of scattered INFO chatter, so script entrypoints now emit concise summary events with result and duration while preserving stack traces in error logs. The shared logging setup also keeps ownership boundaries around handlers so imported usage does not clobber foreign logger configuration, and it keeps fallback behavior for resilience when file logging cannot be initialized. --- upgrade/scripts/find_compatible_versions.py | 60 +++++--- upgrade/scripts/logging_config.py | 145 ++++++++++++++++++++ upgrade/scripts/manage_venv.py | 48 +++++-- upgrade/scripts/upgrade_python_package.py | 142 ++++++++++++++----- upgrade/scripts/utils.py | 5 +- 5 files changed, 332 insertions(+), 68 deletions(-) create mode 100644 upgrade/scripts/logging_config.py diff --git a/upgrade/scripts/find_compatible_versions.py b/upgrade/scripts/find_compatible_versions.py index d09955d..1ca5302 100644 --- a/upgrade/scripts/find_compatible_versions.py +++ b/upgrade/scripts/find_compatible_versions.py @@ -1,6 +1,7 @@ import argparse import json import logging +import time from enum import Enum from typing import Any, List, Optional from urllib.parse import urljoin @@ -10,6 +11,7 @@ from packaging.utils import parse_wheel_filename from packaging.version import Version +from upgrade.scripts.logging_config import configure_script_logging, log_run_summary from upgrade.scripts.requirements import ( filter_versions, parse_requirements_txt, @@ -80,7 +82,7 @@ def get_compatible_version( installed_version = get_installed_version(requirements_obj, venv_executable) if not installed_version: raise Exception(f"Package {requirements_obj.name} is not installed") - logging.info(f"Found installed version: {installed_version}") + 
logging.debug("Found installed version: %s", installed_version) upgrade_versions = get_compatible_upgrade_versions(requirements_obj, cloudsmith_url) for upgrade_version in upgrade_versions: @@ -116,38 +118,58 @@ def find_compatible_versions( or if requirements or requirements_file is not provided. """ response_status = {} + start_time = time.monotonic() + package_name = None + current_version = None + target = None + result = "errored" + + configure_script_logging( + log_location=log_location, + default_log_location="/var/log/find_compatible_versions.log", + test=bool(test), + ) + try: if requirements is None and requirements_file is None: raise Exception("Either requirements or requirements_file is required.") - if test: - logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(message)s") - else: - log_location = log_location or "/var/log/manage_venv.log" - logging.basicConfig( - filename=log_location, - level=logging.WARNING, - format="%(asctime)s %(message)s", - ) if cloudsmith_url: is_cloudsmith_url_valid(cloudsmith_url) requirements = requirements or parse_requirements_txt(requirements_file) + requirements_obj = to_requirements_obj(requirements) + package_name = requirements_obj.name + venv_executable = get_venv_executable(venv_path) + current_version = is_package_already_installed(package_name, venv_executable) + upgrade_version = get_compatible_version( - to_requirements_obj(requirements), venv_path, cloudsmith_url + requirements_obj, venv_path, cloudsmith_url ) if upgrade_version: response_status["responseStatus"] = CompatibleUpgradeStatus.AVAILABLE.value - logging.info(f"Found compatible upgrade version: {upgrade_version}") + target = upgrade_version + result = "upgrade_available" else: - response_status[ - "responseStatus" - ] = CompatibleUpgradeStatus.AT_LATEST_VERSION.value - logging.info("At latest upgrade version") - except Exception as e: + response_status["responseStatus"] = ( + CompatibleUpgradeStatus.AT_LATEST_VERSION.value + ) + result = 
"unchanged" + except Exception: response_status["responseStatus"] = CompatibleUpgradeStatus.ERROR.value - logging.error(e) - raise e + result = "errored" + logging.exception("find_compatible_versions run failed") + raise finally: + log_run_summary( + script="find_compatible_versions", + package=package_name, + current=current_version, + target=target, + final=current_version, + result=result, + duration_seconds=time.monotonic() - start_time, + ) + response = json.dumps(response_status) print(response) diff --git a/upgrade/scripts/logging_config.py b/upgrade/scripts/logging_config.py new file mode 100644 index 0000000..b546a88 --- /dev/null +++ b/upgrade/scripts/logging_config.py @@ -0,0 +1,145 @@ +import logging +import sys +from logging.handlers import WatchedFileHandler +from pathlib import Path +from typing import Optional, Tuple + +DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s" +DEFAULT_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S%z" +UPGRADE_HANDLER_ATTR = "_upgrade_handler" + + +class _MaxLevelFilter(logging.Filter): + def __init__(self, max_level: int): + super().__init__() + self.max_level = max_level + + def filter(self, record: logging.LogRecord) -> bool: + return record.levelno < self.max_level + + +def _is_upgrade_handler(handler: logging.Handler) -> bool: + return bool(getattr(handler, UPGRADE_HANDLER_ATTR, False)) + + +def _mark_upgrade_handler(handler: logging.Handler) -> None: + setattr(handler, UPGRADE_HANDLER_ATTR, True) + + +def _remove_upgrade_handlers(root: logging.Logger) -> None: + for handler in list(root.handlers): + if _is_upgrade_handler(handler): + root.removeHandler(handler) + handler.close() + + +def get_error_log_path(log_location: str) -> str: + log_path = Path(log_location) + if log_path.suffix: + return str(log_path.with_name(f"{log_path.stem}.error{log_path.suffix}")) + return f"{log_location}.error.log" + + +def configure_logging( + *, + log_location: Optional[str], + default_log_location: str, + test: bool, + level: 
int = logging.INFO, +) -> Tuple[Optional[str], Optional[str]]: + root = logging.getLogger() + _remove_upgrade_handlers(root) + has_foreign_handlers = any( + not _is_upgrade_handler(handler) for handler in root.handlers + ) + + formatter = logging.Formatter(DEFAULT_LOG_FORMAT, datefmt=DEFAULT_DATE_FORMAT) + + if test: + stream_handler = logging.StreamHandler(sys.stderr) + stream_handler.setLevel(level) + stream_handler.setFormatter(formatter) + _mark_upgrade_handler(stream_handler) + root.addHandler(stream_handler) + if not has_foreign_handlers: + root.setLevel(level) + return None, None + + regular_log_path = log_location or default_log_location + error_log_path = get_error_log_path(regular_log_path) + + regular_handler = WatchedFileHandler(regular_log_path) + regular_handler.setLevel(level) + regular_handler.addFilter(_MaxLevelFilter(logging.ERROR)) + regular_handler.setFormatter(formatter) + _mark_upgrade_handler(regular_handler) + + error_handler = WatchedFileHandler(error_log_path) + error_handler.setLevel(logging.ERROR) + error_handler.setFormatter(formatter) + _mark_upgrade_handler(error_handler) + + root.addHandler(regular_handler) + root.addHandler(error_handler) + if not has_foreign_handlers: + root.setLevel(level) + return regular_log_path, error_log_path + + +def configure_script_logging( + *, + log_location: Optional[str], + default_log_location: str, + test: bool, +) -> Tuple[Optional[str], Optional[str]]: + try: + return configure_logging( + log_location=log_location, + default_log_location=default_log_location, + test=test, + level=logging.DEBUG if test else logging.INFO, + ) + except Exception as logging_config_error: + configure_logging( + log_location=None, + default_log_location=default_log_location, + test=True, + level=logging.INFO, + ) + logging.warning( + "Failed to configure file logging at %s; falling back to stderr logging: %s", + log_location or default_log_location, + logging_config_error, + ) + return None, None + + +def 
_summary_value(value: Optional[object]) -> str: + if value is None: + return "unknown" + value_text = str(value) + if value_text == "": + return "unknown" + return value_text + + +def log_run_summary( + *, + script: str, + package: Optional[str], + current: Optional[object], + target: Optional[object], + final: Optional[object], + result: str, + duration_seconds: float, +) -> None: + logging.info( + "summary script=%s package=%s current=%s target=%s final=%s result=%s duration_s=%.2f", + script, + _summary_value(package), + _summary_value(current), + _summary_value(target), + _summary_value(final), + result, + duration_seconds, + ) diff --git a/upgrade/scripts/manage_venv.py b/upgrade/scripts/manage_venv.py index 0a96824..e876d92 100644 --- a/upgrade/scripts/manage_venv.py +++ b/upgrade/scripts/manage_venv.py @@ -5,11 +5,13 @@ import subprocess import sys import json +import time from contextlib import contextmanager from enum import Enum from pathlib import Path from typing import Any, List, Optional +from upgrade.scripts.logging_config import configure_script_logging, log_run_summary from upgrade.scripts.requirements import parse_requirements_txt, to_requirements_obj from upgrade.scripts.utils import ( is_development_cloudsmith, @@ -262,7 +264,7 @@ def build_and_upgrade_venv( auto_upgrade = True msg = "Requirements changed. Creating new virtualenv." print(msg) - logging.info(msg) + logging.debug(msg) py_executable = create_venv( envs_home, requirements, @@ -274,7 +276,7 @@ def build_and_upgrade_venv( if not auto_upgrade: msg = "Requirements did not change. Returning venv executable." 
print(msg) - logging.info(msg) + logging.debug(msg) return py_executable _check_venv_consistent(requirements_obj, py_executable) @@ -292,7 +294,7 @@ def build_and_upgrade_venv( additional_dependencies or [], log_location, ) - logging.info(response) + logging.debug(response) except Exception as e: logging.error( f"Unexpected error occurred while upgrading {requirements_obj.name}{requirements_obj.specifier} {str(e)}" @@ -322,23 +324,27 @@ def manage_venv( local_installation_path: Optional[str] = None, ): response_status = {} + start_time = time.monotonic() + package_name = None + target = None + + configure_script_logging( + log_location=log_location, + default_log_location="/var/log/manage_venv.log", + test=bool(test), + ) + try: if requirements is None and requirements_file is None: raise Exception("Either requirements or requirements_file is required.") - if test: - logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(message)s") - else: - log_location = log_location or "/var/log/manage_venv.log" - logging.basicConfig( - filename=log_location, - level=logging.WARNING, - format="%(asctime)s %(message)s", - ) if cloudsmith_url: is_cloudsmith_url_valid(cloudsmith_url) requirements = requirements or parse_requirements_txt(requirements_file) + requirements_obj = to_requirements_obj(requirements) + package_name = requirements_obj.name + target = str(requirements_obj.specifier) or None build_and_upgrade_venv( requirements, @@ -354,11 +360,23 @@ def manage_venv( local_installation_path, ) response_status["responseStatus"] = VenvUpgradeStatus.UPGRADED.value - except Exception as e: - logging.error(e) + except Exception: + logging.exception("manage_venv run failed") response_status["responseStatus"] = VenvUpgradeStatus.ERROR.value - raise e + raise finally: + status = response_status.get("responseStatus") + result = "upgraded" if status == VenvUpgradeStatus.UPGRADED.value else "errored" + log_run_summary( + script="manage_venv", + package=package_name, + 
current=None, + target=target, + final=None, + result=result, + duration_seconds=time.monotonic() - start_time, + ) + response = json.dumps(response_status) print(response) diff --git a/upgrade/scripts/upgrade_python_package.py b/upgrade/scripts/upgrade_python_package.py index 281ea24..b8dcb66 100644 --- a/upgrade/scripts/upgrade_python_package.py +++ b/upgrade/scripts/upgrade_python_package.py @@ -4,6 +4,7 @@ import logging import os import site +import time from importlib import util from pathlib import Path from typing import Optional @@ -12,6 +13,7 @@ from packaging.utils import parse_wheel_filename from upgrade.scripts.exceptions import PipFormatDecodeFailed +from upgrade.scripts.logging_config import configure_script_logging, log_run_summary from upgrade.scripts.requirements import filter_versions from upgrade.scripts.slack import send_slack_notification from upgrade.scripts.utils import ( @@ -53,7 +55,7 @@ def upgrade_and_run( was_updated = False response_err = "" if version is not None: - logging.info( + logging.debug( "Trying to install version %s of package %s", version, package_name ) was_updated, response_err = attempt_to_install_version( @@ -65,7 +67,7 @@ def upgrade_and_run( constraints_path, ) else: - logging.info('Trying to upgrade "%s" package.', package_name) + logging.debug('Trying to upgrade "%s" package.', package_name) was_updated, response_err = attempt_upgrade( package_install_cmd, cloudsmith_url, @@ -165,11 +167,11 @@ def install_with_constraints( wheel_path, ] if constraints_file_path: - logging.info("Installing wheel with constraints %s", wheel_path) + logging.debug("Installing wheel with constraints %s", wheel_path) install_args.extend(["-c", constraints_file_path]) else: # install without constraints for backwards compatibility - logging.info( + logging.debug( "No constraints.txt found. 
Installing wheel %s without constraints.txt", wheel_path, ) @@ -193,7 +195,7 @@ def install_with_constraints( resp = installer(*install_args) return resp except Exception: - logging.error("Failed to install wheel %s", wheel_path) + logging.exception("Failed to install wheel %s", wheel_path) print("Failed to install wheel %s" % wheel_path) raise @@ -235,9 +237,11 @@ def install_wheel( else: wheel = parsed_wheel_versions[-1] except IndexError: + logging.error("Wheel %s not found", package_name) print(f"Wheel {package_name} not found") raise except Exception as e: + logging.error("Failed to install wheel %s: %s", package_name, e) print(f"Failed to install wheel {package_name} due to error: {e}") raise to_install = wheel + extra @@ -388,7 +392,7 @@ def attempt_to_install_version( *args, ) except Exception as e: - logging.info(f"Could not find {package_install_cmd} {version}") + logging.warning("Could not find %s %s", package_install_cmd, version) print(f"Could not find {package_install_cmd} {version}") return False, str(e) package_name, _ = split_package_name_and_extra(package_install_cmd) @@ -422,6 +426,27 @@ def _get_installed_packages_snapshot(): return snapshot +def _get_updated_packages(before_snapshot, after_snapshot): + if before_snapshot is None or after_snapshot is None: + return [] + + package_names = sorted(set(before_snapshot) | set(after_snapshot)) + updated_packages = [] + for package_name in package_names: + before_version = before_snapshot.get(package_name) + after_version = after_snapshot.get(package_name) + if before_version == after_version: + continue + updated_packages.append( + { + "package": package_name, + "from": before_version, + "to": after_version, + } + ) + return updated_packages + + def attempt_upgrade( package_install_cmd, cloudsmith_url=None, @@ -460,19 +485,33 @@ def attempt_upgrade( after_snapshot = _get_installed_packages_snapshot() after_version = is_package_already_installed(package_name) + updated_packages = 
_get_updated_packages(before_snapshot, after_snapshot) was_upgraded = before_version != after_version - if not was_upgraded and before_snapshot is not None and after_snapshot is not None: - was_upgraded = before_snapshot != after_snapshot + if not was_upgraded and updated_packages: + was_upgraded = True + if was_upgraded: - logging.info('"%s" package was upgraded.', package_install_cmd) + logging.debug( + '"%s" package was upgraded. from=%s to=%s updated_count=%s', + package_install_cmd, + before_version, + after_version, + len(updated_packages), + ) + if updated_packages: + logging.debug("Updated packages detail: %s", updated_packages) else: - logging.info('"%s" package was already up-to-date.', package_install_cmd) + logging.debug( + '"%s" package was already up-to-date. version=%s', + package_install_cmd, + after_version, + ) return was_upgraded, resp def reload_uwsgi_app(package_name): uwsgi_vassals_dir = "/etc/uwsgi/vassals" - logging.info("Reloading uwsgi app %s", package_name) + logging.debug("Reloading uwsgi app %s", package_name) ini_file_path = os.path.join(uwsgi_vassals_dir, f"{package_name}.ini") if not os.path.isfile(ini_file_path): logging.debug("%s is not a uwsgi app", package_name) @@ -488,7 +527,7 @@ def run_initial_post_install(package_name, *args): file_path = os.path.join("/opt/var", file_name) run_post_install = os.path.isfile(file_path) if run_post_install: - logging.info("Running initial post install of package %s", package_name) + logging.debug("Running initial post install of package %s", package_name) module_name = package_name.replace("-", "_") try_running_module(module_name, *args) # delete the file to avoid running post install again @@ -503,7 +542,10 @@ def run_module_and_reload_uwsgi_app(module_name, *args): def send_upgrade_notification(header, cloudsmith_url, slack_webhook_url): try: - log_filepath = get_log_file_path().as_posix() or "log file" + log_file_path = get_log_file_path() + log_filepath = ( + log_file_path.as_posix() if 
log_file_path is not None else "log file" + ) server_metadata = get_server_metadata() environment = "dev" if is_development_cloudsmith(cloudsmith_url) else "prod" text = f"{environment.upper()} - For more details, please audit {str(log_filepath)} at ({server_metadata})." @@ -512,8 +554,8 @@ def send_upgrade_notification(header, cloudsmith_url, slack_webhook_url): text, slack_webhook_url, ) - except Exception as e: - logging.error(f"Failed to send upgrade notification due to error: {e}") + except Exception: + logging.exception("Failed to send upgrade notification") raise @@ -548,7 +590,7 @@ def try_running_module(wheel, *args, **kwargs): ) raise else: - logging.info("No module named %s", module_name) + logging.warning("No module named %s", module_name) print(f"No module named {module_name}") @@ -656,17 +698,25 @@ def upgrade_python_package( *vars, ): success = False + run_succeeded = False response_output = "" + start_time = time.monotonic() + package_name, _ = split_package_name_and_extra(package) + current_version = None + final_version = None + + configure_script_logging( + log_location=log_location, + default_log_location="/var/log/upgrade_python_package.log", + test=test, + ) + + try: + current_version = is_package_already_installed(package_name) + except Exception: + logging.debug("Unable to read current package version for %s", package_name) + try: - if test: - logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(message)s") - else: - log_location = log_location or "/var/log/upgrade_python_package.log" - logging.basicConfig( - filename=log_location, - level=logging.WARNING, - format="%(asctime)s %(message)s", - ) if cloudsmith_url: is_cloudsmith_url_valid(cloudsmith_url) wheels_path = wheels_path or "/vagrant/wheels" @@ -696,17 +746,43 @@ def upgrade_python_package( constraints_path, *vars, ) + run_succeeded = True except Exception as e: + logging.exception("Upgrade run failed package=%s", package) if not format_output: - raise e + raise 
response_output += str(e) - if format_output: - while len(logging.root.handlers) > 0: - logging.root.removeHandler(logging.root.handlers[-1]) - logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(message)s") - response = json.dumps({"success": success, "responseOutput": response_output}) - logging.info(response) - print(response) + finally: + try: + final_version = is_package_already_installed(package_name) + except Exception: + logging.debug("Unable to read final package version for %s", package_name) + + if not run_succeeded: + result = "errored" + elif success: + result = "upgraded" + elif response_output: + result = "upgrade_failed" + else: + result = "unchanged" + + log_run_summary( + script="upgrade_python_package", + package=package_name, + current=current_version, + target=version, + final=final_version, + result=result, + duration_seconds=time.monotonic() - start_time, + ) + + if format_output: + response = json.dumps( + {"success": success, "responseOutput": response_output} + ) + logging.debug(response) + print(response) def main(): diff --git a/upgrade/scripts/utils.py b/upgrade/scripts/utils.py index 468591d..f8a69b1 100644 --- a/upgrade/scripts/utils.py +++ b/upgrade/scripts/utils.py @@ -78,6 +78,9 @@ def run(*command, **kwargs): command = command[0].split() print(*command) command = [word.format(**os.environ) for word in command] + logging.debug( + 'Running command executable="%s" arg_count=%s', command[0], len(command) - 1 + ) try: options = dict( stdout=subprocess.PIPE, @@ -108,7 +111,7 @@ def run(*command, **kwargs): raise err if completed.stdout: print(completed.stdout) - logging.info("Completed. Output: %s", completed.stdout) + logging.debug("Completed. 
Output: %s", completed.stdout) return completed.stdout.rstrip() if completed.returncode == 0 else None From c0421f4fe4c7abb27ddc2b7706c6fa51ba351cff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bal=C5=A1a=20=C5=A0arenac?= Date: Thu, 5 Mar 2026 20:11:33 +0100 Subject: [PATCH 2/3] docs(logging): clarify new logging design Recent logging changes intentionally prioritize one high-signal run summary at INFO while keeping verbose execution detail in DEBUG and stack traces in error logs. Adding targeted docstrings makes that design intent explicit for reviewers and future maintainers. The new notes also explain why handler ownership and selective cleanup exist: these scripts can be imported, so logger setup must avoid clobbering foreign application handlers. --- upgrade/scripts/logging_config.py | 35 +++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/upgrade/scripts/logging_config.py b/upgrade/scripts/logging_config.py index b546a88..764cc79 100644 --- a/upgrade/scripts/logging_config.py +++ b/upgrade/scripts/logging_config.py @@ -1,3 +1,14 @@ +"""Shared logging helpers for cron-oriented upgrade scripts. + +The logging model is intentionally simple for operations: +- one INFO summary line per run in the regular log +- stack traces and failures in the companion error log +- detailed execution chatter at DEBUG level + +Handlers created by this module are tagged and cleaned up selectively so importing +applications keep their own logging handlers and root-level behavior. +""" + import logging import sys from logging.handlers import WatchedFileHandler @@ -27,6 +38,10 @@ def _mark_upgrade_handler(handler: logging.Handler) -> None: def _remove_upgrade_handlers(root: logging.Logger) -> None: + """Remove only handlers that were created by this package. + + This avoids clobbering logger state for applications that import these scripts. 
+ """ for handler in list(root.handlers): if _is_upgrade_handler(handler): root.removeHandler(handler) @@ -34,6 +49,7 @@ def _remove_upgrade_handlers(root: logging.Logger) -> None: def get_error_log_path(log_location: str) -> str: + """Build the companion error-log path from a regular log path.""" log_path = Path(log_location) if log_path.suffix: return str(log_path.with_name(f"{log_path.stem}.error{log_path.suffix}")) @@ -47,6 +63,14 @@ def configure_logging( test: bool, level: int = logging.INFO, ) -> Tuple[Optional[str], Optional[str]]: + """Configure upgrade-script handlers on the root logger. + + Non-test mode writes to two files: + - regular log receives records below ERROR + - error log receives ERROR and above + + Test/fallback mode writes to stderr. + """ root = logging.getLogger() _remove_upgrade_handlers(root) has_foreign_handlers = any( @@ -92,6 +116,11 @@ def configure_script_logging( default_log_location: str, test: bool, ) -> Tuple[Optional[str], Optional[str]]: + """Script-level logging setup with resilient fallback. + + If file logging fails (for example, missing permissions on /var/log), this + falls back to stderr logging so the script can still complete and emit status. + """ try: return configure_logging( log_location=log_location, @@ -115,6 +144,7 @@ def configure_script_logging( def _summary_value(value: Optional[object]) -> str: + """Normalize summary values so output stays parseable and explicit.""" if value is None: return "unknown" value_text = str(value) @@ -133,6 +163,11 @@ def log_run_summary( result: str, duration_seconds: float, ) -> None: + """Emit a single high-signal run summary line at INFO level. + + The output is key-value formatted so operators can grep quickly and also parse + it mechanically in log pipelines without JSON formatting requirements. 
+ """ logging.info( "summary script=%s package=%s current=%s target=%s final=%s result=%s duration_s=%.2f", script, From 41d3ed18e49fbca24ac874f927a6c305a9fa1f0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bal=C5=A1a=20=C5=A0arenac?= Date: Thu, 5 Mar 2026 20:15:19 +0100 Subject: [PATCH 3/3] docs(changelog): add changelog entry for logging observability --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5e8569..caebaf6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,10 +12,17 @@ and this project adheres to a _modified_ form of _[Semantic Versioning][semver]_ ### Changed +- Improve logging observability with dedicated regular and error logs. ([#28]) + ### Fixed +- Fall back to stderr logging when file logging cannot be initialized. ([#28]) +- Remove upgrade-owned handlers and leave the root level unchanged when foreign handlers are present. ([#28]) + ### Removed +[#28]: https://github.com/openlawlibrary/upgrade-python-package/pull/28 + ## [0.9.1] ### Added