Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 54 additions & 34 deletions configure_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
from enum import Enum
from typing import Literal

_PROFILE_FORMAT = "%(message)s"


class LogLevel(Enum):
DEBUG = logging.DEBUG
Expand Down Expand Up @@ -31,34 +33,22 @@ def from_string(cls, level_str: str) -> Enum:

def configure_logger(
name: str,
level: int = logging.DEBUG, # Use int type hint here
level: int = logging.DEBUG,
format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handler_type: Literal["console", "file"] = "console",
log_file: str = "tfbpmodeling.log",
) -> logging.Logger:
"""
Configures a logger.

:param name: Name of the logger
:type name: str
:param level: Logging level, must be one of logging.DEBUG,
logging.INFO, logging.WARNING, logging.ERROR
:type level: int
:param format: Logging format
:type format: str
:param handler_type: Type of handler, either 'console' or 'file'
:type handler_type: Literal["console", "file"]
:param log_file: Path to log file, required if handler_type is 'file'.
Default is 'tfbpmodeling.log'
:type log_file: str

:return: Configured logger
Configure a logger with a single handler.

:param name: Logger name.
:param level: Logging level (``logging.DEBUG``, ``logging.INFO``, etc.).
:param format: Log record format string.
:param handler_type: Destination — ``"console"`` or ``"file"``.
:param log_file: Path used when ``handler_type="file"``.
:returns: Configured logger.
:rtype: logging.Logger

:raises ValueError: If any of the parameters have invalid datatypes

example usage:
>>> logger = configure_logger("my_logger", level=logging.INFO)
:raises ValueError: If any parameter is invalid.

"""
if not isinstance(name, str):
Expand All @@ -69,30 +59,60 @@ def configure_logger(
raise ValueError("Invalid logging level")
if not isinstance(format, str):
raise ValueError("format must be a string")
if handler_type not in ["console", "file"]:
if handler_type not in ("console", "file"):
raise ValueError("handler_type must be 'console' or 'file'")
if handler_type == "file" and not log_file:
raise ValueError("log_file must be specified for file handler")

logger = logging.getLogger(name)
logger.setLevel(level)

# Remove all handlers associated with the logger object to avoid duplicate logs
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Remove existing handlers to avoid duplicates on re-configuration.
for h in logger.handlers[:]:
logger.removeHandler(h)

if handler_type == "console":
handler = logging.StreamHandler()
elif handler_type == "file":
if not log_file:
raise ValueError("log_file must be specified for file handler")
handler = logging.FileHandler(log_file)
handler: logging.Handler = logging.StreamHandler()
else:
raise ValueError("Invalid handler_type. Must be 'console' or 'file'.")
handler = logging.FileHandler(log_file)

handler.setLevel(level)
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
handler.setFormatter(logging.Formatter(format))
logger.addHandler(handler)

return logger


def configure_profile_logger(
    handler_type: Literal["console", "file"] = "console",
    level: int = logging.DEBUG,
    log_file: str = "tfbpshiny_profile.log",
    enabled: bool = True,
) -> logging.Logger:
    """
    Configure and return the ``"profiler"`` logger used for timing instrumentation.

    The profiler logger deliberately uses the bare ``%(message)s`` format
    (``_PROFILE_FORMAT``) because all record structure is embedded in the
    message by :func:`tfbpshiny.utils.profiler.profile_span`. Propagation is
    disabled so profile records never reach the root or ``"shiny"`` logger.

    :param handler_type: Destination for profile records — ``"console"`` or ``"file"``.
    :param level: Log level; ignored (replaced with ``CRITICAL``) when ``enabled=False``.
    :param log_file: Path used when ``handler_type="file"``.
    :param enabled: When ``False``, silences the logger by raising its level
        to ``CRITICAL``.
    :returns: Configured ``"profiler"`` logger.
    :rtype: logging.Logger

    """
    # Disabled profiling is implemented as a CRITICAL threshold rather than
    # removing the handler, so the logger object stays usable either way.
    if enabled:
        chosen_level = level
    else:
        chosen_level = logging.CRITICAL

    profile_logger = configure_logger(
        "profiler",
        level=chosen_level,
        format=_PROFILE_FORMAT,
        handler_type=handler_type,
        log_file=log_file,
    )
    # Keep profile output out of ancestor loggers' handlers.
    profile_logger.propagate = False
    return profile_logger
23 changes: 22 additions & 1 deletion docs/development.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,4 +45,25 @@ callbacks are necessary, then either a different plotting library or a custom
implementation of some sort will likely be necessary. Would be nice to explore
other options, including custom implementations of plots using d3 and then creating
the shiny widget manually. With AI, that might be more achievable (for me at least)
than it would be otherwise b/c we can give the AI the docs for all three as context.
than it would be otherwise b/c we can give the AI the docs for all three as context.

There are two loggers: a general application log and a profiling log.

Both loggers write to stdout/stderr; Docker's awslogs driver captures everything into the `shinyapp` stream under `/tfbpshiny/production`. To later separate `PROFILE` lines from main log lines when parsing an exported log:

```{python}
import pandas as pd
df = pd.read_csv("exported.log", sep="|", header=None, skipinitialspace=True,
names=["marker","timestamp","elapsed_s","op","module","dataset","context"])
profile = df[df["marker"].str.strip() == "PROFILE"]
```
Comment on lines +50 to +60


Or in CloudWatch Logs Insights:

```{raw}
fields @timestamp, @message
| filter @message like /^PROFILE/
| parse @message "PROFILE | * | * | * | * | * | *" as timestamp, elapsed_s, op, module, dataset, context
Comment on lines +66 to +68
```
6 changes: 4 additions & 2 deletions production.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ services:
- HF_HOME=/hf-cache
- HF_TOKEN
- DOCKER_ENV
- TFBPSHINY_LOG_HANDLER=console
- TFBPSHINY_PROFILE_HANDLER=console
volumes:
- hf_cache:/hf-cache
expose:
Expand Down Expand Up @@ -61,8 +63,8 @@ services:
logging:
driver: awslogs
options:
awslogs-group: /tfbpshiny/production
awslogs-region: us-east-1
awslogs-group: /tfbpshiny/production/traefik
awslogs-region: us-east-2
awslogs-stream: traefik
awslogs-create-group: "true"
restart: unless-stopped
Expand Down
Loading
Loading