Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
8dc34df
adding performance logging
cmatKhan Apr 17, 2026
09f863b
adding session id to track unique visits
cmatKhan Apr 17, 2026
2f3b1bb
updating terraform to add cloudwatch dashboard
cmatKhan Apr 17, 2026
8d18802
removing traefik from shiny log
cmatKhan Apr 17, 2026
39c2767
trying to get cloudwatch dashboard working
cmatKhan Apr 17, 2026
0e6af75
giving up and did cloudwatch dashboard by hand
cmatKhan Apr 17, 2026
10f6b37
Merge branch 'dev' into add_performance_logging
cmatKhan Apr 21, 2026
91eb249
sorting imports
cmatKhan Apr 21, 2026
67e9d46
fix: eliminate spurious _topn_data reactivity bursts in comparison si…
MackLiao Apr 26, 2026
64d6165
feat: TFBPSHINY_PROFILE_LOG_FILE env var and startup destination log
MackLiao Apr 26, 2026
9e2da44
fix: eliminate spurious reactivity bursts in binding/perturbation sid…
MackLiao Apr 27, 2026
20c74d5
perf: bump _settled_datasets debounce 0.3s → 1.0s
MackLiao Apr 27, 2026
79fe931
fix: debounce dataset_filters read in _matrix_data to fix toggle-off …
MackLiao Apr 27, 2026
82c7654
fix: stabilize regulator_selector DOM via update_selectize in binding…
MackLiao Apr 27, 2026
1820565
fix: convert regulator-choices effect to lazy render to stop eager co…
MackLiao Apr 27, 2026
c9048ee
fix: pin _regulator_choices_trigger with suspend_when_hidden=False
MackLiao Apr 27, 2026
86818d3
refactor: extract _SETTLED_WINDOW_SEC and update stale sidebar docstr…
MackLiao Apr 27, 2026
678dab7
Merge branch 'dev' into fix_reactivity
cmatKhan Apr 29, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 54 additions & 34 deletions configure_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
from enum import Enum
from typing import Literal

_PROFILE_FORMAT = "%(message)s"


class LogLevel(Enum):
DEBUG = logging.DEBUG
Expand Down Expand Up @@ -31,34 +33,22 @@ def from_string(cls, level_str: str) -> Enum:

def configure_logger(
name: str,
level: int = logging.DEBUG, # Use int type hint here
level: int = logging.DEBUG,
format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handler_type: Literal["console", "file"] = "console",
log_file: str = "tfbpmodeling.log",
) -> logging.Logger:
"""
Configures a logger.

:param name: Name of the logger
:type name: str
:param level: Logging level, must be one of logging.DEBUG,
logging.INFO, logging.WARNING, logging.ERROR
:type level: int
:param format: Logging format
:type format: str
:param handler_type: Type of handler, either 'console' or 'file'
:type handler_type: Literal["console", "file"]
:param log_file: Path to log file, required if handler_type is 'file'.
Default is 'tfbpmodeling.log'
:type log_file: str

:return: Configured logger
Configure a logger with a single handler.

:param name: Logger name.
:param level: Logging level (``logging.DEBUG``, ``logging.INFO``, etc.).
:param format: Log record format string.
:param handler_type: Destination — ``"console"`` or ``"file"``.
:param log_file: Path used when ``handler_type="file"``.
:returns: Configured logger.
:rtype: logging.Logger

:raises ValueError: If any of the parameters have invalid datatypes

example usage:
>>> logger = configure_logger("my_logger", level=logging.INFO)
:raises ValueError: If any parameter is invalid.

"""
if not isinstance(name, str):
Expand All @@ -69,30 +59,60 @@ def configure_logger(
raise ValueError("Invalid logging level")
if not isinstance(format, str):
raise ValueError("format must be a string")
if handler_type not in ["console", "file"]:
if handler_type not in ("console", "file"):
raise ValueError("handler_type must be 'console' or 'file'")
if handler_type == "file" and not log_file:
raise ValueError("log_file must be specified for file handler")

logger = logging.getLogger(name)
logger.setLevel(level)

# Remove all handlers associated with the logger object to avoid duplicate logs
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Remove existing handlers to avoid duplicates on re-configuration.
for h in logger.handlers[:]:
logger.removeHandler(h)

if handler_type == "console":
handler = logging.StreamHandler()
elif handler_type == "file":
if not log_file:
raise ValueError("log_file must be specified for file handler")
handler = logging.FileHandler(log_file)
handler: logging.Handler = logging.StreamHandler()
else:
raise ValueError("Invalid handler_type. Must be 'console' or 'file'.")
handler = logging.FileHandler(log_file)

handler.setLevel(level)
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
handler.setFormatter(logging.Formatter(format))
logger.addHandler(handler)

return logger


def configure_profile_logger(
    handler_type: Literal["console", "file"] = "console",
    level: int = logging.DEBUG,
    log_file: str = "tfbpshiny_profile.log",
    enabled: bool = True,
) -> logging.Logger:
    """
    Set up and return the ``"profiler"`` logger used for timing instrumentation.

    Records are emitted with a bare ``%(message)s`` format because all
    structure is already embedded in the message by
    :func:`tfbpshiny.utils.profiler.profile_span`. Propagation is disabled so
    profile records never reach the root or ``"shiny"`` logger.

    :param handler_type: Destination for profile records — ``"console"`` or ``"file"``.
    :param level: Log level; ignored (set to ``CRITICAL``) when ``enabled=False``.
    :param log_file: Path used when ``handler_type="file"``.
    :param enabled: When ``False``, silences the logger by setting its level
        to ``CRITICAL``.
    :returns: Configured ``"profiler"`` logger.
    :rtype: logging.Logger

    """
    profile_logger = configure_logger(
        "profiler",
        level=level if enabled else logging.CRITICAL,
        format=_PROFILE_FORMAT,
        handler_type=handler_type,
        log_file=log_file,
    )
    # Keep profile records out of ancestor loggers (root / "shiny").
    profile_logger.propagate = False
    return profile_logger
23 changes: 22 additions & 1 deletion docs/development.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,4 +45,25 @@ callbacks are necessary, then either a different plotting library or a custom
implementation of some sort will likely be necessary. Would be nice to explore
other options, including custom implementations of plots using d3 and then creating
the shiny widget manually. With AI, that might be more achievable (for me at least)
than it would be otherwise b/c we can give the AI the docs for all three as context.
than it would be otherwise b/c we can give the AI the docs for all three as context.

There are both a general application log and a profiling log.

Both loggers write to stdout/stderr, so Docker's awslogs driver captures everything into the `shinyapp` stream under `/tfbpshiny/production`. To later separate `PROFILE` lines from the main log lines when parsing:

```{python}
import pandas as pd

df = pd.read_csv("exported.log", sep="|", header=None, skipinitialspace=True,
names=["marker","timestamp","elapsed_s","op","module","dataset","context"])
profile = df[df["marker"].str.strip() == "PROFILE"]
```


Or in CloudWatch Logs Insights:

```{raw}
fields @timestamp, @message
| filter @message like /^PROFILE/
| parse @message "PROFILE | * | * | * | * | * | *" as timestamp, elapsed_s, op, module, dataset, context
```
6 changes: 4 additions & 2 deletions production.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ services:
- HF_HOME=/hf-cache
- HF_TOKEN
- DOCKER_ENV
- TFBPSHINY_LOG_HANDLER=console
- TFBPSHINY_PROFILE_HANDLER=console
volumes:
- hf_cache:/hf-cache
expose:
Expand Down Expand Up @@ -61,8 +63,8 @@ services:
logging:
driver: awslogs
options:
awslogs-group: /tfbpshiny/production
awslogs-region: us-east-1
awslogs-group: /tfbpshiny/production/traefik
awslogs-region: us-east-2
awslogs-stream: traefik
awslogs-create-group: "true"
restart: unless-stopped
Expand Down
Loading
Loading